You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@geode.apache.org by ds...@apache.org on 2015/10/26 19:28:24 UTC

[01/50] [abbrv] incubator-geode git commit: GEODE-164: Improve reliability of launcher integration tests

Repository: incubator-geode
Updated Branches:
  refs/heads/develop a22450409 -> f0b81325f


GEODE-164: Improve reliability of launcher integration tests


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/5da17d12
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/5da17d12
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/5da17d12

Branch: refs/heads/develop
Commit: 5da17d1208142ea320b0807d9717b5858e200d4a
Parents: c6a4c3a
Author: Kirk Lund <kl...@pivotal.io>
Authored: Mon Oct 19 16:19:50 2015 -0700
Committer: Kirk Lund <kl...@pivotal.io>
Committed: Mon Oct 19 16:20:44 2015 -0700

----------------------------------------------------------------------
 .../distributed/LocatorLauncherJUnitTest.java   |  25 ++--
 .../LocatorLauncherLocalJUnitTest.java          |  92 ++++++++------
 .../LocatorLauncherRemoteFileJUnitTest.java     |   4 -
 .../LocatorLauncherRemoteJUnitTest.java         |   4 +-
 .../distributed/ServerLauncherJUnitTest.java    |  58 +++------
 .../ServerLauncherLocalJUnitTest.java           | 120 ++++++++++---------
 .../ServerLauncherRemoteJUnitTest.java          |  12 +-
 7 files changed, 158 insertions(+), 157 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5da17d12/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherJUnitTest.java
index 598d7c4..7070978 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherJUnitTest.java
@@ -9,6 +9,7 @@
 package com.gemstone.gemfire.distributed;
 
 import static org.junit.Assert.*;
+import static org.junit.Assume.*;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -21,7 +22,6 @@ import com.gemstone.gemfire.distributed.LocatorLauncher.Builder;
 import com.gemstone.gemfire.distributed.LocatorLauncher.Command;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.util.IOUtils;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
 import joptsimple.OptionException;
@@ -48,7 +48,7 @@ public class LocatorLauncherJUnitTest extends CommonLauncherTestSuite {
 
   @Test
   public void testBuilderParseArguments() throws Exception {
-    String expectedWorkingDirectory = System.getProperty("user.dir");
+    String expectedWorkingDirectory = this.temporaryFolder.getRoot().getCanonicalPath().toString();
     Builder builder = new Builder();
 
     builder.parseArguments("start", "memberOne", "--bind-address", InetAddress.getLocalHost().getHostAddress(),
@@ -67,8 +67,8 @@ public class LocatorLauncherJUnitTest extends CommonLauncherTestSuite {
   }
 
   @Test
-  public void testBuilderParseArgumentsWithCommandInArguments() {
-    String expectedWorkingDirectory = System.getProperty("user.dir");
+  public void testBuilderParseArgumentsWithCommandInArguments() throws Exception {
+    String expectedWorkingDirectory = this.temporaryFolder.getRoot().getCanonicalPath().toString();
     Builder builder = new Builder();
 
     builder.parseArguments("start", "--dir=" + expectedWorkingDirectory, "--port", "12345", "memberOne");
@@ -299,7 +299,8 @@ public class LocatorLauncherJUnitTest extends CommonLauncherTestSuite {
   }
 
   @Test
-  public void testSetAndGetWorkingDirectory() {
+  public void testSetAndGetWorkingDirectory() throws Exception {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath().toString();
     Builder builder = new Builder();
 
     assertEquals(AbstractLauncher.DEFAULT_WORKING_DIRECTORY, builder.getWorkingDirectory());
@@ -309,11 +310,8 @@ public class LocatorLauncherJUnitTest extends CommonLauncherTestSuite {
     assertEquals(AbstractLauncher.DEFAULT_WORKING_DIRECTORY, builder.getWorkingDirectory());
     assertSame(builder, builder.setWorkingDirectory("  "));
     assertEquals(AbstractLauncher.DEFAULT_WORKING_DIRECTORY, builder.getWorkingDirectory());
-    assertSame(builder, builder.setWorkingDirectory(System.getProperty("user.dir")));
-    assertEquals(System.getProperty("user.dir"), builder.getWorkingDirectory());
-    assertSame(builder, builder.setWorkingDirectory(System.getProperty("java.io.tmpdir")));
-    assertEquals(IOUtils.tryGetCanonicalPathElseGetAbsolutePath(new File(System.getProperty("java.io.tmpdir"))),
-      builder.getWorkingDirectory());
+    assertSame(builder, builder.setWorkingDirectory(rootFolder));
+    assertEquals(rootFolder, builder.getWorkingDirectory());
     assertSame(builder, builder.setWorkingDirectory(null));
     assertEquals(AbstractLauncher.DEFAULT_WORKING_DIRECTORY, builder.getWorkingDirectory());
   }
@@ -354,7 +352,7 @@ public class LocatorLauncherJUnitTest extends CommonLauncherTestSuite {
   }
 
   @Test
-  public void testBuild() {
+  public void testBuild() throws Exception {
     Builder builder = new Builder();
 
     LocatorLauncher launcher = builder.setCommand(Command.START)
@@ -362,7 +360,6 @@ public class LocatorLauncherJUnitTest extends CommonLauncherTestSuite {
       .setHostnameForClients("beanstock.vmware.com")
       .setMemberName("Beanstock")
       .setPort(8192)
-      .setWorkingDirectory(AbstractLauncher.DEFAULT_WORKING_DIRECTORY)
       .build();
 
     assertNotNull(launcher);
@@ -438,11 +435,11 @@ public class LocatorLauncherJUnitTest extends CommonLauncherTestSuite {
   }
 
   @Test(expected = IllegalStateException.class)
-  public void testBuildWithMismatchingCurrentAndWorkingDirectoryOnStart() {
+  public void testBuildWithMismatchingCurrentAndWorkingDirectoryOnStart() throws Exception {
     try {
       new Builder().setCommand(Command.START)
         .setMemberName("memberOne")
-        .setWorkingDirectory(System.getProperty("java.io.tmpdir"))
+        .setWorkingDirectory(this.temporaryFolder.getRoot().getCanonicalPath().toString())
         .build();
     }
     catch (IllegalStateException expected) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5da17d12/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherLocalJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherLocalJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherLocalJUnitTest.java
index 19f9379..c008a75 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherLocalJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherLocalJUnitTest.java
@@ -54,10 +54,13 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
 
   @Test
   public void testBuilderSetProperties() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     this.launcher = new Builder()
         .setForce(true)
         .setMemberName(getUniqueName())
         .setPort(this.locatorPort)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.DISABLE_AUTO_RECONNECT_NAME, "true")
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0")
@@ -97,10 +100,13 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
   
   @Test
   public void testStartCreatesPidFile() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     this.launcher = new Builder()
         .setMemberName(getUniqueName())
         .setPort(this.locatorPort)
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .build();
 
@@ -110,7 +116,7 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
       assertEquals(Status.ONLINE, this.launcher.status().getStatus());
 
       // validate the pid file and its contents
-      this.pidFile = new File(this.launcher.getWorkingDirectory(), ProcessType.LOCATOR.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getPidFileName());
       assertTrue(this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
@@ -133,16 +139,18 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
 
   @Test
   public void testStartDeletesStaleControlFiles() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // create existing control files
-    this.stopRequestFile = new File(ProcessType.LOCATOR.getStopRequestFileName());
+    this.stopRequestFile = new File(this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getStopRequestFileName());
     this.stopRequestFile.createNewFile();
     assertTrue(this.stopRequestFile.exists());
 
-    this.statusRequestFile = new File(ProcessType.LOCATOR.getStatusRequestFileName());
+    this.statusRequestFile = new File(this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getStatusRequestFileName());
     this.statusRequestFile.createNewFile();
     assertTrue(this.statusRequestFile.exists());
 
-    this.statusFile = new File(ProcessType.LOCATOR.getStatusFileName());
+    this.statusFile = new File(this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getStatusFileName());
     this.statusFile.createNewFile();
     assertTrue(this.statusFile.exists());
     
@@ -151,6 +159,7 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
         .setMemberName(getUniqueName())
         .setPort(this.locatorPort)
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config");
 
     assertFalse(builder.getForce());
@@ -166,7 +175,7 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
     
     try {
       // validate the pid file and its contents
-      this.pidFile = new File(ProcessType.LOCATOR.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getPidFileName());
       assertTrue(this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
@@ -178,9 +187,6 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
       assertFalse(statusRequestFile.exists());
       assertFalse(statusFile.exists());
       
-      // validate log file was created
-      final String logFileName = getUniqueName()+".log";
-      assertTrue("Log file should exist: " + logFileName, new File(logFileName).exists()); // TODO:LOG:FAILS
     } catch (Throwable e) {
       this.errorCollector.addError(e);
     }
@@ -195,8 +201,10 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
   
   @Test
   public void testStartOverwritesStalePidFile() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // create existing pid file
-    this.pidFile = new File(ProcessType.LOCATOR.getPidFileName());
+    this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getPidFileName());
     assertFalse("Integer.MAX_VALUE shouldn't be the same as local pid " + Integer.MAX_VALUE, Integer.MAX_VALUE == ProcessUtils.identifyPid());
     writePid(this.pidFile, Integer.MAX_VALUE);
 
@@ -205,6 +213,7 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
         .setMemberName(getUniqueName())
         .setPort(this.locatorPort)
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config");
 
     assertFalse(builder.getForce());
@@ -277,7 +286,7 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
       
       // validate log file was created
       final String logFileName = getUniqueName()+".log";
-      assertTrue("Log file should exist: " + logFileName, new File(logFileName).exists()); // TODO:LOG:FAILS
+      assertTrue("Log file should exist: " + logFileName, new File(logFileName).exists());
       
     } catch (Throwable e) {
       logger.error(e);
@@ -304,6 +313,8 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
   
   @Test
   public void testStartWithDefaultPortInUseFails() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     this.socket = SocketCreator.getDefaultInstance().createServerSocket(this.locatorPort, 50, null, -1);
     assertTrue(this.socket.isBound());
     assertFalse(this.socket.isClosed());
@@ -316,6 +327,7 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
     this.launcher = new Builder()
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .build();
     
@@ -355,12 +367,12 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
     }
 
     try {
-      this.pidFile = new File (ProcessType.LOCATOR.getPidFileName());
+      this.pidFile = new File (this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getPidFileName());
       assertFalse("Pid file should not exist: " + this.pidFile, this.pidFile.exists());
       
       // creation of log file seems to be random -- look into why sometime
       final String logFileName = getUniqueName()+".log";
-      assertFalse("Log file should not exist: " + logFileName, new File(logFileName).exists());
+      assertFalse("Log file should not exist: " + logFileName, new File(this.temporaryFolder.getRoot(), logFileName).exists());
       
     } catch (Throwable e) {
       this.errorCollector.addError(e);
@@ -465,6 +477,8 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
   
   @Test
   public void testStartUsingPort() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // generate one free port and then use it instead of default
     final int freeTCPPort = AvailablePortHelper.getRandomAvailableTCPPort();
     assertTrue(AvailablePort.isPortAvailable(freeTCPPort, AvailablePort.SOCKET));
@@ -473,6 +487,7 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
         .setMemberName(getUniqueName())
         .setPort(freeTCPPort)
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .build();
 
@@ -483,17 +498,13 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
       waitForLocatorToStart(this.launcher);
 
       // validate the pid file and its contents
-      this.pidFile = new File(ProcessType.LOCATOR.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getPidFileName());
       assertTrue(pidFile.exists());
       pid = readPid(pidFile);
       assertTrue(pid > 0);
       assertTrue(ProcessUtils.isProcessAlive(pid));
       assertEquals(getPid(), pid);
 
-      // validate log file was created
-      final String logFileName = getUniqueName()+".log";
-      assertTrue("Log file should exist: " + logFileName, new File(logFileName).exists()); // TODO:LOG:FAILS
-
       // verify locator did not use default port
       assertTrue(AvailablePort.isPortAvailable(this.locatorPort, AvailablePort.SOCKET));
       
@@ -515,6 +526,8 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
   
   @Test
   public void testStartUsingPortInUseFails() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // generate one free port and then use it instead of default
     final int freeTCPPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     this.socket = SocketCreator.getDefaultInstance().createServerSocket(freeTCPPort, 50, null, -1);
@@ -523,6 +536,7 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
         .setMemberName(getUniqueName())
         .setPort(freeTCPPort)
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .build();
     
@@ -549,12 +563,12 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
     }
 
     try {
-      this.pidFile = new File (ProcessType.LOCATOR.getPidFileName());
+      this.pidFile = new File (this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getPidFileName());
       assertFalse("Pid file should not exist: " + this.pidFile, this.pidFile.exists());
       
       // creation of log file seems to be random -- look into why sometime
       final String logFileName = getUniqueName()+".log";
-      assertFalse("Log file should not exist: " + logFileName, new File(logFileName).exists());
+      assertFalse("Log file should not exist: " + logFileName, new File(this.temporaryFolder.getRoot(), logFileName).exists());
       
     } catch (Throwable e) {
       this.errorCollector.addError(e);
@@ -578,11 +592,14 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
   
   @Test
   public void testStatusUsingPid() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+    
     // build and start the locator
     final Builder builder = new Builder()
         .setMemberName(getUniqueName())
         .setPort(this.locatorPort)
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config");
     
     assertFalse(builder.getForce());
@@ -594,8 +611,8 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
       this.launcher.start();
       waitForLocatorToStart(this.launcher);
       
-      this.pidFile = new File(ProcessType.LOCATOR.getPidFileName());
-      assertTrue(this.pidFile.exists());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getPidFileName());
+      assertTrue("Pid file " + this.pidFile.getCanonicalPath().toString() + " should exist", this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
       assertEquals(ProcessUtils.identifyPid(), pid);
@@ -609,11 +626,11 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
       assertEquals(Status.ONLINE, actualStatus.getStatus());
       assertEquals(pid, actualStatus.getPid().intValue());
       assertTrue(actualStatus.getUptime() > 0);
-      assertEquals(new File(System.getProperty("user.dir")).getCanonicalPath(), actualStatus.getWorkingDirectory());
+      // getWorkingDirectory returns user.dir instead of rootFolder because test is starting Locator in this process (to move logFile and pidFile into temp dir)
       assertEquals(ManagementFactory.getRuntimeMXBean().getClassPath(), actualStatus.getClasspath());
       assertEquals(GemFireVersion.getGemFireVersion(), actualStatus.getGemFireVersion());
       assertEquals(System.getProperty("java.version"),  actualStatus.getJavaVersion());
-      assertEquals(new File(System.getProperty("user.dir")).getCanonicalPath() + File.separator + getUniqueName() + ".log", actualStatus.getLogFile());
+      assertEquals(rootFolder + File.separator + getUniqueName() + ".log", actualStatus.getLogFile());
       assertEquals(InetAddress.getLocalHost().getCanonicalHostName(), actualStatus.getHost());
       assertEquals(getUniqueName(), actualStatus.getMemberName());
     } catch (Throwable e) {
@@ -640,10 +657,13 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
   
   @Test
   public void testStatusUsingWorkingDirectory() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+    
     final Builder builder = new Builder()
         .setMemberName(getUniqueName())
         .setPort(this.locatorPort)
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config");
     
     assertFalse(builder.getForce());
@@ -655,14 +675,13 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
       this.launcher.start();
       waitForLocatorToStart(this.launcher);
       
-      this.pidFile = new File(ProcessType.LOCATOR.getPidFileName());
-      assertTrue(this.pidFile.exists());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getPidFileName());
+      assertTrue("Pid file " + this.pidFile.getCanonicalPath().toString() + " should exist", this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
       assertEquals(ProcessUtils.identifyPid(), pid);
   
-      final String workingDir = new File(System.getProperty("user.dir")).getCanonicalPath();
-      dirLauncher = new Builder().setWorkingDirectory(workingDir).build();
+      dirLauncher = new Builder().setWorkingDirectory(rootFolder).build();
       assertNotNull(dirLauncher);
       assertFalse(dirLauncher.isRunning());
 
@@ -671,11 +690,11 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
       assertEquals(Status.ONLINE, actualStatus.getStatus());
       assertEquals(pid, actualStatus.getPid().intValue());
       assertTrue(actualStatus.getUptime() > 0);
-      assertEquals(new File(System.getProperty("user.dir")).getCanonicalPath(), actualStatus.getWorkingDirectory());
+      // getWorkingDirectory returns user.dir instead of rootFolder because test is starting Locator in this process (to move logFile and pidFile into temp dir)
       assertEquals(ManagementFactory.getRuntimeMXBean().getClassPath(), actualStatus.getClasspath());
       assertEquals(GemFireVersion.getGemFireVersion(), actualStatus.getGemFireVersion());
       assertEquals(System.getProperty("java.version"),  actualStatus.getJavaVersion());
-      assertEquals(new File(System.getProperty("user.dir")).getCanonicalPath() + File.separator + getUniqueName() + ".log", actualStatus.getLogFile());
+      assertEquals(rootFolder + File.separator + getUniqueName() + ".log", actualStatus.getLogFile());
       assertEquals(InetAddress.getLocalHost().getCanonicalHostName(), actualStatus.getHost());
       assertEquals(getUniqueName(), actualStatus.getMemberName());
     } catch (Throwable e) {
@@ -702,10 +721,13 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
   
   @Test
   public void testStopUsingPid() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     final Builder builder = new Builder()
         .setMemberName(getUniqueName())
         .setPort(this.locatorPort)
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config");
 
     assertFalse(builder.getForce());
@@ -718,7 +740,7 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
       waitForLocatorToStart(this.launcher);
   
       // validate the pid file and its contents
-      this.pidFile = new File(ProcessType.LOCATOR.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getPidFileName());
       assertTrue(this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
@@ -752,10 +774,13 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
   
   @Test
   public void testStopUsingWorkingDirectory() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+    
     final Builder builder = new Builder()
         .setMemberName(getUniqueName())
         .setPort(this.locatorPort)
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config");
 
     assertFalse(builder.getForce());
@@ -768,14 +793,13 @@ public class LocatorLauncherLocalJUnitTest extends AbstractLocatorLauncherJUnitT
       waitForLocatorToStart(this.launcher);
     
       // validate the pid file and its contents
-      this.pidFile = new File(ProcessType.LOCATOR.getPidFileName());
-      assertTrue(this.pidFile.exists());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.LOCATOR.getPidFileName());
+      assertTrue("Pid file " + this.pidFile.getCanonicalPath().toString() + " should exist", this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
       assertEquals(ProcessUtils.identifyPid(), pid);
 
-      final String workingDir = new File(System.getProperty("user.dir")).getCanonicalPath();
-      dirLauncher = new Builder().setWorkingDirectory(workingDir).build();
+      dirLauncher = new Builder().setWorkingDirectory(rootFolder).build();
       assertNotNull(dirLauncher);
       assertFalse(dirLauncher.isRunning());
       

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5da17d12/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteFileJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteFileJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteFileJUnitTest.java
index 0be4a8e..36d37fd 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteFileJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteFileJUnitTest.java
@@ -122,8 +122,6 @@ public class LocatorLauncherRemoteFileJUnitTest extends LocatorLauncherRemoteJUn
       waitForFileToDelete(this.pidFile);
     } catch (Throwable e) {
       this.errorCollector.addError(e);
-    } finally {
-      new File(ProcessType.LOCATOR.getStatusRequestFileName()).delete(); // TODO: delete?
     }
   }
   
@@ -200,8 +198,6 @@ public class LocatorLauncherRemoteFileJUnitTest extends LocatorLauncherRemoteJUn
       waitForFileToDelete(pidFile);
     } catch (Throwable e) {
       this.errorCollector.addError(e);
-    } finally {
-      new File(ProcessType.LOCATOR.getStopRequestFileName()).delete(); // TODO: delete?
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5da17d12/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteJUnitTest.java
index 8977e47..c0c054a 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteJUnitTest.java
@@ -79,7 +79,7 @@ public class LocatorLauncherRemoteJUnitTest extends AbstractLocatorLauncherJUnit
   @Ignore("TRAC bug #52304: test is broken and needs to be reworked")
   public void testRunningLocatorOutlivesForkingProcess() throws Exception {
   }/*
-    // TODO:KIRK: fix up this test
+    // TODO: fix up this test
     
     this.temporaryFolder.getRoot() = new File(getUniqueName());
     this.temporaryFolder.getRoot().mkdir();
@@ -967,7 +967,9 @@ public class LocatorLauncherRemoteJUnitTest extends AbstractLocatorLauncherJUnit
         
         Process forkedProcess = new ProcessBuilder(command).start();
 
+        @SuppressWarnings("unused")
         ProcessStreamReader processOutReader = new ProcessStreamReader.Builder(forkedProcess).inputStream(forkedProcess.getInputStream()).build().start();
+        @SuppressWarnings("unused")
         ProcessStreamReader processErrReader = new ProcessStreamReader.Builder(forkedProcess).inputStream(forkedProcess.getErrorStream()).build().start();
 
         logWriter.info(LocatorLauncherForkingProcess.class.getSimpleName() + "#main waiting for locator to start...");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5da17d12/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherJUnitTest.java
index 8b0d45b..721b7ed 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherJUnitTest.java
@@ -9,7 +9,6 @@
 package com.gemstone.gemfire.distributed;
 
 import static org.junit.Assert.*;
-import static org.junit.Assume.*;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -27,8 +26,6 @@ import com.gemstone.gemfire.distributed.ServerLauncher.Command;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.support.DistributedSystemAdapter;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.internal.lang.SystemUtils;
-import com.gemstone.gemfire.internal.util.IOUtils;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
 import edu.umd.cs.mtc.MultithreadedTestCase;
@@ -39,9 +36,7 @@ import org.jmock.Mockery;
 import org.jmock.lib.concurrent.Synchroniser;
 import org.jmock.lib.legacy.ClassImposteriser;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -60,39 +55,12 @@ import org.junit.experimental.categories.Category;
  * @see org.junit.Test
  * @since 7.0
  */
-@SuppressWarnings("deprecation")
+@SuppressWarnings({"deprecation", "unused"})
 @Category(UnitTest.class)
 public class ServerLauncherJUnitTest extends CommonLauncherTestSuite {
 
-//  private static final String GEMFIRE_PROPERTIES_FILE_NAME = "gemfire.properties";
-//  private static final String TEMPORARY_FILE_NAME = "beforeServerLauncherJUnitTest_" + GEMFIRE_PROPERTIES_FILE_NAME;
-
   private Mockery mockContext;
 
-//  @BeforeClass
-//  public static void testSuiteSetup() {
-//    if (SystemUtils.isWindows()) {
-//      return;
-//    }
-//    File file = new File(GEMFIRE_PROPERTIES_FILE_NAME);
-//    if (file.exists()) {
-//      File dest = new File(TEMPORARY_FILE_NAME);
-//      assertTrue(file.renameTo(dest));
-//    }
-//  }
-//
-//  @AfterClass
-//  public static void testSuiteTearDown() {
-//    if (SystemUtils.isWindows()) {
-//      return;
-//    }
-//    File file = new File(TEMPORARY_FILE_NAME);
-//    if (file.exists()) {
-//      File dest = new File(GEMFIRE_PROPERTIES_FILE_NAME);
-//      assertTrue(file.renameTo(dest));
-//    }
-//  }
-
   @Before
   public void setup() {
     mockContext = new Mockery() {{
@@ -109,10 +77,11 @@ public class ServerLauncherJUnitTest extends CommonLauncherTestSuite {
 
   @Test
   public void testParseArguments() throws Exception {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath().toString();
     Builder builder = new Builder();
 
     builder.parseArguments("start", "serverOne", "--assign-buckets", "--disable-default-server", "--debug", "--force",
-      "--rebalance", "--redirect-output", "--dir=" + ServerLauncher.DEFAULT_WORKING_DIRECTORY, "--pid=1234",
+      "--rebalance", "--redirect-output", "--dir=" + rootFolder, "--pid=1234",
         "--server-bind-address=" + InetAddress.getLocalHost().getHostAddress(), "--server-port=11235", "--hostname-for-clients=192.168.99.100");
 
     assertEquals(Command.START, builder.getCommand());
@@ -125,7 +94,7 @@ public class ServerLauncherJUnitTest extends CommonLauncherTestSuite {
     assertFalse(Boolean.TRUE.equals(builder.getHelp()));
     assertTrue(builder.getRebalance());
     assertTrue(builder.getRedirectOutput());
-    assertEquals(ServerLauncher.DEFAULT_WORKING_DIRECTORY, builder.getWorkingDirectory());
+    assertEquals(rootFolder, builder.getWorkingDirectory());
     assertEquals(1234, builder.getPid().intValue());
     assertEquals(InetAddress.getLocalHost(), builder.getServerBindAddress());
     assertEquals(11235, builder.getServerPort().intValue());
@@ -346,13 +315,13 @@ public class ServerLauncherJUnitTest extends CommonLauncherTestSuite {
   }
 
   @Test
-  public void testSetAndGetWorkingDirectory() {
+  public void testSetAndGetWorkingDirectory() throws Exception {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath().toString();
     Builder builder = new Builder();
 
     assertEquals(ServerLauncher.DEFAULT_WORKING_DIRECTORY, builder.getWorkingDirectory());
-    assertSame(builder, builder.setWorkingDirectory(System.getProperty("java.io.tmpdir")));
-    assertEquals(IOUtils.tryGetCanonicalPathElseGetAbsolutePath(new File(System.getProperty("java.io.tmpdir"))),
-      builder.getWorkingDirectory());
+    assertSame(builder, builder.setWorkingDirectory(rootFolder));
+    assertEquals(rootFolder, builder.getWorkingDirectory());
     assertSame(builder, builder.setWorkingDirectory("  "));
     assertEquals(ServerLauncher.DEFAULT_WORKING_DIRECTORY, builder.getWorkingDirectory());
     assertSame(builder, builder.setWorkingDirectory(""));
@@ -574,6 +543,8 @@ public class ServerLauncherJUnitTest extends CommonLauncherTestSuite {
 
   @Test
   public void testBuild() throws Exception {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath().toString();
+    
     ServerLauncher launcher = new Builder()
       .setCommand(Command.STOP)
       .setAssignBuckets(true)
@@ -582,7 +553,7 @@ public class ServerLauncherJUnitTest extends CommonLauncherTestSuite {
       .setRebalance(true)
       .setServerBindAddress(InetAddress.getLocalHost().getHostAddress())
       .setServerPort(11235)
-      .setWorkingDirectory(System.getProperty("java.io.tmpdir"))
+      .setWorkingDirectory(rootFolder)
       .setCriticalHeapPercentage(90.0f)
       .setEvictionHeapPercentage(75.0f)
       .setMaxConnections(100)
@@ -604,8 +575,7 @@ public class ServerLauncherJUnitTest extends CommonLauncherTestSuite {
     assertEquals("serverOne", launcher.getMemberName());
     assertEquals(InetAddress.getLocalHost(), launcher.getServerBindAddress());
     assertEquals(11235, launcher.getServerPort().intValue());
-    assertEquals(IOUtils.tryGetCanonicalPathElseGetAbsolutePath(new File(System.getProperty("java.io.tmpdir"))),
-      launcher.getWorkingDirectory());
+    assertEquals(rootFolder, launcher.getWorkingDirectory());
     assertEquals(90.0f, launcher.getCriticalHeapPercentage().floatValue(), 0.0f);
     assertEquals(75.0f, launcher.getEvictionHeapPercentage().floatValue(), 0.0f);
     assertEquals(100, launcher.getMaxConnections().intValue());
@@ -689,11 +659,11 @@ public class ServerLauncherJUnitTest extends CommonLauncherTestSuite {
   }
 
   @Test(expected = IllegalStateException.class)
-  public void testBuildWithInvalidWorkingDirectoryOnStart() {
+  public void testBuildWithInvalidWorkingDirectoryOnStart() throws Exception {
     try {
       new Builder().setCommand(Command.START)
         .setMemberName("serverOne")
-        .setWorkingDirectory(System.getProperty("java.io.tmpdir"))
+        .setWorkingDirectory(this.temporaryFolder.getRoot().getCanonicalPath().toString())
         .build();
     }
     catch (IllegalStateException expected) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5da17d12/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherLocalJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherLocalJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherLocalJUnitTest.java
index e83c8ac..27dd3a3 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherLocalJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherLocalJUnitTest.java
@@ -69,10 +69,13 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
 
   @Test
   public void testBuilderSetProperties() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     this.launcher = new Builder()
         .setDisableDefaultServer(true)
         .setForce(true)
         .setMemberName(getUniqueName())
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.DISABLE_AUTO_RECONNECT_NAME, "true")
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0")
@@ -116,11 +119,14 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
   
   @Test
   public void testStartCreatesPidFile() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // build and start the Server locally
     final Builder builder = new Builder()
         .setDisableDefaultServer(true)
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
 
@@ -133,7 +139,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       assertEquals(Status.ONLINE, this.launcher.status().getStatus());
 
       // validate the pid file and its contents
-      this.pidFile = new File(builder.getWorkingDirectory(), ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertTrue(this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
@@ -156,16 +162,18 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
 
   @Test
   public void testStartDeletesStaleControlFiles() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // create existing control files
-    this.stopRequestFile = new File(ProcessType.SERVER.getStopRequestFileName());
+    this.stopRequestFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getStopRequestFileName());
     this.stopRequestFile.createNewFile();
     assertTrue(this.stopRequestFile.exists());
 
-    this.statusRequestFile = new File(ProcessType.SERVER.getStatusRequestFileName());
+    this.statusRequestFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getStatusRequestFileName());
     this.statusRequestFile.createNewFile();
     assertTrue(this.statusRequestFile.exists());
 
-    this.statusFile = new File(ProcessType.SERVER.getStatusFileName());
+    this.statusFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getStatusFileName());
     this.statusFile.createNewFile();
     assertTrue(this.statusFile.exists());
     
@@ -174,6 +182,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
         .setDisableDefaultServer(true)
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
 
@@ -190,7 +199,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
     
     try {
       // validate the pid file and its contents
-      this.pidFile = new File(ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertTrue(this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
@@ -202,10 +211,6 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       assertFalse(this.statusRequestFile.exists());
       assertFalse(this.statusFile.exists());
       
-      // validate log file was created
-      final String logFileName = getUniqueName()+".log";
-      assertTrue("Log file should exist: " + logFileName, new File(logFileName).exists());
-      
     } catch (Throwable e) {
       this.errorCollector.addError(e);
     }
@@ -220,8 +225,10 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
   
   @Test
   public void testStartOverwritesStalePidFile() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // create existing pid file
-    this.pidFile = new File(ProcessType.SERVER.getPidFileName());
+    this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
     assertFalse("Integer.MAX_VALUE shouldn't be the same as local pid " + Integer.MAX_VALUE, Integer.MAX_VALUE == ProcessUtils.identifyPid());
     writePid(this.pidFile, Integer.MAX_VALUE);
 
@@ -230,6 +237,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
         .setDisableDefaultServer(true)
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
 
@@ -268,6 +276,8 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
    */
   @Test
   public void testStartUsingDisableDefaultServerLeavesPortFree() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // build and start the server
     assertTrue(AvailablePort.isPortAvailable(this.serverPort, AvailablePort.SOCKET));
     
@@ -276,6 +286,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
         .setDisableDefaultServer(true)
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
     
@@ -288,17 +299,13 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       waitForServerToStart(this.launcher);
 
       // validate the pid file and its contents
-      this.pidFile = new File(ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertTrue(this.pidFile.exists());
       int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
       assertTrue(ProcessUtils.isProcessAlive(pid));
       assertEquals(getPid(), pid);
 
-      // validate log file was created
-      final String logFileName = getUniqueName()+".log";
-      assertTrue("Log file should exist: " + logFileName, new File(logFileName).exists());
-
       // verify server did not a port
       assertTrue(AvailablePort.isPortAvailable(this.serverPort, AvailablePort.SOCKET));
       
@@ -324,6 +331,8 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
    */
   @Test
   public void testStartUsingDisableDefaultServerSkipsPortCheck() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // generate one free port and then use TEST_OVERRIDE_DEFAULT_PORT_PROPERTY
     this.socket = SocketCreator.getDefaultInstance().createServerSocket(this.serverPort, 50, null, -1);
     assertFalse(AvailablePort.isPortAvailable(this.serverPort, AvailablePort.SOCKET));
@@ -333,6 +342,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
         .setDisableDefaultServer(true)
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
 
@@ -345,17 +355,13 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       waitForServerToStart(this.launcher);
 
       // validate the pid file and its contents
-      this.pidFile = new File(ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertTrue(this.pidFile.exists());
       int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
       assertTrue(ProcessUtils.isProcessAlive(pid));
       assertEquals(getPid(), pid);
 
-      // validate log file was created
-      final String logFileName = getUniqueName()+".log";
-      assertTrue("Log file should exist: " + logFileName, new File(logFileName).exists());
-      
       final ServerState status = this.launcher.status();
       final String portString = status.getPort();
       assertEquals("Port should be \"\" instead of " + portString, "", portString);
@@ -449,6 +455,8 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
   public void testStartUsingServerPortOverridesCacheXml() throws Throwable {
     // verifies part of the fix for #47664
     
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+    
     // generate two free ports
     final int[] freeTCPPorts = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     assertTrue(AvailablePort.isPortAvailable(freeTCPPorts[0], AvailablePort.SOCKET));
@@ -474,6 +482,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
         .setServerPort(freeTCPPorts[1])
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
 
@@ -485,17 +494,13 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       waitForServerToStart(this.launcher);
   
       // validate the pid file and its contents
-      this.pidFile = new File(ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertTrue(this.pidFile.exists());
       int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
       assertTrue(ProcessUtils.isProcessAlive(pid));
       assertEquals(getPid(), pid);
 
-      // validate log file was created
-      final String logFileName = getUniqueName()+".log";
-      assertTrue("Log file should exist: " + logFileName, new File(logFileName).exists());
-
       // verify server used --server-port instead of default or port in cache.xml
       assertTrue(AvailablePort.isPortAvailable(freeTCPPorts[0], AvailablePort.SOCKET));
       assertFalse(AvailablePort.isPortAvailable(freeTCPPorts[1], AvailablePort.SOCKET));
@@ -524,6 +529,8 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
    */
   @Test
   public void testStartUsingServerPortUsedInsteadOfDefaultCacheXml() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // write out cache.xml with one port
     final CacheCreation creation = new CacheCreation();
     final RegionAttributesCreation attrs = new RegionAttributesCreation(creation);
@@ -544,6 +551,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
         .setServerPort(this.serverPort)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
 
@@ -555,17 +563,13 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       waitForServerToStart(this.launcher);
   
       // validate the pid file and its contents
-      this.pidFile = new File(ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertTrue(this.pidFile.exists());
       int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
       assertTrue(ProcessUtils.isProcessAlive(pid));
       assertEquals(getPid(), pid);
 
-      // validate log file was created
-      final String logFileName = getUniqueName()+".log";
-      assertTrue("Log file should exist: " + logFileName, new File(logFileName).exists());
-
       // verify server used --server-port instead of default
       assertFalse(AvailablePort.isPortAvailable(this.serverPort, AvailablePort.SOCKET));
       
@@ -587,6 +591,8 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
 
   @Test
   public void testStartWithDefaultPortInUseFails() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // generate one free port and then use TEST_OVERRIDE_DEFAULT_PORT_PROPERTY
     this.socket = SocketCreator.getDefaultInstance().createServerSocket(this.serverPort, 50, null, -1);
     assertFalse(AvailablePort.isPortAvailable(this.serverPort, AvailablePort.SOCKET));
@@ -595,6 +601,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
     final Builder builder = new Builder()
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
 
@@ -631,12 +638,12 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
     }
 
     try {
-      this.pidFile = new File (ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertFalse("Pid file should not exist: " + this.pidFile, this.pidFile.exists());
       
       // creation of log file seems to be random -- look into why sometime
       final String logFileName = getUniqueName()+".log";
-      assertFalse("Log file should not exist: " + logFileName, new File(logFileName).exists());
+      assertFalse("Log file should not exist: " + logFileName, new File(this.temporaryFolder.getRoot(), logFileName).exists());
       
     } catch (Throwable e) {
       this.errorCollector.addError(e);
@@ -744,6 +751,8 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
    */
   @Test
   public void testStartUsingServerPortInUseFails() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // generate one free port and then use TEST_OVERRIDE_DEFAULT_PORT_PROPERTY
     final int freeTCPPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     this.socket = SocketCreator.getDefaultInstance().createServerSocket(freeTCPPort, 50, null, -1);
@@ -753,6 +762,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
         .setServerPort(freeTCPPort)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
 
@@ -781,18 +791,14 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
     }
 
     try {
-      this.pidFile = new File (ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertFalse("Pid file should not exist: " + this.pidFile, this.pidFile.exists());
-      
-      // creation of log file seems to be random -- look into why sometime
-      final String logFileName = getUniqueName()+".log";
-      assertFalse("Log file should not exist: " + logFileName, new File(logFileName).exists());
     } catch (Throwable e) {
       this.errorCollector.addError(e);
     }
     
     // just in case the launcher started...
-    ServerState status = null; // TODO: this could result in NPE later
+    ServerState status = null;
     try {
       status = this.launcher.stop();
     } catch (Throwable t) { 
@@ -809,11 +815,14 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
   
   @Test
   public void testStatusUsingPid() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+    
     // build and start the server
     final Builder builder = new Builder()
         .setDisableDefaultServer(true)
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
     
@@ -826,7 +835,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       this.launcher.start();
       waitForServerToStart(this.launcher);
       
-      this.pidFile = new File(ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertTrue(this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
@@ -841,12 +850,10 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       assertEquals(Status.ONLINE, actualStatus.getStatus());
       assertEquals(pid, actualStatus.getPid().intValue());
       assertTrue(actualStatus.getUptime() > 0);
-      assertEquals(new File(System.getProperty("user.dir")).getCanonicalPath(), actualStatus.getWorkingDirectory());
-      //assertEquals(???, actualStatus.getJvmArguments());
+      // getWorkingDirectory returns user.dir instead of rootFolder because test is starting Server in this process (to move logFile and pidFile into temp dir)
       assertEquals(ManagementFactory.getRuntimeMXBean().getClassPath(), actualStatus.getClasspath());
       assertEquals(GemFireVersion.getGemFireVersion(), actualStatus.getGemFireVersion());
       assertEquals(System.getProperty("java.version"),  actualStatus.getJavaVersion());
-      assertEquals(new File(System.getProperty("user.dir")).getCanonicalPath() + File.separator + getUniqueName() + ".log", actualStatus.getLogFile());
       assertEquals(InetAddress.getLocalHost().getCanonicalHostName(), actualStatus.getHost());
       assertEquals(getUniqueName(), actualStatus.getMemberName());
       
@@ -874,11 +881,14 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
   
   @Test
   public void testStatusUsingWorkingDirectory() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // build and start the server
     final Builder builder = new Builder()
         .setDisableDefaultServer(true)
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
     
@@ -891,14 +901,13 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       this.launcher.start();
       waitForServerToStart(this.launcher);
       
-      this.pidFile = new File(ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertTrue(this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
       assertEquals(ProcessUtils.identifyPid(), pid);
   
-      final String workingDir = new File(System.getProperty("user.dir")).getCanonicalPath();
-      dirLauncher = new Builder().setWorkingDirectory(workingDir).build();
+      dirLauncher = new Builder().setWorkingDirectory(rootFolder).build();
       assertNotNull(dirLauncher);
       assertFalse(dirLauncher.isRunning());
 
@@ -907,12 +916,10 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       assertEquals(Status.ONLINE, actualStatus.getStatus());
       assertEquals(pid, actualStatus.getPid().intValue());
       assertTrue(actualStatus.getUptime() > 0);
-      assertEquals(new File(System.getProperty("user.dir")).getCanonicalPath(), actualStatus.getWorkingDirectory());
-      //assertEquals(???, actualStatus.getJvmArguments());
+      // getWorkingDirectory returns user.dir instead of rootFolder because test is starting Server in this process (to move logFile and pidFile into temp dir)
       assertEquals(ManagementFactory.getRuntimeMXBean().getClassPath(), actualStatus.getClasspath());
       assertEquals(GemFireVersion.getGemFireVersion(), actualStatus.getGemFireVersion());
       assertEquals(System.getProperty("java.version"),  actualStatus.getJavaVersion());
-      assertEquals(new File(System.getProperty("user.dir")).getCanonicalPath() + File.separator + getUniqueName() + ".log", actualStatus.getLogFile());
       assertEquals(InetAddress.getLocalHost().getCanonicalHostName(), actualStatus.getHost());
       assertEquals(getUniqueName(), actualStatus.getMemberName());
       
@@ -940,11 +947,14 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
   
   @Test
   public void testStopUsingPid() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // build and start the server
     final Builder builder = new Builder()
         .setDisableDefaultServer(true)
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
 
@@ -959,7 +969,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       waitForServerToStart(this.launcher);
   
       // validate the pid file and its contents
-      this.pidFile = new File(ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertTrue(this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
@@ -986,7 +996,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
 
     try {
       // verify the PID file was deleted
-      waitForFileToDelete(this.pidFile); // TODO
+      waitForFileToDelete(this.pidFile);
     } catch (Throwable e) {
       this.errorCollector.addError(e);
     }
@@ -994,11 +1004,14 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
   
   @Test
   public void testStopUsingWorkingDirectory() throws Throwable {
+    String rootFolder = this.temporaryFolder.getRoot().getCanonicalPath();
+
     // build and start the server
     final Builder builder = new Builder()
         .setDisableDefaultServer(true)
         .setMemberName(getUniqueName())
         .setRedirectOutput(true)
+        .setWorkingDirectory(rootFolder)
         .set(DistributionConfig.LOG_LEVEL_NAME, "config")
         .set(DistributionConfig.MCAST_PORT_NAME, "0");
 
@@ -1012,14 +1025,13 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
       waitForServerToStart(this.launcher);
     
       // validate the pid file and its contents
-      this.pidFile = new File(ProcessType.SERVER.getPidFileName());
+      this.pidFile = new File(this.temporaryFolder.getRoot(), ProcessType.SERVER.getPidFileName());
       assertTrue(this.pidFile.exists());
       final int pid = readPid(this.pidFile);
       assertTrue(pid > 0);
       assertEquals(ProcessUtils.identifyPid(), pid);
 
-      final String workingDir = new File(System.getProperty("user.dir")).getCanonicalPath();
-      dirLauncher = new Builder().setWorkingDirectory(workingDir).build();
+      dirLauncher = new Builder().setWorkingDirectory(rootFolder).build();
       assertNotNull(dirLauncher);
       assertFalse(dirLauncher.isRunning());
       
@@ -1040,7 +1052,7 @@ public class ServerLauncherLocalJUnitTest extends AbstractServerLauncherJUnitTes
 
     try {
       // verify the PID file was deleted
-      waitForFileToDelete(this.pidFile); // TODO
+      waitForFileToDelete(this.pidFile);
     } catch (Throwable e) {
       this.errorCollector.addError(e);
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5da17d12/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherRemoteJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherRemoteJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherRemoteJUnitTest.java
index d43ad0a..a11b2e5 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherRemoteJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherRemoteJUnitTest.java
@@ -120,7 +120,7 @@ public class ServerLauncherRemoteJUnitTest extends AbstractServerLauncherJUnitTe
 
     @SuppressWarnings("unused")
     File file = new File(this.temporaryFolder.getRoot(), ServerLauncherForkingProcess.class.getSimpleName().concat(".log"));
-    //-logger.info("KIRK: log file is " + file);
+    //-logger.info("log file is " + file);
     
     final ProcessWrapper pw = new ProcessWrapper.Builder().mainClass(ServerLauncherForkingProcess.class).build();
     pw.execute(null, this.temporaryFolder.getRoot()).waitFor(true);
@@ -882,7 +882,7 @@ public class ServerLauncherRemoteJUnitTest extends AbstractServerLauncherJUnitTe
     this.errorCollector.checkThat(outputContainedExpectedString.get(), is(equalTo(true)));
 
     // just in case the launcher started...
-    ServerState status = null; // TODO: this could throw NPE later
+    ServerState status = null;
     try {
       status = dirLauncher.stop();
     } catch (Throwable t) { 
@@ -1360,7 +1360,7 @@ public class ServerLauncherRemoteJUnitTest extends AbstractServerLauncherJUnitTe
   public static class ServerLauncherForkingProcess {
 
     public static void main(final String... args) throws IOException, PidUnavailableException {
-      //-System.out.println("KIRK inside main");
+      //-System.out.println("inside main");
       File file = new File(System.getProperty("user.dir"), ServerLauncherForkingProcess.class.getSimpleName().concat(".log"));
       file.createNewFile();
       LocalLogWriter logWriter = new LocalLogWriter(InternalLogWriter.ALL_LEVEL, new PrintStream(new FileOutputStream(file, true)));
@@ -1387,7 +1387,7 @@ public class ServerLauncherRemoteJUnitTest extends AbstractServerLauncherJUnitTe
         logWriter.info(ServerLauncherForkingProcess.class.getSimpleName() + "#main command: " + command);
         logWriter.info(ServerLauncherForkingProcess.class.getSimpleName() + "#main starting...");
 
-        //-System.out.println("KIRK launching " + command);
+        //-System.out.println("launching " + command);
         
         @SuppressWarnings("unused")
         Process forkedProcess = new ProcessBuilder(command).start();
@@ -1398,7 +1398,7 @@ public class ServerLauncherRemoteJUnitTest extends AbstractServerLauncherJUnitTe
 //        logWriter.info(ServerLauncherForkingProcess.class.getSimpleName() + "#main waiting for Server to start...");
 //
 //        File workingDir = new File(System.getProperty("user.dir"));
-//        System.out.println("KIRK waiting for server to start in " + workingDir);
+//        System.out.println("waiting for server to start in " + workingDir);
 //        final ServerLauncher dirLauncher = new ServerLauncher.Builder()
 //            .setWorkingDirectory(workingDir.getCanonicalPath())
 //            .build();
@@ -1406,7 +1406,7 @@ public class ServerLauncherRemoteJUnitTest extends AbstractServerLauncherJUnitTe
 
         logWriter.info(ServerLauncherForkingProcess.class.getSimpleName() + "#main exiting...");
 
-        //-System.out.println("KIRK exiting");
+        //-System.out.println("exiting");
         System.exit(0);
       }
       catch (Throwable t) {


[32/50] [abbrv] incubator-geode git commit: GEODE-468: Using 1.8 generated files for AnalyzeSerializablesJUnitTest

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/37f77a90/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt b/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
index 58ef0ab..25e8813 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
@@ -218,8 +218,8 @@ com/gemstone/gemfire/cache/query/internal/utils/PDXUtils$1,false
 com/gemstone/gemfire/cache/query/internal/utils/PDXUtils$2,false
 com/gemstone/gemfire/cache/snapshot/SnapshotOptions$SnapshotFormat,false
 com/gemstone/gemfire/cache/util/BoundedLinkedHashMap,true,-3419897166186852692,_maximumNumberOfEntries:int
-com/gemstone/gemfire/cache/wan/GatewaySender$OrderPolicy,false
 com/gemstone/gemfire/cache/util/Gateway$OrderPolicy,false
+com/gemstone/gemfire/cache/wan/GatewaySender$OrderPolicy,false
 com/gemstone/gemfire/compression/CompressionException,true,4118639654597191235
 com/gemstone/gemfire/compression/SnappyCompressor,true,496609875302446099
 com/gemstone/gemfire/distributed/AbstractLauncher$Status,false,description:java/lang/String
@@ -498,6 +498,7 @@ com/gemstone/gemfire/internal/redis/RedisCommandType$11,false,dataType:com/gemst
 com/gemstone/gemfire/internal/redis/RedisCommandType$110,false,dataType:com/gemstone/gemfire/internal/redis/RedisDataType,executor:com/gemstone/gemfire/internal/redis/Executor
 com/gemstone/gemfire/internal/redis/RedisCommandType$111,false,dataType:com/gemstone/gemfire/internal/redis/RedisDataType,executor:com/gemstone/gemfire/internal/redis/Executor
 com/gemstone/gemfire/internal/redis/RedisCommandType$112,false,dataType:com/gemstone/gemfire/internal/redis/RedisDataType,executor:com/gemstone/gemfire/internal/redis/Executor
+com/gemstone/gemfire/internal/redis/RedisCommandType$113,false,dataType:com/gemstone/gemfire/internal/redis/RedisDataType,executor:com/gemstone/gemfire/internal/redis/Executor
 com/gemstone/gemfire/internal/redis/RedisCommandType$12,false,dataType:com/gemstone/gemfire/internal/redis/RedisDataType,executor:com/gemstone/gemfire/internal/redis/Executor
 com/gemstone/gemfire/internal/redis/RedisCommandType$13,false,dataType:com/gemstone/gemfire/internal/redis/RedisDataType,executor:com/gemstone/gemfire/internal/redis/Executor
 com/gemstone/gemfire/internal/redis/RedisCommandType$14,false,dataType:com/gemstone/gemfire/internal/redis/RedisDataType,executor:com/gemstone/gemfire/internal/redis/Executor
@@ -594,7 +595,6 @@ com/gemstone/gemfire/internal/redis/RedisCommandType$96,false,dataType:com/gemst
 com/gemstone/gemfire/internal/redis/RedisCommandType$97,false,dataType:com/gemstone/gemfire/internal/redis/RedisDataType,executor:com/gemstone/gemfire/internal/redis/Executor
 com/gemstone/gemfire/internal/redis/RedisCommandType$98,false,dataType:com/gemstone/gemfire/internal/redis/RedisDataType,executor:com/gemstone/gemfire/internal/redis/Executor
 com/gemstone/gemfire/internal/redis/RedisCommandType$99,false,dataType:com/gemstone/gemfire/internal/redis/RedisDataType,executor:com/gemstone/gemfire/internal/redis/Executor
-com/gemstone/gemfire/internal/redis/RedisCommandType$113,false,dataType:com/gemstone/gemfire/internal/redis/RedisDataType,executor:com/gemstone/gemfire/internal/redis/Executor
 com/gemstone/gemfire/internal/redis/RedisDataType,false
 com/gemstone/gemfire/internal/redis/RedisDataType$1,false
 com/gemstone/gemfire/internal/redis/RedisDataType$2,false
@@ -632,17 +632,6 @@ com/gemstone/gemfire/internal/tcp/ImmutableByteBufferInputStream,false
 com/gemstone/gemfire/internal/tcp/MemberShunnedException,true,-8453126202477831557,member:com/gemstone/gemfire/internal/tcp/Stub
 com/gemstone/gemfire/internal/tcp/ReenteredConnectException,false
 com/gemstone/gemfire/internal/tcp/VersionedByteBufferInputStream,false,version:com/gemstone/gemfire/internal/Version
-com/gemstone/gemfire/internal/tools/gfsh/aggregator/AggregatorPartitionFunction,true,1
-com/gemstone/gemfire/internal/tools/gfsh/app/aggregator/AggregatorException,true,1,functionExceptions:java/lang/Throwable[]
-com/gemstone/gemfire/internal/tools/gfsh/app/cache/data/InvalidTypeException,false
-com/gemstone/gemfire/internal/tools/gfsh/app/cache/data/ListMap$ListEntry,false,key:java/lang/Object,value:java/lang/Object
-com/gemstone/gemfire/internal/tools/gfsh/app/cache/data/ListMap$ListSet,false,entryList:java/util/ArrayList
-com/gemstone/gemfire/internal/tools/gfsh/app/cache/index/LookupServiceException,true,1
-com/gemstone/gemfire/internal/tools/gfsh/app/command/CommandException,false
-com/gemstone/gemfire/internal/tools/gfsh/app/commands/index$TASK,false
-com/gemstone/gemfire/internal/tools/gfsh/app/function/command/index$DeleteType,false
-com/gemstone/gemfire/internal/tools/gfsh/app/pogo/InvalidKeyException,true,1
-com/gemstone/gemfire/internal/tools/gfsh/app/util/DBUtilException,true,1,errorCode:int
 com/gemstone/gemfire/internal/util/Breadcrumbs$CrumbType,false
 com/gemstone/gemfire/internal/util/SingletonValue$ValueState,false
 com/gemstone/gemfire/internal/util/SunAPINotFoundException,false
@@ -716,8 +705,6 @@ com/gemstone/gemfire/management/internal/cli/exceptions/CliCommandOptionValueMis
 com/gemstone/gemfire/management/internal/cli/exceptions/CliException,false
 com/gemstone/gemfire/management/internal/cli/exceptions/CreateSubregionException,true,4387344870743824916
 com/gemstone/gemfire/management/internal/cli/exceptions/IndexNotFoundException,true,1,indexName:java/lang/String,message:java/lang/String
-com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction,true,1
-com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction$AlterHDFSStoreAttributes,true,1,batchInterval:java/lang/Integer,batchSize:java/lang/Integer,fileRolloverInterval:java/lang/Integer,hdfsUniqueName:java/lang/String,majorCompact:java/lang/Boolean,majorCompactionInterval:java/lang/Integer,majorCompactionThreads:java/lang/Integer,maxWriteonlyFileSize:java/lang/Integer,minorCompact:java/lang/Boolean,minorCompactionThreads:java/lang/Integer,purgeInterval:java/lang/Integer
 com/gemstone/gemfire/management/internal/cli/functions/AlterRuntimeConfigFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/ChangeLogLevelFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/CloseDurableClientFunction,true,1
@@ -727,7 +714,6 @@ com/gemstone/gemfire/management/internal/cli/functions/ContunuousQueryFunction$C
 com/gemstone/gemfire/management/internal/cli/functions/CreateAsyncEventQueueFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/CreateDefinedIndexesFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/CreateDiskStoreFunction,true,1
-com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/CreateIndexFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DataCommandFunction,true,1,optimizeForWrite:boolean
 com/gemstone/gemfire/management/internal/cli/functions/DataCommandFunction$SelectExecStep,true,1
@@ -737,7 +723,6 @@ com/gemstone/gemfire/management/internal/cli/functions/DeployFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DescribeDiskStoreFunction,false
 com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DestroyDiskStoreFunction,true,1
-com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/DestroyIndexFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/ExportConfigFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/ExportDataFunction,true,1
@@ -763,8 +748,6 @@ com/gemstone/gemfire/management/internal/cli/functions/ListDeployedFunction,true
 com/gemstone/gemfire/management/internal/cli/functions/ListDiskStoresFunction,false
 com/gemstone/gemfire/management/internal/cli/functions/ListDurableCqNamesFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/ListFunctionFunction,true,1
-com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction,true,1
-com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction$HdfsStoreDetails,true,1,memberId:java/lang/String,memberName:java/lang/String,storeName:java/lang/String
 com/gemstone/gemfire/management/internal/cli/functions/ListIndexFunction,false
 com/gemstone/gemfire/management/internal/cli/functions/LoadSharedConfigurationFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/LogFileFunction,true,1
@@ -777,7 +760,7 @@ com/gemstone/gemfire/management/internal/cli/functions/RebalanceFunction,true,1
 com/gemstone/gemfire/management/internal/cli/functions/RegionAlterFunction,true,-4846425364943216425
 com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction,true,8746830191680509335
 com/gemstone/gemfire/management/internal/cli/functions/RegionDestroyFunction,true,9172773671865750685
-com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs,true,-5158224572470173267,asyncEventQueueIds:java/util/Set,cacheListeners:java/util/Set,cacheLoader:java/lang/String,cacheWriter:java/lang/String,cloningEnabled:java/lang/Boolean,compressor:java/lang/String,concurrencyChecksEnabled:java/lang/Boolean,concurrencyLevel:java/lang/Integer,diskStore:java/lang/String,diskSynchronous:java/lang/Boolean,enableAsyncConflation:java/lang/Boolean,enableSubscriptionConflation:java/lang/Boolean,entryExpirationIdleTime:com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$ExpirationAttrs,entryExpirationTTL:com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$ExpirationAttrs,evictionMax:java/lang/Integer,gatewaySenderIds:java/util/Set,hdfsStoreName:java/lang/String,hdfsWriteOnly:java/lang/Boolean,isSetCloningEnabled:boolean,isSetCompressor:boolean,isSetConcurrencyChecksEnabled:boolean,isSetConcurrencyLevel:boolean,isSetDiskSynchronous:bo
 olean,isSetEnableAsyncConflation:boolean,isSetEnableSubscriptionConflation:boolean,isSetHdfsWriteOnly:java/lang/Boolean,isSetOffHeap:boolean,isSetStatisticsEnabled:boolean,keyConstraint:java/lang/String,offHeap:java/lang/Boolean,partitionArgs:com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$PartitionArgs,regionAttributes:com/gemstone/gemfire/cache/RegionAttributes,regionExpirationIdleTime:com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$ExpirationAttrs,regionExpirationTTL:com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$ExpirationAttrs,regionPath:java/lang/String,regionShortcut:com/gemstone/gemfire/cache/RegionShortcut,skipIfExists:java/lang/Boolean,statisticsEnabled:java/lang/Boolean,useAttributesFrom:java/lang/String,valueConstraint:java/lang/String
+com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs,true,-5158224572470173267,asyncEventQueueIds:java/util/Set,cacheListeners:java/util/Set,cacheLoader:java/lang/String,cacheWriter:java/lang/String,cloningEnabled:java/lang/Boolean,compressor:java/lang/String,concurrencyChecksEnabled:java/lang/Boolean,concurrencyLevel:java/lang/Integer,diskStore:java/lang/String,diskSynchronous:java/lang/Boolean,enableAsyncConflation:java/lang/Boolean,enableSubscriptionConflation:java/lang/Boolean,entryExpirationIdleTime:com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$ExpirationAttrs,entryExpirationTTL:com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$ExpirationAttrs,evictionMax:java/lang/Integer,gatewaySenderIds:java/util/Set,isSetCloningEnabled:boolean,isSetCompressor:boolean,isSetConcurrencyChecksEnabled:boolean,isSetConcurrencyLevel:boolean,isSetDiskSynchronous:boolean,isSetEnableAsyncConflation:boolean,isSetEnableSubscriptio
 nConflation:boolean,isSetOffHeap:boolean,isSetStatisticsEnabled:boolean,keyConstraint:java/lang/String,offHeap:java/lang/Boolean,partitionArgs:com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$PartitionArgs,regionAttributes:com/gemstone/gemfire/cache/RegionAttributes,regionExpirationIdleTime:com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$ExpirationAttrs,regionExpirationTTL:com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$ExpirationAttrs,regionPath:java/lang/String,regionShortcut:com/gemstone/gemfire/cache/RegionShortcut,skipIfExists:java/lang/Boolean,statisticsEnabled:java/lang/Boolean,useAttributesFrom:java/lang/String,valueConstraint:java/lang/String
 com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$ExpirationAttrs,true,1474255033398008062,action:com/gemstone/gemfire/cache/ExpirationAction,time:java/lang/Integer,type:com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$ExpirationAttrs$ExpirationFor
 com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$ExpirationAttrs$ExpirationFor,false
 com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs$PartitionArgs,true,5907052187323280919,hasPartitionAttributes:boolean,isSetPRLocalMaxMemory:boolean,isSetPRRecoveryDelay:boolean,isSetPRRedundantCopies:boolean,isSetPRStartupRecoveryDelay:boolean,isSetPRTotalMaxMemory:boolean,isSetPRTotalNumBuckets:boolean,prColocatedWith:java/lang/String,prLocalMaxMemory:int,prRecoveryDelay:long,prRedundantCopies:int,prStartupRecoveryDelay:long,prTotalMaxMemory:long,prTotalNumBuckets:int,userSpecifiedPartitionAttributes:java/util/Set
@@ -843,10 +826,6 @@ com/gemstone/gemfire/security/AuthenticationFailedException,true,-82028664722790
 com/gemstone/gemfire/security/AuthenticationRequiredException,true,4675976651103154919
 com/gemstone/gemfire/security/GemFireSecurityException,true,3814254578203076926
 com/gemstone/gemfire/security/NotAuthorizedException,true,419215768216387745,principal:java/security/Principal
-com/gemstone/java/util/concurrent/SynchronousQueueNoSpin,true,-3223113410248163686,qlock:java/util/concurrent/locks/ReentrantLock,waitingConsumers:com/gemstone/java/util/concurrent/SynchronousQueueNoSpin$WaitQueue,waitingProducers:com/gemstone/java/util/concurrent/SynchronousQueueNoSpin$WaitQueue
-com/gemstone/java/util/concurrent/SynchronousQueueNoSpin$FifoWaitQueue,true,-3623113410248163686
-com/gemstone/java/util/concurrent/SynchronousQueueNoSpin$LifoWaitQueue,true,-3633113410248163686
-com/gemstone/java/util/concurrent/SynchronousQueueNoSpin$WaitQueue,true,423369940180943459
 com/gemstone/org/apache/logging/log4j/message/GemFireParameterizedMessage,true,-665975803997290697,messagePattern:java/lang/String,stringArgs:java/lang/String[]
 com/gemstone/org/apache/logging/log4j/message/GemFireParameterizedMessageFactory,true,1
 com/gemstone/org/jgroups/ChannelClosedException,true,3183749334840801913


[19/50] [abbrv] incubator-geode git commit: GEODE-429: Remove HDFS persistence DataPolicy

Posted by ds...@apache.org.
GEODE-429: Remove HDFS persistence DataPolicy


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/1b4fd2fe
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/1b4fd2fe
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/1b4fd2fe

Branch: refs/heads/develop
Commit: 1b4fd2fe872af1520027b8e0a84ffe84b9613f27
Parents: 12318e9
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 14:49:31 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:23 2015 -0700

----------------------------------------------------------------------
 .../com/gemstone/gemfire/cache/DataPolicy.java  |   19 +-
 .../internal/cache/PartitionedRegionHelper.java |    2 -
 .../cache/xmlcache/CacheXmlGenerator.java       |    4 -
 .../internal/cache/xmlcache/CacheXmlParser.java |    6 -
 .../ColocatedRegionWithHDFSDUnitTest.java       |    2 +-
 .../hdfs/internal/RegionRecoveryDUnitTest.java  |  415 -----
 .../internal/RegionWithHDFSBasicDUnitTest.java  | 1594 ------------------
 .../RegionWithHDFSOffHeapBasicDUnitTest.java    |  114 --
 ...RegionWithHDFSPersistenceBasicDUnitTest.java |   77 -
 .../HDFSQueueRegionOperationsJUnitTest.java     |   33 -
 ...FSQueueRegionOperationsOffHeapJUnitTest.java |   54 -
 .../cache/HDFSRegionOperationsJUnitTest.java    |  542 ------
 .../HDFSRegionOperationsOffHeapJUnitTest.java   |   78 -
 13 files changed, 5 insertions(+), 2935 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
index 4ffeaba..9223aa4 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
@@ -88,18 +88,6 @@ public class DataPolicy implements java.io.Serializable {
    */
   public static final DataPolicy PERSISTENT_PARTITION = new DataPolicy(6, "PERSISTENT_PARTITION");
   
-  /**
-   * In addition to <code>PARTITION</code> also causes data to be stored to
-   * HDFS. The region initialization may use the data stored on HDFS.
-   */
-  public static final DataPolicy HDFS_PARTITION = new DataPolicy(7, "HDFS_PARTITION");
-  
-  /**
-   * In addition to <code>HDFS_PARTITION</code> also causes data to be stored on local
-   * disk. The data can be evicted from the local disk and still be read
-   * from HDFS.
-   */
-  public static final DataPolicy HDFS_PERSISTENT_PARTITION = new DataPolicy(10, "HDFS_PERSISTENT_PARTITION");
    /**
    * The data policy used by default; it is {@link #NORMAL}.
    */
@@ -169,7 +157,7 @@ public class DataPolicy implements java.io.Serializable {
    * @since 6.5
    */
   public boolean withPersistence() {
-    return this == PERSISTENT_PARTITION || this == PERSISTENT_REPLICATE || this == HDFS_PERSISTENT_PARTITION;
+    return this == PERSISTENT_PARTITION || this == PERSISTENT_REPLICATE;
   }
 
   /** Return whether this policy does partitioning.
@@ -179,7 +167,7 @@ public class DataPolicy implements java.io.Serializable {
    * @since 6.5
    */
   public boolean withPartitioning() {
-    return this == PARTITION || this == PERSISTENT_PARTITION || this == HDFS_PARTITION || this==HDFS_PERSISTENT_PARTITION;
+    return this == PARTITION || this == PERSISTENT_PARTITION;
   }
 
   /** Return whether this policy does preloaded.
@@ -254,7 +242,8 @@ public class DataPolicy implements java.io.Serializable {
    * @see #HDFS_PARTITION
    */
   public boolean withHDFS() {
-	  return this == HDFS_PARTITION || this == HDFS_PERSISTENT_PARTITION;
+//    return this == HDFS_PARTITION || this == HDFS_PERSISTENT_PARTITION;
+	  return false;
   }
   
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
index 965f96c..10dc256 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
@@ -115,8 +115,6 @@ public class PartitionedRegionHelper
     Set policies = new HashSet();
     policies.add(DEFAULT_DATA_POLICY);
     policies.add(DataPolicy.PERSISTENT_PARTITION);
-    policies.add(DataPolicy.HDFS_PARTITION);
-    policies.add(DataPolicy.HDFS_PERSISTENT_PARTITION);
 //    policies.add(DataPolicy.NORMAL);
     ALLOWED_DATA_POLICIES = Collections.unmodifiableSet(policies);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
index ee4e0ae..3b587b3 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
@@ -1904,10 +1904,6 @@ public class CacheXmlGenerator extends CacheXml implements XMLReader {
           dpString = PERSISTENT_REPLICATE_DP;
         } else if (dp == DataPolicy.PERSISTENT_PARTITION) {
           dpString = PERSISTENT_PARTITION_DP;
-        } else if (dp == DataPolicy.HDFS_PARTITION) {
-          dpString = HDFS_PARTITION_DP;
-        } else if (dp == DataPolicy.HDFS_PERSISTENT_PARTITION) {
-          dpString = HDFS_PERSISTENT_PARTITION_DP;
         } else if (dp.isPartition()) {
           if (this.version.compareTo(CacheXmlVersion.VERSION_5_1) >= 0) {
             dpString = PARTITION_DP;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
index f0b3612..2e77d3c 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
@@ -1261,12 +1261,6 @@ public class CacheXmlParser extends CacheXml implements ContentHandler {
       else if (dp.equals(PERSISTENT_PARTITION_DP)) {
         attrs.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
       }
-      else if (dp.equals(HDFS_PARTITION_DP)) {
-        attrs.setDataPolicy(DataPolicy.HDFS_PARTITION);
-      }
-      else if (dp.equals(HDFS_PERSISTENT_PARTITION_DP)) {
-        attrs.setDataPolicy(DataPolicy.HDFS_PERSISTENT_PARTITION);
-      }
       else {
         throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_UNKNOWN_DATA_POLICY_0.toLocalizedString(dp));
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
index 3b0be6b..44206dc 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
@@ -57,7 +57,7 @@ public class ColocatedRegionWithHDFSDUnitTest extends RegionWithHDFSTestBase {
         hsf.create(uniqueName);
 
         AttributesFactory af = new AttributesFactory();
-        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
+        af.setDataPolicy(DataPolicy.PARTITION);
         PartitionAttributesFactory paf = new PartitionAttributesFactory();
         paf.setTotalNumBuckets(totalnumOfBuckets);
         paf.setRedundantCopies(1);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java
deleted file mode 100644
index 61ff18d..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionRecoveryDUnitTest.java
+++ /dev/null
@@ -1,415 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.File;
-import java.io.IOException;
-
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache30.CacheTestCase;
-import com.gemstone.gemfire.internal.FileUtil;
-
-import dunit.AsyncInvocation;
-import dunit.Host;
-import dunit.SerializableCallable;
-import dunit.VM;
-
-/**
- * A class for testing the recovery after restart for GemFire cluster that has
- * HDFS regions
- * 
- * @author Hemant Bhanawat
- */
-@SuppressWarnings({ "serial", "deprecation", "rawtypes" })
-public class RegionRecoveryDUnitTest extends CacheTestCase {
-  public RegionRecoveryDUnitTest(String name) {
-    super(name);
-  }
-
-  private static String homeDir = null;
-
-  public void tearDown2() throws Exception {
-    for (int h = 0; h < Host.getHostCount(); h++) {
-      Host host = Host.getHost(h);
-      SerializableCallable cleanUp = cleanUpStores();
-      for (int v = 0; v < host.getVMCount(); v++) {
-        VM vm = host.getVM(v);
-        vm.invoke(cleanUp);
-      }
-    }
-    super.tearDown2();
-  }
-
-  public SerializableCallable cleanUpStores() throws Exception {
-    SerializableCallable cleanUp = new SerializableCallable() {
-      public Object call() throws Exception {
-        if (homeDir != null) {
-          // Each VM will try to delete the same directory. But that's okay as
-          // the subsequent invocations will be no-ops.
-          FileUtil.delete(new File(homeDir));
-          homeDir = null;
-        }
-        return 0;
-      }
-    };
-    return cleanUp;
-  }
-
-  /**
-   * Tests a basic restart of the system. Events if in HDFS should be read back.
-   * The async queue is not persisted so we wait until async queue persists the
-   * items to HDFS.
-   * 
-   * @throws Exception
-   */
-  public void testBasicRestart() throws Exception {
-    disconnectFromDS();
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-    VM vm3 = host.getVM(3);
-
-    // Going two level up to avoid home directories getting created in
-    // VM-specific directory. This avoids failures in those tests where
-    // datastores are restarted and bucket ownership changes between VMs.
-    homeDir = "../../testBasicRestart";
-    String uniqueName = "testBasicRestart";
-
-    createServerRegion(vm0, 11, 1, 500, 500, homeDir, uniqueName);
-    createServerRegion(vm1, 11, 1, 500, 500, homeDir, uniqueName);
-    createServerRegion(vm2, 11, 1, 500, 500, homeDir, uniqueName);
-    createServerRegion(vm3, 11, 1, 500, 500, homeDir, uniqueName);
-
-    doPuts(vm0, uniqueName, 1, 50);
-    doPuts(vm1, uniqueName, 40, 100);
-    doPuts(vm2, uniqueName, 40, 100);
-    doPuts(vm3, uniqueName, 90, 150);
-
-    cacheClose(vm0, true);
-    cacheClose(vm1, true);
-    cacheClose(vm2, true);
-    cacheClose(vm3, true);
-
-    createServerRegion(vm0, 11, 1, 500, 500, homeDir, uniqueName);
-    createServerRegion(vm1, 11, 1, 500, 500, homeDir, uniqueName);
-    createServerRegion(vm2, 11, 1, 500, 500, homeDir, uniqueName);
-    createServerRegion(vm3, 11, 1, 500, 500, homeDir, uniqueName);
-
-    verifyGetsForValue(vm0, uniqueName, 1, 50, false);
-    verifyGetsForValue(vm1, uniqueName, 40, 100, false);
-    verifyGetsForValue(vm2, uniqueName, 40, 100, false);
-    verifyGetsForValue(vm3, uniqueName, 90, 150, false);
-
-    cacheClose(vm0, false);
-    cacheClose(vm1, false);
-    cacheClose(vm2, false);
-    cacheClose(vm3, false);
-
-    disconnectFromDS();
-
-  }
-
-  /**
-   * Servers are stopped and restarted. Disabled due to bug 48067.
-   */
-  public void testPersistedAsyncQueue_Restart() throws Exception {
-    disconnectFromDS();
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-    VM vm3 = host.getVM(3);
-
-    // Going two level up to avoid home directories getting created in
-    // VM-specific directory. This avoids failures in those tests where
-    // datastores are restarted and bucket ownership changes between VMs.
-    homeDir = "../../testPersistedAsyncQueue_Restart";
-    String uniqueName = "testPersistedAsyncQueue_Restart";
-
-    // create cache and region
-    createPersistedServerRegion(vm0, 11, 1, 2000, 5, homeDir, uniqueName);
-    createPersistedServerRegion(vm1, 11, 1, 2000, 5, homeDir, uniqueName);
-    createPersistedServerRegion(vm2, 11, 1, 2000, 5, homeDir, uniqueName);
-    createPersistedServerRegion(vm3, 11, 1, 2000, 5, homeDir, uniqueName);
-
-    // do some puts
-    AsyncInvocation a0 = doAsyncPuts(vm0, uniqueName, 1, 50);
-    AsyncInvocation a1 = doAsyncPuts(vm1, uniqueName, 40, 100);
-    AsyncInvocation a2 = doAsyncPuts(vm2, uniqueName, 40, 100);
-    AsyncInvocation a3 = doAsyncPuts(vm3, uniqueName, 90, 150);
-
-    a3.join();
-    a2.join();
-    a1.join();
-    a0.join();
-
-    // close the cache
-    cacheClose(vm0, true);
-    cacheClose(vm1, true);
-    cacheClose(vm2, true);
-    cacheClose(vm3, true);
-
-    // recreate the cache and regions
-    a3 = createAsyncPersistedServerRegion(vm3, 11, 1, 2000, 5, homeDir, uniqueName);
-    a2 = createAsyncPersistedServerRegion(vm2, 11, 1, 2000, 5, homeDir, uniqueName);
-    a1 = createAsyncPersistedServerRegion(vm1, 11, 1, 2000, 5, homeDir, uniqueName);
-    a0 = createAsyncPersistedServerRegion(vm0, 11, 1, 2000, 5, homeDir, uniqueName);
-
-    a3.join();
-    a2.join();
-    a1.join();
-    a0.join();
-
-    // these gets should probably fetch the data from async queue
-    verifyGetsForValue(vm0, uniqueName, 1, 50, false);
-    verifyGetsForValue(vm1, uniqueName, 40, 100, false);
-    verifyGetsForValue(vm2, uniqueName, 40, 100, false);
-    verifyGetsForValue(vm3, uniqueName, 90, 150, false);
-
-    // these gets wait for sometime before fetching the data. this will ensure
-    // that the reads are done from HDFS
-    verifyGetsForValue(vm0, uniqueName, 1, 50, true);
-    verifyGetsForValue(vm1, uniqueName, 40, 100, true);
-    verifyGetsForValue(vm2, uniqueName, 40, 100, true);
-    verifyGetsForValue(vm3, uniqueName, 90, 150, true);
-
-    cacheClose(vm0, false);
-    cacheClose(vm1, false);
-    cacheClose(vm2, false);
-    cacheClose(vm3, false);
-
-    disconnectFromDS();
-  }
-
-  /**
-   * Stops a single server. A different node becomes primary for the buckets on
-   * the stopped node. Everything should work fine. Disabled due to bug 48067
-   * 
-   */
-  public void testPersistedAsyncQueue_ServerRestart() throws Exception {
-    disconnectFromDS();
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-    VM vm3 = host.getVM(3);
-
-    // Going two level up to avoid home directories getting created in
-    // VM-specific directory. This avoids failures in those tests where
-    // datastores are restarted and bucket ownership changes between VMs.
-    homeDir = "../../testPAQ_ServerRestart";
-    String uniqueName = "testPAQ_ServerRestart";
-
-    createPersistedServerRegion(vm0, 11, 1, 2000, 5, homeDir, uniqueName);
-    createPersistedServerRegion(vm1, 11, 1, 2000, 5, homeDir, uniqueName);
-    createPersistedServerRegion(vm2, 11, 1, 2000, 5, homeDir, uniqueName);
-    createPersistedServerRegion(vm3, 11, 1, 2000, 5, homeDir, uniqueName);
-
-    AsyncInvocation a0 = doAsyncPuts(vm0, uniqueName, 1, 50);
-    AsyncInvocation a1 = doAsyncPuts(vm1, uniqueName, 50, 75);
-    AsyncInvocation a2 = doAsyncPuts(vm2, uniqueName, 75, 100);
-    AsyncInvocation a3 = doAsyncPuts(vm3, uniqueName, 100, 150);
-
-    a3.join();
-    a2.join();
-    a1.join();
-    a0.join();
-
-    cacheClose(vm0, false);
-
-    // these gets should probably fetch the data from async queue
-    verifyGetsForValue(vm1, uniqueName, 1, 50, false);
-    verifyGetsForValue(vm2, uniqueName, 40, 100, false);
-    verifyGetsForValue(vm3, uniqueName, 70, 150, false);
-
-    // these gets wait for sometime before fetching the data. this will ensure
-    // that
-    // the reads are done from HDFS
-    verifyGetsForValue(vm2, uniqueName, 1, 100, true);
-    verifyGetsForValue(vm3, uniqueName, 40, 150, true);
-
-    cacheClose(vm1, false);
-    cacheClose(vm2, false);
-    cacheClose(vm3, false);
-
-    disconnectFromDS();
-  }
-
-  private int createPersistedServerRegion(final VM vm, final int totalnumOfBuckets,
-      final int batchSize, final int batchInterval, final int maximumEntries, 
-      final String folderPath, final String uniqueName) throws IOException {
-    
-    return (Integer) vm.invoke(new PersistedRegionCreation(vm, totalnumOfBuckets,
-      batchSize, batchInterval, maximumEntries, folderPath, uniqueName));
-  }
-  private AsyncInvocation createAsyncPersistedServerRegion(final VM vm, final int totalnumOfBuckets,
-      final int batchSize, final int batchInterval, final int maximumEntries, final String folderPath, 
-      final String uniqueName) throws IOException {
-    
-    return (AsyncInvocation) vm.invokeAsync(new PersistedRegionCreation(vm, totalnumOfBuckets,
-      batchSize, batchInterval, maximumEntries, folderPath, uniqueName));
-  }
-  
-  class PersistedRegionCreation extends SerializableCallable {
-    private VM vm;
-    private int totalnumOfBuckets;
-    private int batchSize;
-    private int maximumEntries;
-    private String folderPath;
-    private String uniqueName;
-    private int batchInterval;
-
-    PersistedRegionCreation(final VM vm, final int totalnumOfBuckets,
-        final int batchSize, final int batchInterval, final int maximumEntries,
-        final String folderPath, final String uniqueName) throws IOException {
-      this.vm = vm;
-      this.totalnumOfBuckets = totalnumOfBuckets;
-      this.batchSize = batchSize;
-      this.maximumEntries = maximumEntries;
-      this.folderPath = new File(folderPath).getCanonicalPath();
-      this.uniqueName = uniqueName;
-      this.batchInterval = batchInterval;
-    }
-
-    public Object call() throws Exception {
-
-      AttributesFactory af = new AttributesFactory();
-      af.setDataPolicy(DataPolicy.HDFS_PARTITION);
-      PartitionAttributesFactory paf = new PartitionAttributesFactory();
-      paf.setTotalNumBuckets(totalnumOfBuckets);
-      paf.setRedundantCopies(1);
-
-      af.setPartitionAttributes(paf.create());
-
-      HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
-      hsf.setHomeDir(folderPath);
-      homeDir = folderPath; // for clean-up in tearDown2()
-      hsf.setBatchSize(batchSize);
-      hsf.setBatchInterval(batchInterval);
-      hsf.setBufferPersistent(true);
-      hsf.setDiskStoreName(uniqueName + vm.getPid());
-
-      getCache().createDiskStoreFactory().create(uniqueName + vm.getPid());
-
-      af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
-      af.setHDFSStoreName(uniqueName);
-      af.setHDFSWriteOnly(false);
-
-      hsf.create(uniqueName);
-
-      createRootRegion(uniqueName, af.create());
-
-      return 0;
-    }
-  };
-
-  private int createServerRegion(final VM vm, final int totalnumOfBuckets,
-      final int batchSize, final int batchInterval, final int maximumEntries,
-      final String folderPath, final String uniqueName) {
-    SerializableCallable createRegion = new SerializableCallable() {
-      public Object call() throws Exception {
-        AttributesFactory af = new AttributesFactory();
-        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
-        PartitionAttributesFactory paf = new PartitionAttributesFactory();
-        paf.setTotalNumBuckets(totalnumOfBuckets);
-        paf.setRedundantCopies(1);
-        af.setPartitionAttributes(paf.create());
-
-        HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
-        homeDir = new File(folderPath).getCanonicalPath();
-        hsf.setHomeDir(homeDir);
-        hsf.setBatchSize(batchSize);
-        hsf.setBatchInterval(batchInterval);
-        hsf.setBufferPersistent(false);
-        hsf.setMaxMemory(1);
-        hsf.create(uniqueName);
-        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
-
-        af.setHDFSWriteOnly(false);
-        af.setHDFSStoreName(uniqueName);
-        createRootRegion(uniqueName, af.create());
-
-        return 0;
-      }
-    };
-
-    return (Integer) vm.invoke(createRegion);
-  }
-
-  private void cacheClose(VM vm, final boolean sleep) {
-    vm.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        if (sleep)
-          Thread.sleep(2000);
-        getCache().getLogger().info("Cache close in progress ");
-        getCache().close();
-        getCache().getDistributedSystem().disconnect();
-        getCache().getLogger().info("Cache closed");
-        return null;
-      }
-    });
-
-  }
-
-  private void doPuts(VM vm, final String regionName, final int start, final int end) throws Exception {
-    vm.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion(regionName);
-        getCache().getLogger().info("Putting entries ");
-        for (int i = start; i < end; i++) {
-          r.put("K" + i, "V" + i);
-        }
-        return null;
-      }
-
-    });
-  }
-
-  private AsyncInvocation doAsyncPuts(VM vm, final String regionName,
-      final int start, final int end) throws Exception {
-    return vm.invokeAsync(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion(regionName);
-        getCache().getLogger().info("Putting entries ");
-        for (int i = start; i < end; i++) {
-          r.put("K" + i, "V" + i);
-        }
-        return null;
-      }
-
-    });
-  }
-
-  private void verifyGetsForValue(VM vm, final String regionName, final int start, final int end, final boolean sleep) throws Exception {
-    vm.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        if (sleep) {
-          Thread.sleep(2000);
-        }
-        getCache().getLogger().info("Getting entries ");
-        Region r = getRootRegion(regionName);
-        for (int i = start; i < end; i++) {
-          String k = "K" + i;
-          Object s = r.get(k);
-          String v = "V" + i;
-          assertTrue("The expected key " + v+ " didn't match the received value " + s, v.equals(s));
-        }
-        return null;
-      }
-
-    });
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java
deleted file mode 100644
index 5a58dc5..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSBasicDUnitTest.java
+++ /dev/null
@@ -1,1594 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.File;
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.Delta;
-import com.gemstone.gemfire.InvalidDeltaException;
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.internal.cache.DistributedPutAllOperation;
-import com.gemstone.gemfire.internal.cache.EntryEventImpl;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
-import dunit.AsyncInvocation;
-import dunit.DistributedTestCase;
-import dunit.Host;
-import dunit.SerializableCallable;
-import dunit.SerializableRunnable;
-import dunit.VM;
-
-/**
- * A class for testing the basic HDFS functionality
- * 
- * @author Hemant Bhanawat
- */
-@SuppressWarnings({ "serial", "rawtypes", "deprecation", "unchecked", "unused" })
-public class RegionWithHDFSBasicDUnitTest extends RegionWithHDFSTestBase {
-
-  private static final Logger logger = LogService.getLogger();
-
-  private ExpectedException ee0;
-  private ExpectedException ee1; 
-
-  public RegionWithHDFSBasicDUnitTest(String name) {
-    super(name);
-  }
-
-  public void setUp() throws Exception {
-    super.setUp();
-    ee0 = DistributedTestCase.addExpectedException("com.gemstone.gemfire.cache.RegionDestroyedException");
-    ee1 = DistributedTestCase.addExpectedException("com.gemstone.gemfire.cache.RegionDestroyedException");
-  }
-
-  public void tearDown2() throws Exception {
-    ee0.remove();
-    ee1.remove();
-    super.tearDown2();
-  }
-
-  @Override
-  protected SerializableCallable getCreateRegionCallable(
-      final int totalnumOfBuckets, final int batchSizeMB,
-      final int maximumEntries, final String folderPath,
-      final String uniqueName, final int batchInterval,
-      final boolean queuePersistent, final boolean writeonly,
-      final long timeForRollover, final long maxFileSize) {
-    SerializableCallable createRegion = new SerializableCallable("Create HDFS region") {
-      public Object call() throws Exception {
-        AttributesFactory af = new AttributesFactory();
-        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
-        PartitionAttributesFactory paf = new PartitionAttributesFactory();
-        paf.setTotalNumBuckets(totalnumOfBuckets);
-        paf.setRedundantCopies(1);
-
-        af.setHDFSStoreName(uniqueName);
-        af.setPartitionAttributes(paf.create());
-
-        HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
-        // Going two level up to avoid home directories getting created in
-        // VM-specific directory. This avoids failures in those tests where
-        // datastores are restarted and bucket ownership changes between VMs.
-        homeDir = new File(tmpDir + "/../../" + folderPath).getCanonicalPath();
-        logger.info("Setting homeDir to {}", homeDir);
-        hsf.setHomeDir(homeDir);
-        hsf.setBatchSize(batchSizeMB);
-        hsf.setBufferPersistent(queuePersistent);
-        hsf.setMaxMemory(3);
-        hsf.setBatchInterval(batchInterval);
-        if (timeForRollover != -1) {
-          hsf.setWriteOnlyFileRolloverInterval((int) timeForRollover);
-          System.setProperty("gemfire.HDFSRegionDirector.FILE_ROLLOVER_TASK_INTERVAL_SECONDS", "1");
-        }
-        if (maxFileSize != -1) {
-          hsf.setWriteOnlyFileRolloverSize((int) maxFileSize);
-        }
-        hsf.create(uniqueName);
-
-        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
-
-        af.setHDFSWriteOnly(writeonly);
-        Region r = createRootRegion(uniqueName, af.create());
-        ((LocalRegion) r).setIsTest();
-
-        return 0;
-      }
-    };
-    return createRegion;
-  }
-
-  @Override
-  protected void doPuts(final String uniqueName, int start, int end) {
-    Region r = getRootRegion(uniqueName);
-    for (int i = start; i < end; i++) {
-      r.put("K" + i, "V" + i);
-    }
-  }
-
-  @Override
-  protected void doPutAll(final String uniqueName, Map map) {
-    Region r = getRootRegion(uniqueName);
-    r.putAll(map);
-  }
-
-  @Override
-  protected void doDestroys(final String uniqueName, int start, int end) {
-    Region r = getRootRegion(uniqueName);
-    for (int i = start; i < end; i++) {
-      r.destroy("K" + i);
-    }
-  }
-
-  @Override
-  protected void checkWithGet(String uniqueName, int start, int end, boolean expectValue) {
-    Region r = getRootRegion(uniqueName);
-    for (int i = start; i < end; i++) {
-      String expected = expectValue ? "V" + i : null;
-      assertEquals("Mismatch on key " + i, expected, r.get("K" + i));
-    }
-  }
-
-  @Override
-  protected void checkWithGetAll(String uniqueName, ArrayList arrayl) {
-    Region r = getRootRegion(uniqueName);
-    Map map = r.getAll(arrayl);
-    logger.info("Read entries {}", map.size());
-    for (Object e : map.keySet()) {
-      String v = e.toString().replaceFirst("K", "V");
-      assertTrue( "Reading entries failed for key " + e + " where value = " + map.get(e), v.equals(map.get(e)));
-    }
-  }
-
-  /**
-   * Tests if gets go to primary even if the value resides on secondary.
-   */
-  public void testValueFetchedFromLocal() {
-    disconnectFromDS();
-
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    String homeDir = "./testValueFetchedFromLocal";
-
-    createServerRegion(vm0, 7, 1, 50, homeDir, "testValueFetchedFromLocal", 1000);
-    createServerRegion(vm1, 7, 1, 50, homeDir, "testValueFetchedFromLocal", 1000);
-
-    vm0.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testValueFetchedFromLocal");
-        for (int i = 0; i < 25; i++) {
-          r.put("K" + i, "V" + i);
-        }
-        return null;
-      }
-    });
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testValueFetchedFromLocal");
-        for (int i = 0; i < 25; i++) {
-          String s = null;
-          String k = "K" + i;
-          s = (String) r.get(k);
-          String v = "V" + i;
-          assertTrue( "The expected key " + v+ " didn't match the received value " + s, v.equals(s));
-        }
-        // with only two members and 1 redundant copy, we will have all data locally, make sure that some
-        // get operations results in a remote get operation
-        assertTrue( "gets should always go to primary, ", ((LocalRegion)r).getCountNotFoundInLocal() != 0 );
-        return null;
-      }
-    });
-  
-    vm0.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testValueFetchedFromLocal");
-        assertTrue( "HDFS queue or HDFS should not have been accessed. They were accessed " + ((LocalRegion)r).getCountNotFoundInLocal()  + " times", 
-            ((LocalRegion)r).getCountNotFoundInLocal() == 0 );
-        return null;
-      }
-    });
-  }
-
-  public void testHDFSQueueSizeTest() {
-    disconnectFromDS();
-
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    String homeDir = "./testHDFSQueueSize";
-
-    createServerRegion(vm0, 1, 10, 50, homeDir, "testHDFSQueueSize", 100000);
-    createServerRegion(vm1, 1, 10, 50, homeDir, "testHDFSQueueSize", 100000);
-
-    vm0.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testHDFSQueueSize");
-        byte[] b = new byte[1024];
-        byte[] k = new byte[1];
-        for (int i = 0; i < 1; i++) {
-          r.put(k, b);
-        }
-        ConcurrentParallelGatewaySenderQueue hdfsqueue = (ConcurrentParallelGatewaySenderQueue)((AbstractGatewaySender)((PartitionedRegion)r).getHDFSEventQueue().getSender()).getQueue();
-        HDFSBucketRegionQueue hdfsBQ = (HDFSBucketRegionQueue)((PartitionedRegion)hdfsqueue.getRegion()).getDataStore().getLocalBucketById(0);
-        if (hdfsBQ.getBucketAdvisor().isPrimary()) {
-          assertTrue("size should not as expected on primary " + hdfsBQ.queueSizeInBytes.get(), hdfsBQ.queueSizeInBytes.get() > 1024 && hdfsBQ.queueSizeInBytes.get() < 1150);
-        } else {
-          assertTrue("size should be 0 on secondary", hdfsBQ.queueSizeInBytes.get()==0);
-        }
-        return null;
-
-      }
-    });
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testHDFSQueueSize");
-        ConcurrentParallelGatewaySenderQueue hdfsqueue = (ConcurrentParallelGatewaySenderQueue)((AbstractGatewaySender)((PartitionedRegion)r).getHDFSEventQueue().getSender()).getQueue();
-        HDFSBucketRegionQueue hdfsBQ = (HDFSBucketRegionQueue)((PartitionedRegion)hdfsqueue.getRegion()).getDataStore().getLocalBucketById(0);
-        if (hdfsBQ.getBucketAdvisor().isPrimary()) {
-          assertTrue("size should not as expected on primary " + hdfsBQ.queueSizeInBytes.get(), hdfsBQ.queueSizeInBytes.get() > 1024 && hdfsBQ.queueSizeInBytes.get() < 1150);
-        } else {
-          assertTrue("size should be 0 on secondary", hdfsBQ.queueSizeInBytes.get()==0);
-        }
-        return null;
-
-      }
-    });
-  }
-
-  /**
-   * Does put for write only HDFS store
-   */
-  public void testBasicPutsForWriteOnlyHDFSStore() {
-    disconnectFromDS();
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    String homeDir = "./testPutsForWriteOnlyHDFSStore";
-
-    createServerRegion(vm0, 7, 1, 20, homeDir, "testPutsForWriteOnlyHDFSStore",
-        100, true, false);
-    createServerRegion(vm1, 7, 1, 20, homeDir, "testPutsForWriteOnlyHDFSStore",
-        100, true, false);
-
-    // Do some puts
-    vm0.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testPutsForWriteOnlyHDFSStore");
-        for (int i = 0; i < 200; i++) {
-          r.put("K" + i, "V" + i);
-        }
-        return null;
-      }
-    });
-
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testPutsForWriteOnlyHDFSStore");
-
-        for (int i = 200; i < 400; i++) {
-          r.put("K" + i, "V" + i);
-        }
-
-        return null;
-      }
-    });
-
-  }
-
-  /**
-   * Does put for write only HDFS store
-   */
-  public void testDelta() {
-    disconnectFromDS();
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    String homeDir = "./testDelta";
-
-    // Expected from com.gemstone.gemfire.internal.cache.ServerPingMessage.send()
-    ExpectedException ee1 = DistributedTestCase.addExpectedException("java.lang.InterruptedException");
-    ExpectedException ee2 = DistributedTestCase.addExpectedException("java.lang.InterruptedException");
-    
-    createServerRegion(vm0, 7, 1, 20, homeDir, "testDelta", 100);
-    createServerRegion(vm1, 7, 1, 20, homeDir, "testDelta", 100);
-
-    // Do some puts
-    vm0.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testDelta");
-        for (int i = 0; i < 100; i++) {
-          r.put("K" + i, new CustomerDelta("V" + i, "address"));
-        }
-        for (int i = 0; i < 50; i++) {
-          CustomerDelta cd = new CustomerDelta("V" + i, "address");
-          cd.setAddress("updated address");
-          r.put("K" + i, cd);
-        }
-        return null;
-      }
-    });
-
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testDelta");
-
-        for (int i = 100; i < 200; i++) {
-          r.put("K" + i, new CustomerDelta("V" + i, "address"));
-        }
-        for (int i = 100; i < 150; i++) {
-          CustomerDelta cd = new CustomerDelta("V" + i, "address");
-          cd.setAddress("updated address");
-          r.put("K" + i, cd);
-        }
-
-        return null;
-      }
-    });
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testDelta");
-        for (int i = 0; i < 50; i++) {
-          CustomerDelta custDela =  new CustomerDelta ("V" + i, "updated address" );
-          String k = "K" + i;
-          CustomerDelta s = (CustomerDelta) r.get(k);
-
-          assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
-        }
-        for (int i = 50; i < 100; i++) {
-          CustomerDelta custDela = new CustomerDelta("V" + i, "address");
-          String k = "K" + i;
-          CustomerDelta s = (CustomerDelta) r.get(k);
-
-          assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
-        }
-        for (int i = 100; i < 150; i++) {
-          CustomerDelta custDela =  new CustomerDelta ("V" + i, "updated address" );
-          String k = "K" + i;
-          CustomerDelta s = (CustomerDelta) r.get(k);
-
-          assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
-        }
-        for (int i = 150; i < 200; i++) {
-          CustomerDelta custDela =  new CustomerDelta ("V" + i, "address" );
-          String k = "K" + i;
-          CustomerDelta s = (CustomerDelta) r.get(k);
-
-          assertTrue( "The expected value " + custDela + " didn't match the received value " + s, custDela.equals(s));
-        }
-        return null;
-      }
-    });
-    ee1.remove();
-    ee2.remove();
-
-  }
-
-  /**
-   * Puts byte arrays and fetches them back to ensure that serialization of byte
-   * arrays is proper
-   * 
-   */
-  public void testByteArrays() {
-    disconnectFromDS();
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    String homeDir = "./testByteArrays";
-
-    createServerRegion(vm0, 7, 1, 20, homeDir, "testByteArrays", 100);
-    createServerRegion(vm1, 7, 1, 20, homeDir, "testByteArrays", 100);
-
-    // Do some puts
-    vm0.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testByteArrays");
-        byte[] b1 = { 0x11, 0x44, 0x77 };
-        byte[] b2 = { 0x22, 0x55 };
-        byte[] b3 = { 0x33 };
-        for (int i = 0; i < 100; i++) {
-          int x = i % 3;
-          if (x == 0) {
-            r.put("K" + i, b1);
-          } else if (x == 1) {
-            r.put("K" + i, b2);
-          } else {
-            r.put("K" + i, b3);
-          }
-        }
-        return null;
-      }
-    });
-
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testByteArrays");
-
-        byte[] b1 = { 0x11, 0x44, 0x77 };
-        byte[] b2 = { 0x22, 0x55 };
-        byte[] b3 = { 0x33 };
-        for (int i = 100; i < 200; i++) {
-          int x = i % 3;
-          if (x == 0) {
-            r.put("K" + i, b1);
-          } else if (x == 1) {
-            r.put("K" + i, b2);
-          } else {
-            r.put("K" + i, b3);
-          }
-        }
-        return null;
-      }
-    });
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r = getRootRegion("testByteArrays");
-        byte[] b1 = { 0x11, 0x44, 0x77 };
-        byte[] b2 = { 0x22, 0x55 };
-        byte[] b3 = { 0x33 };
-        for (int i = 0; i < 200; i++) {
-          int x = i % 3;
-          String k = "K" + i;
-          byte[] s = (byte[]) r.get(k);
-          if (x == 0) {
-            assertTrue( "The expected value didn't match the received value of byte array" , Arrays.equals(b1, s));
-          } else if (x == 1) {
-            assertTrue( "The expected value didn't match the received value of byte array" , Arrays.equals(b2, s));
-          } else {
-            assertTrue( "The expected value didn't match the received value of byte array" , Arrays.equals(b3, s));
-          }
-
-        }
-        return null;
-      }
-    });
-  }
-
-  private static class CustomerDelta implements Serializable, Delta {
-    private String name;
-    private String address;
-    private boolean nameChanged;
-    private boolean addressChanged;
-
-    public CustomerDelta(CustomerDelta o) {
-      this.address = o.address;
-      this.name = o.name;
-    }
-
-    public CustomerDelta(String name, String address) {
-      this.name = name;
-      this.address = address;
-    }
-
-    public void fromDelta(DataInput in) throws IOException,
-        InvalidDeltaException {
-      boolean nameC = in.readBoolean();
-      if (nameC) {
-        this.name = in.readUTF();
-      }
-      boolean addressC = in.readBoolean();
-      if (addressC) {
-        this.address = in.readUTF();
-      }
-    }
-
-    public boolean hasDelta() {
-      return nameChanged || addressChanged;
-    }
-
-    public void toDelta(DataOutput out) throws IOException {
-      out.writeBoolean(nameChanged);
-      if (this.nameChanged) {
-        out.writeUTF(name);
-      }
-      out.writeBoolean(addressChanged);
-      if (this.addressChanged) {
-        out.writeUTF(address);
-      }
-    }
-
-    public void setName(String name) {
-      this.nameChanged = true;
-      this.name = name;
-    }
-
-    public String getName() {
-      return name;
-    }
-
-    public void setAddress(String address) {
-      this.addressChanged = true;
-      this.address = address;
-    }
-
-    public String getAddress() {
-      return address;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (!(obj instanceof CustomerDelta)) {
-        return false;
-      }
-      CustomerDelta other = (CustomerDelta) obj;
-      return this.name.equals(other.name) && this.address.equals(other.address);
-    }
-
-    @Override
-    public int hashCode() {
-      return this.address.hashCode() + this.name.hashCode();
-    }
-
-    @Override
-    public String toString() {
-      return "name=" + this.name + "address=" + address;
-    }
-  }
-
-  public void testClearRegionDataInQueue() throws Throwable {
-    doTestClearRegion(100000, false);
-
-  }
-
-  public void testClearRegionDataInHDFS() throws Throwable {
-    doTestClearRegion(1, true);
-  }
-
-  public void doTestClearRegion(int batchInterval, boolean waitForWriteToHDFS) throws Throwable {
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-
-    final int numEntries = 400;
-
-    String name = getName();
-    final String folderPath = "./" + name;
-    // Create some regions. Note that we want a large batch interval
-    // so that we will have some entries sitting in the queue when
-    // we do a clear.
-    final String uniqueName = name;
-    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, batchInterval,
-        false, true);
-    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, batchInterval,
-        false, true);
-
-    doPuts(vm0, uniqueName, numEntries);
-
-    // Make sure some files have been written to hdfs.
-    if (waitForWriteToHDFS) {
-      verifyDataInHDFS(vm0, uniqueName, true, true, waitForWriteToHDFS, numEntries);
-    }
-
-    // Do a clear
-    simulateClear(uniqueName, vm0, vm1);
-
-    validateEmpty(vm0, numEntries, uniqueName);
-    validateEmpty(vm1, numEntries, uniqueName);
-
-    // Double check that there is no data in hdfs now
-    verifyDataInHDFS(vm0, uniqueName, false, false, waitForWriteToHDFS, numEntries);
-    verifyDataInHDFS(vm1, uniqueName, false, false, waitForWriteToHDFS, numEntries);
-
-    closeCache(vm0);
-    closeCache(vm1);
-
-    AsyncInvocation async0 = createServerRegionAsync(vm0, 7, 31, 200, folderPath, 
-        uniqueName, 100000, false, true);
-    AsyncInvocation async1 = createServerRegionAsync(vm1, 7, 31, 200, folderPath, 
-        uniqueName, 100000, false, true);
-    async0.getResult();
-    async1.getResult();
-
-    validateEmpty(vm0, numEntries, uniqueName);
-    validateEmpty(vm1, numEntries, uniqueName);
-  }
-
-  private void simulateClear(final String name, VM... vms) throws Throwable {
-    simulateClearForTests(true);
-    try {
-
-      // Gemfire PRs don't support clear
-      // gemfirexd does a clear by taking gemfirexd ddl locks
-      // and then clearing each primary bucket on the primary.
-      // Simulate that by clearing all primaries on each vm.
-      // See GemFireContainer.clear
-
-      SerializableCallable clear = new SerializableCallable("clear") {
-        public Object call() throws Exception {
-          PartitionedRegion r = (PartitionedRegion) getRootRegion(name);
-
-          r.clearLocalPrimaries();
-
-          return null;
-        }
-      };
-
-      // Invoke the clears concurrently
-      AsyncInvocation[] async = new AsyncInvocation[vms.length];
-      for (int i = 0; i < vms.length; i++) {
-        async[i] = vms[i].invokeAsync(clear);
-      }
-
-      // Get the clear results.
-      for (int i = 0; i < async.length; i++) {
-        async[i].getResult();
-      }
-
-    } finally {
-      simulateClearForTests(false);
-    }
-  }
-
-  protected void simulateClearForTests(final boolean isGfxd) {
-    SerializableRunnable setGfxd = new SerializableRunnable() {
-      @Override
-      public void run() {
-        if (isGfxd) {
-          LocalRegion.simulateClearForTests(true);
-        } else {
-          LocalRegion.simulateClearForTests(false);
-        }
-      }
-    };
-    setGfxd.run();
-    invokeInEveryVM(setGfxd);
-  }
-
-  /**
-   * Test that we can locally destroy a member, without causing problems with
-   * the data in HDFS. This was disabled due to ticket 47793.
-   * 
-   * @throws InterruptedException
-   */
-  public void testLocalDestroy() throws InterruptedException {
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    int numEntries = 200;
-
-    final String folderPath = "./testLocalDestroy";
-    final String uniqueName = "testLocalDestroy";
-
-    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
-    doPuts(vm0, uniqueName, numEntries);
-
-    // Make sure some files have been written to hdfs and wait for
-    // the queue to drain.
-    verifyDataInHDFS(vm0, uniqueName, true, true, true, numEntries);
-
-    validate(vm0, uniqueName, numEntries);
-
-    SerializableCallable localDestroy = new SerializableCallable("local destroy") {
-      public Object call() throws Exception {
-        Region r = getRootRegion(uniqueName);
-        r.localDestroyRegion();
-        return null;
-      }
-    };
-
-    vm0.invoke(localDestroy);
-
-    verifyNoQOrPR(vm0);
-
-    validate(vm1, uniqueName, numEntries);
-
-    vm1.invoke(localDestroy);
-
-    verifyNoQOrPR(vm1);
-
-    closeCache(vm0);
-    closeCache(vm1);
-
-    // Restart vm0 and see if the data is still available from HDFS
-    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
-    validate(vm0, uniqueName, numEntries);
-  }
-
-  /**
-   * Test that doing a destroyRegion removes all data from HDFS.
-   * 
-   * @throws InterruptedException
-   */
-  public void testGlobalDestroyWithHDFSData() throws InterruptedException {
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-
-    final String folderPath = "./testGlobalDestroyWithHDFSData";
-    final String uniqueName = "testGlobalDestroyWithHDFSData";
-    int numEntries = 200;
-
-    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
-    doPuts(vm0, uniqueName, numEntries);
-
-    // Make sure some files have been written to hdfs.
-    verifyDataInHDFS(vm0, uniqueName, true, true, false, numEntries);
-
-    SerializableCallable globalDestroy = new SerializableCallable("destroy") {
-      public Object call() throws Exception {
-        Region r = getRootRegion(uniqueName);
-        r.destroyRegion();
-        return null;
-      }
-    };
-
-    vm0.invoke(globalDestroy);
-
-    // make sure data is not in HDFS
-    verifyNoQOrPR(vm0);
-    verifyNoQOrPR(vm1);
-    verifyNoHDFSData(vm0, uniqueName);
-    verifyNoHDFSData(vm1, uniqueName);
-
-    closeCache(vm0);
-    closeCache(vm1);
-
-    // Restart vm0 and make sure it's still empty
-    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
-    // make sure it's empty
-    validateEmpty(vm0, numEntries, uniqueName);
-    validateEmpty(vm1, numEntries, uniqueName);
-
-  }
-
-  /**
-   * Test that doing a destroyRegion removes all data from HDFS.
-   */
-  public void _testGlobalDestroyWithQueueData() {
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-
-    final String folderPath = "./testGlobalDestroyWithQueueData";
-    final String uniqueName = "testGlobalDestroyWithQueueData";
-    int numEntries = 200;
-
-    // set a large queue timeout so that data is still in the queue
-    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 10000, false,
-        true);
-    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 10000, false,
-        true);
-
-    doPuts(vm0, uniqueName, numEntries);
-
-    SerializableCallable globalDestroy = new SerializableCallable("destroy") {
-      public Object call() throws Exception {
-        Region r = getRootRegion(uniqueName);
-        r.destroyRegion();
-        return null;
-      }
-    };
-
-    vm0.invoke(globalDestroy);
-
-    // make sure data is not in HDFS
-    verifyNoQOrPR(vm0);
-    verifyNoQOrPR(vm1);
-    verifyNoHDFSData(vm0, uniqueName);
-    verifyNoHDFSData(vm1, uniqueName);
-
-    closeCache(vm0);
-    closeCache(vm1);
-
-    // Restart vm0 and make sure it's still empty
-    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
-    // make sure it's empty
-    validateEmpty(vm0, numEntries, uniqueName);
-    validateEmpty(vm1, numEntries, uniqueName);
-
-  }
-
-  /**
-   * Make sure all async event queues and PRs a destroyed in a member
-   */
-  public void verifyNoQOrPR(VM vm) {
-    vm.invoke(new SerializableRunnable() {
-      @Override
-      public void run() {
-        GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
-        assertEquals(Collections.EMPTY_SET, cache.getAsyncEventQueues());
-        assertEquals(Collections.EMPTY_SET, cache.getPartitionedRegions());
-      }
-    });
-
-  }
-
-  /**
-   * Make sure all of the data for a region in HDFS is destroyed
-   */
-  public void verifyNoHDFSData(final VM vm, final String uniqueName) {
-    vm.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws IOException {
-        HDFSStoreImpl hdfsStore = (HDFSStoreImpl) ((GemFireCacheImpl)getCache()).findHDFSStore(uniqueName);
-        FileSystem fs = hdfsStore.getFileSystem();
-        Path path = new Path(hdfsStore.getHomeDir(), uniqueName);
-        if (fs.exists(path)) {
-          dumpFiles(vm, uniqueName);
-          fail("Found files in " + path);
-        }
-        return null;
-      }
-    });
-  }
-
-  protected AsyncInvocation doAsyncPuts(VM vm, final String regionName,
-      final int start, final int end, final String suffix) throws Exception {
-    return doAsyncPuts(vm, regionName, start, end, suffix, "");
-  }
-
-  protected AsyncInvocation doAsyncPuts(VM vm, final String regionName,
-      final int start, final int end, final String suffix, final String value)
-      throws Exception {
-    return vm.invokeAsync(new SerializableCallable("doAsyncPuts") {
-      public Object call() throws Exception {
-        Region r = getRootRegion(regionName);
-        String v = "V";
-        if (!value.equals("")) {
-          v = value;
-        }
-        logger.info("Putting entries ");
-        for (int i = start; i < end; i++) {
-          r.put("K" + i, v + i + suffix);
-        }
-        return null;
-      }
-
-    });
-  }
-
-  public void _testGlobalDestroyFromAccessor() {
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-
-    final String folderPath = "./testGlobalDestroyFromAccessor";
-    final String uniqueName = "testGlobalDestroyFromAccessor";
-    int numEntries = 200;
-
-    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-    createServerAccessor(vm2, 7, 40, uniqueName);
-
-    doPuts(vm0, uniqueName, numEntries);
-
-    // Make sure some files have been written to hdfs.
-    verifyDataInHDFS(vm0, uniqueName, true, true, false, numEntries);
-
-    SerializableCallable globalDestroy = new SerializableCallable("destroy") {
-      public Object call() throws Exception {
-        Region r = getRootRegion(uniqueName);
-        r.destroyRegion();
-        return null;
-      }
-    };
-
-    // Destroy the region from an accessor
-    vm2.invoke(globalDestroy);
-
-    // make sure data is not in HDFS
-    verifyNoQOrPR(vm0);
-    verifyNoQOrPR(vm1);
-    verifyNoHDFSData(vm0, uniqueName);
-    verifyNoHDFSData(vm1, uniqueName);
-
-    closeCache(vm0);
-    closeCache(vm1);
-    closeCache(vm2);
-
-    // Restart vm0 and make sure it's still empty
-    createServerRegion(vm0, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-    createServerRegion(vm1, 7, 31, 40, folderPath, uniqueName, 1, false, true);
-
-    // make sure it's empty
-    validateEmpty(vm0, numEntries, uniqueName);
-    validateEmpty(vm1, numEntries, uniqueName);
-  }
-
-  /**
-   * create a server with maxfilesize as 2 MB. Insert 4 entries of 1 MB each.
-   * There should be 2 files with 2 entries each.
-   * 
-   * @throws Throwable
-   */
-  public void testWOFileSizeParam() throws Throwable {
-    disconnectFromDS();
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-
-    String homeDir = "./testWOFileSizeParam";
-    final String uniqueName = getName();
-    String value = "V";
-    for (int i = 0; i < 20; i++) {
-      value += value;
-    }
-
-    createServerRegion(vm0, 1, 1,  500, homeDir, uniqueName, 5, true, false, 2000, 2);
-    createServerRegion(vm1, 1, 1,  500, homeDir, uniqueName, 5, true, false, 2000, 2);
-
-    AsyncInvocation a1 = doAsyncPuts(vm0, uniqueName, 1, 3, "vm0", value);
-    AsyncInvocation a2 = doAsyncPuts(vm1, uniqueName, 2, 4, "vm1", value);
-
-    a1.join();
-    a2.join();
-
-    Thread.sleep(4000);
-
-    cacheClose(vm0, false);
-    cacheClose(vm1, false);
-
-    // Start the VMs in parallel for the persistent version subclass
-    AsyncInvocation async1 = createServerRegionAsync(vm0, 1, 1,  500, homeDir, uniqueName, 5, true, false, 2000, 2);
-    AsyncInvocation async2 = createServerRegionAsync(vm1, 1, 1,  500, homeDir, uniqueName, 5, true, false, 2000, 2);
-    async1.getResult();
-    async2.getResult();
-
-    // There should be two files in bucket 0.
-    verifyTwoHDFSFilesWithTwoEntries(vm0, uniqueName, value);
-
-    cacheClose(vm0, false);
-    cacheClose(vm1, false);
-
-    disconnectFromDS();
-
-  }
-
-  /**
-   * Create server with file rollover time as 5 seconds. Insert few entries and
-   * then sleep for 7 seconds. A file should be created. Do it again. At the end, two
-   * files with inserted entries should be created.
-   * 
-   * @throws Throwable
-   */
-  public void testWOTimeForRollOverParam() throws Throwable {
-    disconnectFromDS();
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-
-    String homeDir = "./testWOTimeForRollOverParam";
-    final String uniqueName = getName();
-
-    createServerRegion(vm0, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
-    createServerRegion(vm1, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
-
-    AsyncInvocation a1 = doAsyncPuts(vm0, uniqueName, 1, 8, "vm0");
-    AsyncInvocation a2 = doAsyncPuts(vm1, uniqueName, 4, 10, "vm1");
-
-    a1.join();
-    a2.join();
-
-    Thread.sleep(7000);
-
-    a1 = doAsyncPuts(vm0, uniqueName, 10, 18, "vm0");
-    a2 = doAsyncPuts(vm1, uniqueName, 14, 20, "vm1");
-
-    a1.join();
-    a2.join();
-
-    Thread.sleep(7000);
-
-    cacheClose(vm0, false);
-    cacheClose(vm1, false);
-
-    AsyncInvocation async1 = createServerRegionAsync(vm0, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
-    AsyncInvocation async2 = createServerRegionAsync(vm1, 1, 1, 500, homeDir, uniqueName, 5, true, false, 5, 1);
-    async1.getResult();
-    async2.getResult();
-
-    // There should be two files in bucket 0.
-    // Each should have entry 1 to 10 and duplicate from 4 to 7
-    verifyTwoHDFSFiles(vm0, uniqueName);
-
-    cacheClose(vm0, false);
-    cacheClose(vm1, false);
-
-    disconnectFromDS();
-
-  }
-
-  private void createServerAccessor(VM vm, final int totalnumOfBuckets,
-      final int maximumEntries, final String uniqueName) {
-    SerializableCallable createRegion = new SerializableCallable() {
-      public Object call() throws Exception {
-        AttributesFactory af = new AttributesFactory();
-        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
-        PartitionAttributesFactory paf = new PartitionAttributesFactory();
-        paf.setTotalNumBuckets(totalnumOfBuckets);
-        paf.setRedundantCopies(1);
-        // make this member an accessor.
-        paf.setLocalMaxMemory(0);
-        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
-        af.setPartitionAttributes(paf.create());
-
-        Region r = createRootRegion(uniqueName, af.create());
-        assertTrue(!((PartitionedRegion) r).isDataStore());
-
-        return null;
-      }
-    };
-
-    vm.invoke(createRegion);
-  }
-
-  @Override
-  protected void verifyHDFSData(VM vm, String uniqueName) throws Exception {
-
-    HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName);
-    HashMap<String, String> entriesMap = new HashMap<String, String>();
-    for (HashMap<String, String> v : filesToEntriesMap.values()) {
-      entriesMap.putAll(v);
-    }
-    verifyInEntriesMap(entriesMap, 1, 50, "vm0");
-    verifyInEntriesMap(entriesMap, 40, 100, "vm1");
-    verifyInEntriesMap(entriesMap, 40, 100, "vm2");
-    verifyInEntriesMap(entriesMap, 90, 150, "vm3");
-
-  }
-
-  protected void verifyTwoHDFSFiles(VM vm, String uniqueName) throws Exception {
-
-    HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName);
-
-    assertTrue("there should be exactly two files, but there are "
-        + filesToEntriesMap.size(), filesToEntriesMap.size() == 2);
-    long timestamp = Long.MAX_VALUE;
-    String olderFile = null;
-    for (Map.Entry<String, HashMap<String, String>> e : filesToEntriesMap
-        .entrySet()) {
-      String fileName = e.getKey().substring(
-          0,
-          e.getKey().length()
-              - AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION.length());
-      long newTimeStamp = Long.parseLong(fileName.substring(
-          fileName.indexOf("-") + 1, fileName.lastIndexOf("-")));
-      if (newTimeStamp < timestamp) {
-        olderFile = e.getKey();
-        timestamp = newTimeStamp;
-      }
-    }
-    verifyInEntriesMap(filesToEntriesMap.get(olderFile), 1, 8, "vm0");
-    verifyInEntriesMap(filesToEntriesMap.get(olderFile), 4, 10, "vm1");
-    filesToEntriesMap.remove(olderFile);
-    verifyInEntriesMap(filesToEntriesMap.values().iterator().next(), 10, 18, "vm0");
-    verifyInEntriesMap(filesToEntriesMap.values().iterator().next(), 14, 20, "vm1");
-  }
-
-  protected void verifyTwoHDFSFilesWithTwoEntries(VM vm, String uniqueName,
-      String value) throws Exception {
-
-    HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName);
-    
-    assertTrue( "there should be exactly two files, but there are " + filesToEntriesMap.size(), filesToEntriesMap.size() == 2);
-    HashMap<String, String> entriesMap =  new HashMap<String, String>();
-    for (HashMap<String, String>  v : filesToEntriesMap.values()) {
-      entriesMap.putAll(v);
-    }
-    assertTrue( "Expected key K1 received  " + entriesMap.get(value+ "1vm0"), entriesMap.get(value+ "1vm0").equals("K1"));
-    assertTrue( "Expected key K2 received  " + entriesMap.get(value+ "2vm0"), entriesMap.get(value+ "2vm0").equals("K2"));
-    assertTrue( "Expected key K2 received  " + entriesMap.get(value+ "2vm1"), entriesMap.get(value+ "2vm1").equals("K2"));
-    assertTrue( "Expected key K3 received  " + entriesMap.get(value+ "3vm1"), entriesMap.get(value+ "3vm1").equals("K3"));
- }
-
-  /**
-   * verify that a PR accessor can be started
-   */
-  public void testPRAccessor() {
-    Host host = Host.getHost(0);
-    VM accessor = host.getVM(0);
-    VM datastore1 = host.getVM(1);
-    VM datastore2 = host.getVM(2);
-    VM accessor2 = host.getVM(3);
-    final String regionName = getName();
-    final String storeName = "store_" + regionName;
-
-    SerializableCallable createRegion = new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        HDFSStoreFactory storefactory = getCache().createHDFSStoreFactory();
-        homeDir = new File("../" + regionName).getCanonicalPath();
-        storefactory.setHomeDir(homeDir);
-        storefactory.create(storeName);
-        AttributesFactory<Integer, String> af = new AttributesFactory<Integer, String>();
-        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
-        af.setHDFSStoreName(storeName);
-        Region r = getCache().createRegionFactory(af.create()).create(regionName);
-        r.put("key1", "value1");
-        return null;
-      }
-    };
-
-    SerializableCallable createAccessorRegion = new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        HDFSStoreFactory storefactory = getCache().createHDFSStoreFactory();
-        homeDir = new File("../" + regionName).getCanonicalPath();
-        storefactory.setHomeDir(homeDir);
-        storefactory.create(storeName);
-        // DataPolicy PARTITION with localMaxMemory 0 cannot be created
-        AttributesFactory<Integer, String> af = new AttributesFactory<Integer, String>();
-        af.setDataPolicy(DataPolicy.PARTITION);
-        PartitionAttributesFactory<Integer, String> paf = new PartitionAttributesFactory<Integer, String>();
-        paf.setLocalMaxMemory(0);
-        af.setPartitionAttributes(paf.create());
-        // DataPolicy PARTITION with localMaxMemory 0 can be created if hdfsStoreName is set
-        af.setHDFSStoreName(storeName);
-        // No need to check with different storeNames (can never be done in GemFireXD)
-        Region r = getCache().createRegionFactory(af.create()).create(regionName);
-        r.localDestroyRegion();
-        // DataPolicy HDFS_PARTITION with localMaxMemory 0 can be created
-        af = new AttributesFactory<Integer, String>();
-        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
-        af.setPartitionAttributes(paf.create());
-        getCache().createRegionFactory(af.create()).create(regionName);
-        return null;
-      }
-    };
-
-    datastore1.invoke(createRegion);
-    accessor.invoke(createAccessorRegion);
-    datastore2.invoke(createRegion);
-    accessor2.invoke(createAccessorRegion);
-  }
-
-  /**
-   * verify that PUT dml does not read from hdfs
-   */
-  public void testPUTDMLSupport() {
-    doPUTDMLWork(false);
-  }
-
-  public void testPUTDMLBulkSupport() {
-    doPUTDMLWork(true);
-  }
-
-  private void doPUTDMLWork(final boolean isPutAll) {
-    Host host = Host.getHost(0);
-    VM vm1 = host.getVM(0);
-    VM vm2 = host.getVM(1);
-    final String regionName = getName();
-
-    createServerRegion(vm1, 7, 1, 50, "./" + regionName, regionName, 1000);
-    createServerRegion(vm2, 7, 1, 50, "./" + regionName, regionName, 1000);
-
-    vm1.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        Region r = getCache().getRegion(regionName);
-        LocalRegion lr = (LocalRegion) r;
-        SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
-        long readsFromHDFS = stats.getRead().getCount();
-        assertEquals(0, readsFromHDFS);
-        if (isPutAll) {
-          Map m = new HashMap();
-          // map with only one entry
-          m.put("key0", "value0");
-          DistributedPutAllOperation ev = lr.newPutAllOperation(m, null);
-          lr.basicPutAll(m, ev, null);
-          m.clear();
-          // map with multiple entries
-          for (int i = 1; i < 100; i++) {
-            m.put("key" + i, "value" + i);
-          }
-          ev = lr.newPutAllOperation(m, null);
-          lr.basicPutAll(m, ev, null);
-        } else {
-          for (int i = 0; i < 100; i++) {
-            r.put("key" + i, "value" + i);
-          }
-        }
-        return null;
-      }
-    });
-
-    SerializableCallable getHDFSReadCount = new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
-        return stats.getRead().getCount();
-      }
-    };
-
-    long vm1Count = (Long) vm1.invoke(getHDFSReadCount);
-    long vm2Count = (Long) vm2.invoke(getHDFSReadCount);
-    assertEquals(100, vm1Count + vm2Count);
-
-    pause(10 * 1000);
-
-    vm1.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        // do puts using the new api
-        LocalRegion lr = (LocalRegion) getCache().getRegion(regionName);
-        if (isPutAll) {
-          Map m = new HashMap();
-          // map with only one entry
-          m.put("key0", "value0");
-          DistributedPutAllOperation ev = lr.newPutAllForPUTDmlOperation(m, null);
-          lr.basicPutAll(m, ev, null);
-          m.clear();
-          // map with multiple entries
-          for (int i = 1; i < 200; i++) {
-            m.put("key" + i, "value" + i);
-          }
-          ev = lr.newPutAllForPUTDmlOperation(m, null);
-          lr.basicPutAll(m, ev, null);
-        } else {
-          for (int i = 0; i < 200; i++) {
-            EntryEventImpl ev = lr.newPutEntryEvent("key" + i, "value" + i, null);
-            lr.validatedPut(ev, System.currentTimeMillis());
-          }
-        }
-        return null;
-      }
-    });
-
-    // verify the stat for hdfs reads has not incremented
-    vm1Count = (Long) vm1.invoke(getHDFSReadCount);
-    vm2Count = (Long) vm2.invoke(getHDFSReadCount);
-    assertEquals(100, vm1Count + vm2Count);
-
-    vm1.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        Region r = getCache().getRegion(regionName);
-        for (int i = 0; i < 200; i++) {
-          assertEquals("value" + i, r.get("key" + i));
-        }
-        return null;
-      }
-    });
-  }
-
-  /**
-   * verify that get on operational data does not read from HDFS
-   */
-  public void testGetOperationalData() {
-    Host host = Host.getHost(0);
-    VM vm1 = host.getVM(0);
-    VM vm2 = host.getVM(1);
-    final String regionName = getName();
-
-    createServerRegion(vm1, 7, 1, 50, "./"+regionName, regionName, 1000);
-    createServerRegion(vm2, 7, 1, 50, "./"+regionName, regionName, 1000);
-
-    vm1.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        Region r = getCache().getRegion(regionName);
-        SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
-        long readsFromHDFS = stats.getRead().getCount();
-        assertEquals(0, readsFromHDFS);
-        for (int i = 0; i < 100; i++) {
-          logger.info("SWAP:DOING PUT:key{}", i);
-          r.put("key" + i, "value" + i);
-        }
-        return null;
-      }
-    });
-
-    SerializableCallable getHDFSReadCount = new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
-        return stats.getRead().getCount();
-      }
-    };
-
-    long vm1Count = (Long) vm1.invoke(getHDFSReadCount);
-    long vm2Count = (Long) vm2.invoke(getHDFSReadCount);
-    assertEquals(100, vm1Count + vm2Count);
-
-    pause(10 * 1000);
-
-    // verify that get increments the read stat
-    vm1.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        Region r = getCache().getRegion(regionName);
-        for (int i = 0; i < 200; i++) {
-          if (i < 100) {
-            logger.info("SWAP:DOING GET:key", i);
-            assertEquals("value" + i, r.get("key" + i));
-          } else {
-            assertNull(r.get("key" + i));
-          }
-        }
-        return null;
-      }
-    });
-
-    vm1Count = (Long) vm1.invoke(getHDFSReadCount);
-    vm2Count = (Long) vm2.invoke(getHDFSReadCount);
-    // initial 100 + 150 for get (since 50 are in memory)
-    assertEquals(250, vm1Count + vm2Count);
-
-    // do gets with readFromHDFS set to false
-    vm1.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        Region r = getCache().getRegion(regionName);
-        LocalRegion lr = (LocalRegion) r;
-        int numEntries = 0;
-        for (int i = 0; i < 200; i++) {
-          logger.info("SWAP:DOING GET NO READ:key", i);
-          Object val = lr.get("key"+i, null, true, false, false, null,  null, false, false/*allowReadFromHDFS*/);
-          if (val != null) {
-            numEntries++;
-          }
-        }
-        assertEquals(50, numEntries); // entries in memory
-        return null;
-      }
-    });
-
-    vm1Count = (Long) vm1.invoke(getHDFSReadCount);
-    vm2Count = (Long) vm2.invoke(getHDFSReadCount);
-    // get should not have incremented
-    assertEquals(250, vm1Count + vm2Count);
-
-    /**MergeGemXDHDFSToGFE Have not merged this API as this api is not called by any code*/
-    /*
-    // do gets using DataView
-    SerializableCallable getUsingDataView = new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        Region r = getCache().getRegion(regionName);
-        LocalRegion lr = (LocalRegion) r;
-        PartitionedRegion pr = (PartitionedRegion) lr;
-        long numEntries = 0;
-        for (int i=0; i<200; i++) {
-          InternalDataView idv = lr.getDataView();
-          logger.debug("SWAP:DATAVIEW");
-          Object val = idv.getLocally("key"+i, null, PartitionedRegionHelper.getHashKey(pr, "key"+i), lr, true, true, null, null, false, false);
-          if (val != null) {
-            numEntries++;
-          }
-        }
-        return numEntries;
-      }
-    };
-
-    vm1Count = (Long) vm1.invoke(getUsingDataView);
-    vm2Count = (Long) vm2.invoke(getUsingDataView);
-    assertEquals(50 * 2, vm1Count + vm2Count);// both VMs will find 50 entries*/
-
-    vm1Count = (Long) vm1.invoke(getHDFSReadCount);
-    vm2Count = (Long) vm2.invoke(getHDFSReadCount);
-    // get should not have incremented
-    assertEquals(250, vm1Count + vm2Count);
-
-  }
-
-  public void testSizeEstimate() {
-    Host host = Host.getHost(0);
-    VM vm1 = host.getVM(0);
-    VM vm2 = host.getVM(1);
-    VM vm3 = host.getVM(2);
-    final String regionName = getName();
-
-    createServerRegion(vm1, 7, 1, 50, "./"+regionName, regionName, 1000);
-    createServerRegion(vm2, 7, 1, 50, "./"+regionName, regionName, 1000);
-    createServerRegion(vm3, 7, 1, 50, "./"+regionName, regionName, 1000);
-
-    final int size = 226;
-
-    vm1.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        Region r = getCache().getRegion(regionName);
-        // LocalRegion lr = (LocalRegion) r;
-        for (int i = 0; i < size; i++) {
-          r.put("key" + i, "value" + i);
-        }
-        // before flush
-        // assertEquals(size, lr.sizeEstimate());
-        return null;
-      }
-    });
-
-    pause(10 * 1000);
-
-    vm2.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        Region r = getCache().getRegion(regionName);
-        LocalRegion lr = (LocalRegion) r;
-        logger.debug("SWAP:callingsizeEstimate");
-        long estimate = lr.sizeEstimate();
-        double err = Math.abs(estimate - size) / (double) size;
-        System.out.println("SWAP:estimate:" + estimate);
-        assertTrue(err < 0.2);
-        return null;
-      }
-    });
-  }
-
-  public void testForceAsyncMajorCompaction() throws Exception {
-    doForceCompactionTest(true, false);
-  }
-
-  public void testForceSyncMajorCompaction() throws Exception {
-    // more changes
-    doForceCompactionTest(true, true);
-  }
-
-  private void doForceCompactionTest(final boolean isMajor, final boolean isSynchronous) throws Exception {
-    Host host = Host.getHost(0);
-    VM vm1 = host.getVM(0);
-    VM vm2 = host.getVM(1);
-    VM vm3 = host.getVM(2);
-    final String regionName = getName();
-
-    createServerRegion(vm1, 7, 1, 50, "./" + regionName, regionName, 1000);
-    createServerRegion(vm2, 7, 1, 50, "./" + regionName, regionName, 1000);
-    createServerRegion(vm3, 7, 1, 50, "./" + regionName, regionName, 1000);
-
-    SerializableCallable noCompaction = new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
-        if (isMajor) {
-          assertEquals(0, stats.getMajorCompaction().getCount());
-        } else {
-          assertEquals(0, stats.getMinorCompaction().getCount());
-        }
-        return null;
-      }
-    };
-
-    vm1.invoke(noCompaction);
-    vm2.invoke(noCompaction);
-    vm3.invoke(noCompaction);
-
-    vm1.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        Region r = getCache().getRegion(regionName);
-        for (int i = 0; i < 500; i++) {
-          r.put("key" + i, "value" + i);
-          if (i % 100 == 0) {
-            // wait for flush
-            pause(3000);
-          }
-        }
-        pause(3000);
-        PartitionedRegion pr = (PartitionedRegion) r;
-        long lastCompactionTS = pr.lastMajorHDFSCompaction();
-        assertEquals(0, lastCompactionTS);
-        long beforeCompact = System.currentTimeMillis();
-        pr.forceHDFSCompaction(true, isSynchronous ? 0 : 1);
-        if (isSynchronous) {
-          final SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
-          assertTrue(stats.getMajorCompaction().getCount() > 0);
-          assertTrue(pr.lastMajorHDFSCompaction() >= beforeCompact);
-        }
-        return null;
-      }
-    });
-
-    if (!isSynchronous) {
-      SerializableCallable verifyCompactionStat = new SerializableCallable() {
-        @Override
-        public Object call() throws Exception {
-          final SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
-          waitForCriterion(new WaitCriterion() {
-            @Override
-            public boolean done() {
-              return stats.getMajorCompaction().getCount() > 0;
-            }
-
-            @Override
-            public String description() {
-              return "Major compaction stat not > 0";
-            }
-          }, 30 * 1000, 1000, true);
-          return null;
-        }
-      };
-
-      vm1.invoke(verifyCompactionStat);
-      vm2.invoke(verifyCompactionStat);
-      vm3.invoke(verifyCompactionStat);
-    } else {
-      SerializableCallable verifyCompactionStat = new SerializableCallable() {
-        @Override
-        public Object call() throws Exception {
-          final SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
-          assertTrue(stats.getMajorCompaction().getCount() > 0);
-          return null;
-        }
-      };
-      vm2.invoke(verifyCompactionStat);
-      vm3.invoke(verifyCompactionStat);
-    }
-  }
-
-  public void testFlushQueue() throws Exception {
-    doFlushQueue(false);
-  }
-
-  public void testFlushQueueWO() throws Exception {
-    doFlushQueue(true);
-  }
-
-  private void doFlushQueue(boolean wo) throws Exception {
-    Host host = Host.getHost(0);
-    VM vm1 = host.getVM(0);
-    VM vm2 = host.getVM(1);
-    VM vm3 = host.getVM(2);
-    final String regionName = getName();
-
-    createServerRegion(vm1, 7, 1, 50, "./"+regionName, regionName, 300000, wo, false);
-    createServerRegion(vm2, 7, 1, 50, "./"+regionName, regionName, 300000, wo, false);
-    createServerRegion(vm3, 7, 1, 50, "./"+regionName, regionName, 300000, wo, false);
-
-    vm1.invoke(new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionName);
-        for (int i = 0; i < 500; i++) {
-          pr.put("key" + i, "value" + i);
-        }
-
-        pr.flushHDFSQueue(0);
-        return null;
-      }
-    });
-
-    SerializableCallable verify = new SerializableCallable() {
-      @Override
-      public Object call() throws Exception {
-        PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionName);
-        assertEquals(0, pr.getHDFSEventQueueStats().getEventQueueSize());
-        return null;
-      }
-    };
-
-    vm1.invoke(verify);
-    vm2.invoke(verify);
-    vm3.invoke(verify);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java
deleted file mode 100644
index ee517d2..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSOffHeapBasicDUnitTest.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.File;
-import java.util.Properties;
-
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
-
-import dunit.SerializableCallable;
-import dunit.SerializableRunnable;
-
-@SuppressWarnings({ "serial", "rawtypes", "deprecation" })
-public class RegionWithHDFSOffHeapBasicDUnitTest extends
-    RegionWithHDFSBasicDUnitTest {
-  static {
-    System.setProperty("gemfire.trackOffHeapRefCounts", "true");
-  }
-  
-  public RegionWithHDFSOffHeapBasicDUnitTest(String name) {
-    super(name);
-  }
-  
-  @Override
-  public void tearDown2() throws Exception {
-    SerializableRunnable checkOrphans = new SerializableRunnable() {
-
-      @Override
-      public void run() {
-        if(hasCache()) {
-          OffHeapTestUtil.checkOrphans();
-        }
-      }
-    };
-    try {
-      checkOrphans.run();
-      invokeInEveryVM(checkOrphans);
-    } finally {
-      // proceed with tearDown2 anyway.
-      super.tearDown2();
-    }
-  }
-
-   public void testDelta() {
-     //do nothing, deltas aren't supported with off heap.
-   }
-  
-  @Override
-  protected SerializableCallable getCreateRegionCallable(final int totalnumOfBuckets,
-      final int batchSizeMB, final int maximumEntries, final String folderPath,
-      final String uniqueName, final int batchInterval, final boolean queuePersistent,
-      final boolean writeonly, final long timeForRollover, final long maxFileSize) {
-    SerializableCallable createRegion = new SerializableCallable() {
-      public Object call() throws Exception {
-        AttributesFactory af = new AttributesFactory();
-        af.setDataPolicy(DataPolicy.HDFS_PARTITION);
-        PartitionAttributesFactory paf = new PartitionAttributesFactory();
-        paf.setTotalNumBuckets(totalnumOfBuckets);
-        paf.setRedundantCopies(1);
-        
-        af.setHDFSStoreName(uniqueName);
-        af.setPartitionAttributes(paf.create());
-        HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
-        // Going two level up to avoid home directories getting created in
-        // VM-specific directory. This avoids failures in those tests where
-        // datastores are restarted and bucket ownership changes between VMs.
-        homeDir = new File(tmpDir + "/../../" + folderPath).getCanonicalPath();
-        hsf.setHomeDir(homeDir);
-        hsf.setBatchSize(batchSizeMB);
-        hsf.setBufferPersistent(queuePersistent);
-        hsf.setMaxMemory(3);
-        hsf.setBatchInterval(batchInterval);
-        if (timeForRollover != -1) {
-          hsf.setWriteOnlyFileRolloverInterval((int)timeForRollover);
-          System.setProperty("gemfire.HDFSRegionDirector.FILE_ROLLOVER_TASK_INTERVAL_SECONDS", "1");
-        }
-        if (maxFileSize != -1) {
-          hsf.setWriteOnlyFileRolloverSize((int) maxFileSize);
-        }
-        hsf.create(uniqueName);
-        
-        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
-        
-        af.setHDFSWriteOnly(writeonly);
-        af.setOffHeap(true);;
-        Region r = createRootRegion(uniqueName, af.create());
-        ((LocalRegion)r).setIsTest();
-        
-        return 0;
-      }
-    };
-    return createRegion;
-  }
-
-  @Override
-  public Properties getDistributedSystemProperties() {
-    Properties props = super.getDistributedSystemProperties();
-    props.setProperty("off-heap-memory-size", "50m");
-    return props;
-  }
-}


[41/50] [abbrv] incubator-geode git commit: [fixes GEODE-456] Fixes a race condition in the test by waiting for the condition instead.

Posted by ds...@apache.org.
[fixes GEODE-456] Fixes a race condition in the test by waiting for the condition instead.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/20c39d7f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/20c39d7f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/20c39d7f

Branch: refs/heads/develop
Commit: 20c39d7f3ee100a80f0243c8dbd4b598653ea951
Parents: 791a418
Author: eshu <es...@pivotal.io>
Authored: Fri Oct 23 10:34:29 2015 -0700
Committer: eshu <es...@pivotal.io>
Committed: Fri Oct 23 10:37:24 2015 -0700

----------------------------------------------------------------------
 .../cache/PartitionedRegionSingleHopDUnitTest.java  | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/20c39d7f/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
index 0e2ec72..95ead5d 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
@@ -1110,7 +1110,19 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     ClientMetadataService cms = ((GemFireCacheImpl)cache).getClientMetadataService();
     cms.getClientPRMetadata((LocalRegion)region);
     
-    Map<String, ClientPartitionAdvisor> regionMetaData = cms.getClientPRMetadata_TEST_ONLY();    
+    final Map<String, ClientPartitionAdvisor> regionMetaData = cms.getClientPRMetadata_TEST_ONLY();    
+    
+    WaitCriterion wc = new WaitCriterion() {
+      public boolean done() {
+        return (regionMetaData.size() == 1);
+      }
+
+      public String description() {
+        return "expected metadata is ready";
+      }
+    };
+    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    
     assertEquals(1, regionMetaData.size());
     assertTrue(regionMetaData.containsKey(region.getFullPath()));
     
@@ -1134,7 +1146,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     
     ClientPartitionAdvisor prMetaData = regionMetaData.get(region.getFullPath());
     final Map<Integer, List<BucketServerLocation66>> clientMap  = prMetaData.getBucketServerLocationsMap_TEST_ONLY();
-    WaitCriterion wc = new WaitCriterion() {
+    wc = new WaitCriterion() {
       public boolean done() {
         return (clientMap.size() == 4);
       }


[28/50] [abbrv] incubator-geode git commit: GEODE-453, GEODE-458: Ignoring suspect string in testRegisterInterestNoDataStores

Posted by ds...@apache.org.
GEODE-453, GEODE-458: Ignoring suspect string in testRegisterInterestNoDataStores


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/5c7bbd01
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/5c7bbd01
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/5c7bbd01

Branch: refs/heads/develop
Commit: 5c7bbd019cd2dd4bb4929dc355144c584b7514dd
Parents: 937134b
Author: Dan Smith <up...@apache.org>
Authored: Wed Oct 21 14:16:54 2015 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Wed Oct 21 14:20:00 2015 -0700

----------------------------------------------------------------------
 .../cache/partitioned/PersistentPartitionedRegionDUnitTest.java | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5c7bbd01/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
index 8b3ae52..6274dcf 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
@@ -1325,6 +1325,11 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
   
   
   public void testRegisterInterestNoDataStores() {
+    //Closing the client may log a warning on the server
+    addExpectedException("Connection reset");
+    addExpectedException("SocketTimeoutException");
+    addExpectedException("ServerConnectivityException");
+    addExpectedException("Socket Closed");
     final Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);


[20/50] [abbrv] incubator-geode git commit: GEODE-429: Remove HdfsStore Junit and Dunits

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
deleted file mode 100644
index 7b45952..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/TieredCompactionJUnitTest.java
+++ /dev/null
@@ -1,904 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.QueuedPersistentEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer.HoplogCompactor;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
-import com.gemstone.gemfire.internal.cache.ForceReattemptException;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class TieredCompactionJUnitTest extends BaseHoplogTestCase {
-  static long ONE_MB = 1024 * 1024;
-  static long TEN_MB = 10 * ONE_MB;
-  
-  @Override
-  protected void configureHdfsStoreFactory() throws Exception {
-    super.configureHdfsStoreFactory();
-    
-    hsf.setInputFileCountMin(3);
-    hsf.setMinorCompaction(false);
-    hsf.setMajorCompaction(false);
-  }
-  
-  public void testMinorCompaction() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
-    // #1
-    ArrayList<QueuedPersistentEvent> items = new ArrayList<QueuedPersistentEvent>();
-    items.add(new TestEvent("1", "1"));
-    items.add(new TestEvent("2", "1"));
-    items.add(new TestEvent("3", "1"));
-    items.add(new TestEvent("4", "1"));
-    organizer.flush(items.iterator(), items.size());
-
-    // #2
-    items.clear();
-    items.add(new TestEvent("2", "1"));
-    items.add(new TestEvent("4", "1"));
-    items.add(new TestEvent("6", "1"));
-    items.add(new TestEvent("8", "1"));
-    organizer.flush(items.iterator(), items.size());
-
-    // #3
-    items.clear();
-    items.add(new TestEvent("1", "1"));
-    items.add(new TestEvent("3", "1"));
-    items.add(new TestEvent("5", "1"));
-    items.add(new TestEvent("7", "1"));
-    items.add(new TestEvent("9", "1"));
-    organizer.flush(items.iterator(), items.size());
-
-    // #4
-    items.clear();
-    items.add(new TestEvent("0", "1"));
-    items.add(new TestEvent("1", "1"));
-    items.add(new TestEvent("4", "1"));
-    items.add(new TestEvent("5", "1"));
-    organizer.flush(items.iterator(), items.size());
-
-    // check file existence in bucket directory, expect 4 hoplgos
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(4, hoplogs.length);
-
-    // After compaction expect 1 hoplog only. It should have the same sequence number as that of the
-    // youngest file compacted, which should be 4 in this case
-    organizer.getCompactor().compact(false, false);
-    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
-    assertEquals(1, hoplogs.length);
-    assertEquals(1, organizer.getSortedOplogs().size());
-    Hoplog hoplog = new HFileSortedOplog(hdfsStore, hoplogs[0].getPath(), blockCache, stats, storeStats);
-    assertEquals(4, HdfsSortedOplogOrganizer.getSequenceNumber(hoplog));
-
-    // iterate on oplogs to validate data in files
-    HoplogSetIterator iter = new HoplogSetIterator(organizer.getSortedOplogs());
-    // the iteration pattern for this test should be 0-9:
-    // 0 1 4 5 oplog #4
-    // 1 3 5 7 9 oplog #3
-    // 2 4 6 8 oplog #2
-    // 1 2 3 4 oplog #1
-    int count = 0;
-    for (ByteBuffer keyBB = null; iter.hasNext();) {
-      keyBB = iter.next();
-      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
-      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
-      count++;
-    }
-    assertEquals(10, count);
-
-    // there must be 4 expired hoplogs now
-    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(4, hoplogs.length);
-    organizer.close();
-  }
-  
-  public void testIterativeMinorCompaction() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
-    // #1
-    ArrayList<QueuedPersistentEvent> items = new ArrayList<QueuedPersistentEvent>();
-    items.add(new TestEvent("1", "1"));
-    items.add(new TestEvent("2", "1"));
-    organizer.flush(items.iterator(), items.size());
-
-    items.clear();
-    items.add(new TestEvent("1", "2"));
-    items.add(new TestEvent("3", "2"));
-    organizer.flush(items.iterator(), items.size());
-
-    items.clear();
-    items.add(new TestEvent("4", "3"));
-    items.add(new TestEvent("5", "3"));
-    organizer.flush(items.iterator(), items.size());
-    
-    // check file existence in bucket directory
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(3, hoplogs.length);
-
-    organizer.getCompactor().compact(false, false);
-    
-    FileStatus[] expired = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(3, expired.length);
-    FileStatus[] valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, expired);
-    assertEquals(0, valids.length);
-    // After compaction expect 1 hoplog only.
-    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
-    assertEquals(1, hoplogs.length);
-    
-    items.clear();
-    items.add(new TestEvent("4", "4"));
-    items.add(new TestEvent("6", "4"));
-    organizer.flush(items.iterator(), items.size());
-
-    items.clear();
-    items.add(new TestEvent("7", "5"));
-    items.add(new TestEvent("8", "5"));
-    organizer.flush(items.iterator(), items.size());
-    
-    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(5, hoplogs.length);
-    
-    organizer.getCompactor().compact(false, false);
-    expired = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(6, expired.length);
-    valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, expired);
-    assertEquals(0, valids.length);    
-    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
-    assertEquals(2, hoplogs.length);
-    valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, expired);
-    assertEquals(1, valids.length);
-    
-    assertEquals("2", organizer.read(BlobHelper.serializeToBlob("1")).getValue());
-    assertEquals("1", organizer.read(BlobHelper.serializeToBlob("2")).getValue());
-    assertEquals("2", organizer.read(BlobHelper.serializeToBlob("3")).getValue());
-    assertEquals("4", organizer.read(BlobHelper.serializeToBlob("4")).getValue());
-    assertEquals("3", organizer.read(BlobHelper.serializeToBlob("5")).getValue());
-    assertEquals("4", organizer.read(BlobHelper.serializeToBlob("6")).getValue());
-    assertEquals("5", organizer.read(BlobHelper.serializeToBlob("7")).getValue());
-    organizer.close();
-  }
-
-  public void testMajorCompactionWithDelete() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
-    // #1
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent("1", "1"));
-    items.add(new TestEvent("2", "1"));
-    items.add(new TestEvent("3", "1"));
-    items.add(new TestEvent("4", "1"));
-    items.add(new TestEvent("4", "10", Operation.DESTROY));
-    organizer.flush(items.iterator(), items.size());
-
-    // #2
-    items.clear();
-    items.add(new TestEvent("2", "1", Operation.DESTROY));
-    items.add(new TestEvent("4", "1", Operation.DESTROY));
-    items.add(new TestEvent("6", "1", Operation.INVALIDATE));
-    items.add(new TestEvent("8", "1"));
-    organizer.flush(items.iterator(), items.size());
-
-    // #3
-    items.clear();
-    items.add(new TestEvent("1", "1"));
-    items.add(new TestEvent("3", "1"));
-    items.add(new TestEvent("5", "1"));
-    items.add(new TestEvent("7", "1"));
-    items.add(new TestEvent("9", "1", Operation.DESTROY));
-    organizer.flush(items.iterator(), items.size());
-
-    // #4
-    items.clear();
-    items.add(new TestEvent("0", "1", Operation.DESTROY));
-    items.add(new TestEvent("1", "1"));
-    items.add(new TestEvent("4", "1"));
-    items.add(new TestEvent("5", "1"));
-    organizer.flush(items.iterator(), items.size());
-
-    // check file existence in bucket directory, expect 4 hoplgos
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(4, hoplogs.length);
-
-    // After compaction expect 1 hoplog only. It should have the same sequence number as that of the
-    // youngest file compacted, which should be 4 in this case
-    organizer.getCompactor().compact(true, false);
-    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    assertEquals(1, hoplogs.length);
-    assertEquals(1, organizer.getSortedOplogs().size());
-    Hoplog hoplog = new HFileSortedOplog(hdfsStore, hoplogs[0].getPath(), blockCache, stats, storeStats);
-    assertEquals(4, HdfsSortedOplogOrganizer.getSequenceNumber(hoplog));
-
-    // iterate on oplogs to validate data in files
-    HoplogSetIterator iter = new HoplogSetIterator(organizer.getSortedOplogs());
-    int count = 0;
-
-    // entries in () are destroyed or invalidated
-    // 1, 2, 3, 4, (11)
-    // (2), (4), (6), 8
-    // 1, 3, 5, 7, (9)
-    // (0), 1, 4, 5
-    String[] expectedValues = { "1", "3", "4", "5", "7", "8" };
-    for (ByteBuffer keyBB = null; iter.hasNext();) {
-      keyBB = iter.next();
-      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
-      assertEquals(expectedValues[count], BlobHelper.deserializeBlob(key));
-      count++;
-    }
-    assertEquals(6, count);
-
-    // there must be 4 expired hoplogs now
-    hoplogs = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(4, hoplogs.length);
-    organizer.close();
-  }
-  
-  public void testGainComputation() throws Exception {
-    HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
-    HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
-    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
-    for (int i = 0; i < 10; i++) {
-      targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB)));
-    }    
-
-    // each read has cost 3. Four files read cost is 3 * 4. Reduce read cost of
-    // file after compaction
-    float expect = (float) ((3 * 4.0 - 3) / (20 + 30 + 40 + 50));
-    float result = bucket.computeGain(2, 5, targets);
-    assertTrue(Math.abs(expect - result) < (expect/1000));
-    
-    // each read has cost 3 except 10MB file with read cost 2. 9 files read cost
-    // is 3 * 9. Reduce read cost of file after compaction.
-    expect = (float) ((3 * 9 - 3 - 1.0) / (10 + 20 + 30 + 40 + 50 + 60 + 70 + 80 + 90));
-    result = bucket.computeGain(0, 9, targets);
-    assertTrue(Math.abs(expect - result) < (expect/1000));
-  }
-
-  public void testGainComputeSmallFile() throws Exception {
-    HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
-    HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
-    
-    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
-    for (int i = 0; i < 10; i++) {
-      targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB / 1024)));
-    }
-
-    float result = bucket.computeGain(2, 5, targets);
-    assertTrue(Math.abs(8.0 - result) < (1.0/1000));
-  }
-  
-  public void testGainComputeMixedFiles() throws Exception {
-    HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
-    HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
-    
-    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
-    for (int i = 0; i < 10; i++) {
-      targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB / 1024)));
-    }
-    TestHoplog midHop = (TestHoplog) targets.get(4).get();
-    // one more than other files
-    midHop.size = 5  * TEN_MB;
-    
-    float expect = (float) ((4 * 2 - 3 + 1.0) / 50);
-    float result = bucket.computeGain(2, 5, targets);
-    System.out.println(expect);
-    System.out.println(result);
-    assertTrue(Math.abs(expect - result) < (expect/1000));
-  }
-  
-  public void testGainComputeBadRatio() throws Exception {
-    HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
-    HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
-    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
-    for (int i = 0; i < 10; i++) {
-      targets.add(new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, i * TEN_MB)));
-    }
-
-    TestHoplog firstHop = (TestHoplog) targets.get(2).get();
-    // one more than other files
-    firstHop.size = (1 + 30 + 40 + 50)  * TEN_MB;
-    Float result = bucket.computeGain(2, 5, targets);
-    assertNull(result);
-  }
-  
-  public void testMinorCompactionTargetMaxSize() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
-    ArrayList<TrackedReference<TestHoplog>> targets = new ArrayList<TrackedReference<TestHoplog>>();
-    for (int i = 0; i < 5; i++) {
-      TrackedReference<TestHoplog> hop = new TrackedReference<TestHoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
-      hop.increment();
-      targets.add(hop);
-    }
-    TrackedReference<TestHoplog> oldestHop = targets.get(targets.size() - 1);
-    TestHoplog thirdHop = (TestHoplog) targets.get(2).get();
-
-    // oldest is more than max size is ignored 
-    oldestHop.get().size = HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB + 100;
-    List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
-    compactor.getMinorCompactionTargets(list, -1);
-    assertEquals(4, list.size());
-    for (TrackedReference<Hoplog> ref : list) {
-      assertTrue(((TestHoplog)ref.get()).size - TEN_MB < 5 );
-    }
-    
-    // third is more than max size but is not ignored
-    thirdHop.size = HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB + 100;
-    oldestHop.increment();
-    list = (List<TrackedReference<Hoplog>>) targets.clone();
-    compactor.getMinorCompactionTargets(list, -1);
-    assertEquals(4, list.size());
-    int i = 0;
-    for (TrackedReference<Hoplog> ref : list) {
-      if (i != 2) {
-        assertTrue(((TestHoplog) ref.get()).size - TEN_MB < 5);
-      } else {
-        assertTrue(((TestHoplog) ref.get()).size > HDFSStore.DEFAULT_INPUT_FILE_SIZE_MAX_MB * ONE_MB);
-      }
-      i++;
-    }
-  }
-  
-  public void testAlterMaxInputFileSize() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
-    assertTrue(TEN_MB * 2 < hdfsStore.getInputFileSizeMax() * ONE_MB);
-    
-    ArrayList<TrackedReference<TestHoplog>> targets = new ArrayList<TrackedReference<TestHoplog>>();
-    for (int i = 0; i < 5; i++) {
-      TrackedReference<TestHoplog> hop = new TrackedReference<TestHoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
-      hop.increment();
-      targets.add(hop);
-    }
-    
-    List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
-    compactor.getMinorCompactionTargets(list, -1);
-    assertEquals(targets.size(), list.size());
-    
-    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-    mutator.setInputFileSizeMax(1);
-    hdfsStore.alter(mutator);
-    
-    compactor.getMinorCompactionTargets(list, -1);
-    assertEquals(0, list.size());
-  }
-  
-  public void testAlterInputFileCount() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-    
-    assertTrue(2 < hdfsStore.getInputFileCountMax());
-    
-    ArrayList<TrackedReference<TestHoplog>> targets = new ArrayList<TrackedReference<TestHoplog>>();
-    for (int i = 0; i < 5; i++) {
-      TrackedReference<TestHoplog> hop = new TrackedReference<TestHoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
-      hop.increment();
-      targets.add(hop);
-    }
-    
-    List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
-    compactor.getMinorCompactionTargets(list, -1);
-    assertEquals(targets.size(), list.size());
-    
-    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-    mutator.setInputFileCountMax(2);
-    mutator.setInputFileCountMin(2);
-    hdfsStore.alter(mutator);
-    
-    compactor.getMinorCompactionTargets(list, -1);
-    assertEquals(2, list.size());
-  }
-  
-  public void testAlterMajorCompactionInterval() throws Exception {
-    final AtomicInteger majorCReqCount = new AtomicInteger(0);
-    
-    final Compactor compactor = new AbstractCompactor() {
-      @Override
-      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
-        majorCReqCount.incrementAndGet();
-        return true;
-      }
-    };
-    
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
-      @Override
-      public synchronized Compactor getCompactor() {
-        return compactor;
-      }
-    };
-
-    // create hoplog in the past, 90 seconds before current time
-    organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, ONE_MB, System.currentTimeMillis() - 90000));
-    TimeUnit.MILLISECONDS.sleep(50);
-    organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, ONE_MB, System.currentTimeMillis() - 90000));
-    
-    alterMajorCompaction(hdfsStore, true);
-    
-    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
-    assertEquals(2, hoplogs.size());
-    
-    organizer.performMaintenance();
-    TimeUnit.MILLISECONDS.sleep(100);
-    assertEquals(0, majorCReqCount.get());
-    
-    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-    mutator.setMajorCompactionInterval(1);
-    hdfsStore.alter(mutator);
-    
-    organizer.performMaintenance();
-    TimeUnit.MILLISECONDS.sleep(100);
-    assertEquals(1, majorCReqCount.get());
-  }
-
-  public void testMinorCompactionTargetMinCount() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-    
-    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
-    for (int i = 0; i < 2; i++) {
-      TrackedReference<Hoplog> hop = new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
-      hop.increment();
-      targets.add(hop);
-    }
-    compactor.getMinorCompactionTargets(targets, -1);
-    assertEquals(0, targets.size());
-  }
-  
-  public void testMinorCompactionLessTargetsStatsUpdate() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent("1", "1"));
-    organizer.flush(items.iterator(), items.size());
-
-    items.clear();
-    items.add(new TestEvent("2", "2", Operation.DESTROY));
-    organizer.flush(items.iterator(), items.size());
-    
-    TimeUnit.SECONDS.sleep(1);
-    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
-    assertEquals(2, hoplogs.size());
-    
-    organizer.performMaintenance();
-    hoplogs = organizer.getSortedOplogs();
-    assertEquals(2, hoplogs.size());
-  }
-  
-  public void testMinorCompactionTargetsOptimizer() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
-    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
-    for (int i = 0; i < 6; i++) {
-      TrackedReference<Hoplog> hop = new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
-      hop.increment();
-      targets.add(hop);
-    }
-    List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
-    compactor.getMinorCompactionTargets(list, -1);
-    assertEquals(6, list.size());
-    
-    TestHoplog fifthHop = (TestHoplog) targets.get(4).get();
-    // fifth hop needs additional block read as it has more than max keys size 
-    fifthHop.size = (HdfsSortedOplogOrganizer.AVG_NUM_KEYS_PER_INDEX_BLOCK * 5 + 1) * 64 * 1024;
-    list = (List<TrackedReference<Hoplog>>) targets.clone();
-    compactor.getMinorCompactionTargets(list, -1);
-    assertEquals(4, list.size());
-    for (TrackedReference<Hoplog> ref : list) {
-      assertTrue(((TestHoplog)ref.get()).size - TEN_MB < 4 );
-    }
-  }
-  
-  public void testTargetsReleasedBadRatio() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
-    ArrayList<TrackedReference<Hoplog>> targets = new ArrayList<TrackedReference<Hoplog>>();
-    for (int i = 0; i < 3; i++) {
-      TrackedReference<Hoplog> hop = new TrackedReference<Hoplog>(new TestHoplog(hdfsStore, TEN_MB + i));
-      hop.increment();
-      targets.add(hop);
-    }
-    TestHoplog oldestHop = (TestHoplog) targets.get(2).get();
-    oldestHop.size = (1 + 30)  * TEN_MB;
-    
-    List<TrackedReference<Hoplog>> list = (List<TrackedReference<Hoplog>>) targets.clone();
-    compactor.getMinorCompactionTargets(list, -1);
-    assertEquals(0, list.size());
-    assertEquals(3, targets.size());
-    for (TrackedReference<Hoplog> ref : targets) {
-      assertEquals(0, ref.uses());
-    }
-  }
-  
-  public void testMinorCTargetsIgnoreMajorC() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < 7; i++) {
-      items.clear();
-      items.add(new TestEvent("1" + i, "1" + i));
-      organizer.flush(items.iterator(), items.size());
-    }
-    
-    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-    List<TrackedReference<Hoplog>> targets = organizer.getSortedOplogs();
-    compactor.getMinorCompactionTargets(targets, -1);
-    assertEquals(7, targets.size());
-    
-    targets = organizer.getSortedOplogs();
-    for (TrackedReference<Hoplog> ref : targets) {
-      ref.increment();
-    }
-    compactor.getMinorCompactionTargets(targets, 2);
-    assertEquals((7 - 2), targets.size());
-    targets = organizer.getSortedOplogs();
-    for (int i = 0; i < targets.size(); i++) {
-      if (i + 1 <= (7 - 2)) {
-        assertEquals(1, targets.get(i).uses());
-      } else {
-        assertEquals(0, targets.get(i).uses());
-      }
-    }
-    
-    targets = organizer.getSortedOplogs();
-    for (TrackedReference<Hoplog> ref : targets) {
-      if (ref.uses() == 0) {
-        ref.increment();
-      }
-      assertEquals(1, ref.uses());
-    }
-    compactor.getMinorCompactionTargets(targets, 7);
-    assertEquals(0, targets.size());
-    
-    targets = organizer.getSortedOplogs();
-    for (int i = 0; i < targets.size(); i++) {
-      assertEquals(0, targets.get(i).uses());
-    }
-  }
-  
-  public void testTargetOverlap() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < 7; i++) {
-      items.clear();
-      items.add(new TestEvent("1" + i, "1" + i));
-      organizer.flush(items.iterator(), items.size());
-    }
-    
-    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-    List<TrackedReference<Hoplog>> targets = organizer.getSortedOplogs();
-    assertTrue(compactor.isMinorMajorOverlap(targets, 8));
-    assertTrue(compactor.isMinorMajorOverlap(targets, 7));
-    assertTrue(compactor.isMinorMajorOverlap(targets, 6));
-    assertTrue(compactor.isMinorMajorOverlap(targets, 1));
-    assertFalse(compactor.isMinorMajorOverlap(targets, 0));
-    assertFalse(compactor.isMinorMajorOverlap(targets, -1));
-    
-    targets.remove(targets.size() -1); // remove the last one 
-    targets.remove(targets.size() -1); // remove the last one again
-    assertFalse(compactor.isMinorMajorOverlap(targets, 1));
-    assertFalse(compactor.isMinorMajorOverlap(targets, 2));
-    assertTrue(compactor.isMinorMajorOverlap(targets, 3));
-    
-    targets.remove(3); // remove from the middle, seq num 4
-    assertTrue(compactor.isMinorMajorOverlap(targets, 4));
-    assertTrue(compactor.isMinorMajorOverlap(targets, 3));
-  }
-  
-  public void testSuspendMinorByMajor() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < 5; i++) {
-      items.clear();
-      items.add(new TestEvent("1" + i, "1" + i));
-      organizer.flush(items.iterator(), items.size());
-    }
-    
-    HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-
-    Hoplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir + "/"
-        + getName() + "-" + System.currentTimeMillis() + "-1.ihop.tmp"), blockCache, stats, storeStats);
-    compactor.fillCompactionHoplog(false, organizer.getSortedOplogs(), hoplog, -1);
-    
-    cache.getLogger().info("<ExpectedException action=add>java.lang.InterruptedException</ExpectedException>");
-    try {
-      compactor.maxMajorCSeqNum.set(3);
-      compactor.fillCompactionHoplog(false, organizer.getSortedOplogs(), hoplog, -1);
-      fail();
-    } catch (InterruptedException e) {
-      // expected
-    }
-    cache.getLogger().info("<ExpectedException action=remove>java.lang.InterruptedException</ExpectedException>");
-    organizer.close();
-  }
-  
-  public void testMajorCompactionSetsSeqNum() throws Exception {
-    final CountDownLatch compactionStartedLatch = new CountDownLatch(1);
-    final CountDownLatch waitLatch = new CountDownLatch(1);
-    class MyOrganizer extends HdfsSortedOplogOrganizer {
-      final HoplogCompactor compactor = new MyCompactor();
-      public MyOrganizer(HdfsRegionManager region, int bucketId) throws IOException {
-        super(region, bucketId);
-      }
-      public synchronized Compactor getCompactor() {
-        return compactor;
-      }
-      class MyCompactor extends HoplogCompactor {
-        @Override
-        public long fillCompactionHoplog(boolean isMajor,
-            List<TrackedReference<Hoplog>> targets, Hoplog output,
-            int majorCSeqNum) throws IOException, InterruptedException {
-          compactionStartedLatch.countDown();
-          waitLatch.await();
-          long byteCount = 0;
-          try {
-            byteCount = super.fillCompactionHoplog(isMajor, targets, output, majorCSeqNum);
-          } catch (ForceReattemptException e) {
-            // we do not expect this in a unit test. 
-          }
-          return byteCount;
-        }
-      }
-    }
-    
-    final HdfsSortedOplogOrganizer organizer = new MyOrganizer(regionManager, 0);
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < 3; i++) {
-      items.clear();
-      items.add(new TestEvent("1" + i, "1" + i));
-      organizer.flush(items.iterator(), items.size());
-    }
-    
-    Thread t = new Thread(new Runnable() {
-      public void run() {
-        try {
-          organizer.getCompactor().compact(true, false);
-        } catch (IOException e) {
-          e.printStackTrace();
-        }
-      }
-    });
-    t.start();
-    compactionStartedLatch.await();
-    assertEquals(3, ((HoplogCompactor)organizer.getCompactor()).maxMajorCSeqNum.get());
-    waitLatch.countDown();
-    t.join();
-  }
-  
-  public void testMinorWatchesMajorsSeqNum() throws Exception {
-    final CountDownLatch majorCStartedLatch = new CountDownLatch(1);
-    final CountDownLatch majorCWaitLatch = new CountDownLatch(1);
-    
-    final CountDownLatch minorCStartedLatch = new CountDownLatch(1);
-    final List<TrackedReference<Hoplog>> minorTargets = new ArrayList<TrackedReference<Hoplog>>();
-    
-    class MyOrganizer extends HdfsSortedOplogOrganizer {
-      final HoplogCompactor compactor = new MyCompactor();
-      public MyOrganizer(HdfsRegionManager region, int bucketId) throws IOException {
-        super(region, bucketId);
-      }
-      public synchronized Compactor getCompactor() {
-        return compactor;
-      }
-      class MyCompactor extends HoplogCompactor {
-        @Override
-        public long fillCompactionHoplog(boolean isMajor,
-            List<TrackedReference<Hoplog>> targets, Hoplog output,
-            int majorCSeqNum) throws IOException, InterruptedException {
-          if (isMajor) {
-            majorCStartedLatch.countDown();
-            majorCWaitLatch.await();
-          } else {
-            minorCStartedLatch.countDown();
-            minorTargets.addAll(targets);
-          }
-          long byteCount =0;
-          try {
-            byteCount = super.fillCompactionHoplog(isMajor, targets, output, majorCSeqNum);
-          } catch (ForceReattemptException e) {
-            // we do not expect this in a unit test. 
-          }
-          return byteCount;
-        }
-      }
-    }
-    
-    final HdfsSortedOplogOrganizer organizer = new MyOrganizer(regionManager, 0);
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < 3; i++) {
-      items.clear();
-      items.add(new TestEvent("1" + i, "1" + i));
-      organizer.flush(items.iterator(), items.size());
-    }
-    
-    Thread majorCThread = new Thread(new Runnable() {
-      public void run() {
-        try {
-          organizer.getCompactor().compact(true, false);
-        } catch (IOException e) {
-          e.printStackTrace();
-        }
-      }
-    });
-    majorCThread.start();
-    majorCStartedLatch.await();
-    assertEquals(3, ((HoplogCompactor)organizer.getCompactor()).maxMajorCSeqNum.get());
-
-    // create more files for minor C
-    for (int i = 0; i < 4; i++) {
-      items.clear();
-      items.add(new TestEvent("1" + i, "1" + i));
-      organizer.flush(items.iterator(), items.size());
-    }
-    
-    Thread minorCThread = new Thread(new Runnable() {
-      public void run() {
-        try {
-          organizer.getCompactor().compact(false, false);
-        } catch (IOException e) {
-          e.printStackTrace();
-        }
-      }
-    });
-    minorCThread.start();
-    minorCThread.join();
-    assertEquals(4, minorTargets.size());
-    for (TrackedReference<Hoplog> ref : minorTargets) {
-      assertTrue(organizer.getSequenceNumber(ref.get()) >= 4);
-    }
-    
-    majorCWaitLatch.countDown();
-    majorCThread.join();
-  }
-  
-  public void testTimeBoundedSuspend() throws Exception {
-    final AtomicBoolean barrier = new AtomicBoolean(true);
-    
-    class MyOrganizer extends HdfsSortedOplogOrganizer {
-      public MyOrganizer(HdfsRegionManager region, int bucketId) throws IOException {
-        super(region, bucketId);
-      }
-      public synchronized Compactor getCompactor() {
-        return new MyCompactor();
-      }
-      class MyCompactor extends HoplogCompactor {
-        public long fillCompactionHoplog(boolean isMajor, List<TrackedReference<Hoplog>> targets, Hoplog output)
-            throws IOException, InterruptedException {
-          barrier.set(false);
-          TimeUnit.SECONDS.sleep(5 * HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT);
-          long byteCount =0;
-          try {
-            byteCount = super.fillCompactionHoplog(isMajor, targets, output, -1);
-          } catch (ForceReattemptException e) {
-            // we do not expect this in a unit test. 
-          }
-          return byteCount;
-        }
-      }
-    }
-    
-    HdfsSortedOplogOrganizer organizer = new MyOrganizer(regionManager, 0);
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < 4; i++) {
-      items.clear();
-      items.add(new TestEvent("1" + i, "1" + i));
-      organizer.flush(items.iterator(), items.size());
-    }
-
-    final HoplogCompactor compactor = (HoplogCompactor) organizer.getCompactor();
-    ExecutorService service = Executors.newCachedThreadPool();
-    service.execute(new Runnable() {
-      public void run() {
-        try {
-          compactor.compact(false, false);
-        } catch (Exception e) {
-        }
-      }
-    });
-    
-    final AtomicLong start = new AtomicLong(0);
-    final AtomicLong end = new AtomicLong(0);
-    service.execute(new Runnable() {
-      public void run() {
-        while (barrier.get()) {
-          try {
-            TimeUnit.MILLISECONDS.sleep(50);
-          } catch (InterruptedException e) {
-            e.printStackTrace();
-          }
-        }
-        
-        start.set(System.currentTimeMillis());
-        compactor.suspend();
-        end.set(System.currentTimeMillis());
-      }
-    });
-    
-    for (long i = 0; i < 5; i++) {
-      if (end.get() == 0) {
-        TimeUnit.MILLISECONDS.sleep(HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT / 2);
-      } else {
-        break;
-      }
-    }
-    
-    assertTrue(end.get() - start.get() < 100 + HoplogConfig.SUSPEND_MAX_WAIT_MS_DEFAULT);
-  }
-  
-  public static class TestHoplog extends AbstractHoplog {
-    long size;
-    long creationTime;
-    TestHoplog(HDFSStoreImpl store, long size) throws IOException {
-      this(store, size, 0);
-    }
-    
-    TestHoplog(HDFSStoreImpl store, long size, long creationTime) throws IOException {
-      super(store, new Path("1-" + creationTime + "-1.hop"), null);
-      this.size = size;
-      this.creationTime = creationTime;
-    }
-    
-    @Override
-    public long getSize() {
-      return size;
-    }
-    @Override
-    public long getModificationTimeStamp() {
-      if (creationTime > 0) {
-        return creationTime;
-      }
-      return super.getModificationTimeStamp();
-    }
-    @Override
-    public String toString() {
-      long name = size -  TEN_MB;
-      if (name < 0) name = size - (TEN_MB / 1024);
-      return name + "";
-    }
-    public boolean isClosed() {
-      return false;
-    }
-    public void close() throws IOException {
-    }
-    public HoplogReader getReader() throws IOException {
-      return null;
-    }
-    public HoplogWriter createWriter(int keys) throws IOException {
-      return null;
-    }
-    public void close(boolean clearCache) throws IOException {
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java
deleted file mode 100644
index fe15305..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/GFKeyJUnitTest.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-
-import junit.framework.TestCase;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class GFKeyJUnitTest extends TestCase {
-  public void testSerde() throws Exception {
-    String str = "str";
-    GFKey key = new GFKey();
-    key.setKey(str);
-    
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    DataOutputStream dos = new DataOutputStream(baos);
-    key.write(dos);
-    
-    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
-    DataInputStream dis = new DataInputStream(bais);
-    key.readFields(dis);
-    
-    assertEquals(str, key.getKey());
-  }
-  
-  public void testCompare() {
-    GFKey keya = new GFKey();
-    keya.setKey("a");
-    
-    GFKey keyb = new GFKey();
-    keyb.setKey("b");
-    
-    assertEquals(-1, keya.compareTo(keyb));
-    assertEquals(1, keyb.compareTo(keya));
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java
deleted file mode 100644
index 5ebb00e..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HDFSSplitIteratorJUnitTest.java
+++ /dev/null
@@ -1,265 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
-import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.BaseHoplogTestCase;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HFileSortedOplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSSplitIteratorJUnitTest extends BaseHoplogTestCase {
-  public void test1Hop1BlockIter() throws Exception {
-    Path path = new Path(testDataDir, "region/0/1-1-1.hop");
-    Hoplog oplog = new HFileSortedOplog(hdfsStore, path, blockCache, stats,
-        storeStats);
-    createHoplog(10, oplog);
-
-    Path[] paths = {path};
-    long[] starts = {0};
-    long[] lengths = {oplog.getSize()};
-    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
-        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-    
-    int count = 0;
-    while (iter.hasNext()) {
-      boolean success = iter.next();
-      assertTrue(success);
-      assertEquals("key-" + count, new String((byte[])iter.getKey()));
-      count++;
-    }
-    assertEquals(10, count);
-  }
-  
-  public void test1HopNBlockIter() throws Exception {
-    Path path = new Path(testDataDir, "region/0/1-1-1.hop");
-    Hoplog oplog = new HFileSortedOplog(hdfsStore, path,
-        blockCache, stats, storeStats);
-    createHoplog(2000, oplog);
-    
-    FileSystem fs = hdfsStore.getFileSystem();
-    Reader reader = HFile.createReader(fs, path, new CacheConfig(fs.getConf()));
-    BlockIndexReader bir = reader.getDataBlockIndexReader();
-    int blockCount = bir.getRootBlockCount();
-    reader.close();
-    
-    // make sure there are more than 1 hfile blocks in the hoplog
-    assertTrue(1 < blockCount);
-
-    Path[] paths = {path};
-    long half = oplog.getSize()/2;
-    long[] starts = {0};
-    long[] lengths = {half};
-    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
-        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-    
-    int count = 0;
-    while (iter.hasNext()) {
-      boolean success = iter.next();
-      assertTrue(success);
-      assertEquals("key-" + (count + 100000), new String((byte[])iter.getKey()));
-      count++;
-    }
-    // the number of iterations should be less than number of keys inserted in
-    // the hoplog
-    assertTrue(count < 2000 && count > 0);
-
-    paths = new Path[] {path};
-    starts = new long[] {half + 1};
-    lengths = new long[] {oplog.getSize()};
-    iter = HDFSSplitIterator.newInstance(
-        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-    
-    while (iter.hasNext()) {
-      boolean success = iter.next();
-      assertTrue(success);
-      assertEquals("key-" + (count + 100000), new String((byte[])iter.getKey()));
-      count++;
-    }
-    assertEquals(2000, count);
-
-    paths = new Path[] {path, path};
-    starts = new long[] {0, half + 1};
-    lengths = new long[] {half, oplog.getSize()};
-    iter = HDFSSplitIterator.newInstance(
-        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-    
-    count = 0;
-    while (iter.hasNext()) {
-      boolean success = iter.next();
-      assertTrue(success);
-      assertEquals("key-" + (count + 100000), new String((byte[])iter.getKey()));
-      count++;
-    }
-    assertEquals(2000, count);
-  }
-
-  /*
-   * This tests iterates over 3 hoplog files. The three hoplog files have the
-   * same content. Duplicate keys should not get discarded
-   */
-  public void testNHoplogNBlockIter() throws Exception {
-    Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
-    Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
-        blockCache, stats, storeStats);
-    createHoplog(2000, oplog);
-    
-    FileSystem fs = hdfsStore.getFileSystem();
-    Reader reader = HFile.createReader(fs, path1, new CacheConfig(fs.getConf()));
-    BlockIndexReader bir = reader.getDataBlockIndexReader();
-    int blockCount = bir.getRootBlockCount();
-    reader.close();
-    
-    // make sure there are more than 1 hfile blocks in the hoplog
-    assertTrue(1 < blockCount);
-    
-    Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
-    oplog = new HFileSortedOplog(hdfsStore, path2,
-        blockCache, stats, storeStats);
-    createHoplog(2000, oplog);
-
-    Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
-    oplog = new HFileSortedOplog(hdfsStore, path3,
-        blockCache, stats, storeStats);
-    createHoplog(2000, oplog);
-    
-    Path[] paths = {path1, path2, path3, path1, path2, path3};
-    long half = oplog.getSize()/2;
-    long[] starts = {0, 0, 0, half + 1, half + 1, half + 1};
-    long[] lengths = {half, half, half, oplog.getSize(), oplog.getSize(), oplog.getSize()};
-    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
-        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-    
-    int[] keyCounts = new int[2000];
-    while (iter.hasNext()) {
-      boolean success = iter.next();
-      assertTrue(success);
-      String key = new String((byte[])iter.getKey()).substring("key-".length());
-      keyCounts[Integer.valueOf(key) - 100000] ++;
-    }
-    
-    for (int i : keyCounts) {
-      assertEquals(3, i);
-    }
-  }
-  
-  public void testMRLikeNHopIter() throws Exception {
-    Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
-    Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
-        blockCache, stats, storeStats);
-    createHoplog(10, oplog);
-    
-    Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
-    oplog = new HFileSortedOplog(hdfsStore, path2,
-        blockCache, stats, storeStats);
-    createHoplog(10, oplog);
-    
-    Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
-    oplog = new HFileSortedOplog(hdfsStore, path3,
-        blockCache, stats, storeStats);
-    createHoplog(10, oplog);
-    
-    Path[] paths = {path1, path2, path3};
-    long[] starts = {0, 0, 0};
-    long[] lengths = {oplog.getSize(), oplog.getSize(), oplog.getSize()};
-    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
-        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-    
-    int[] keyCounts = new int[10];
-    while (iter.hasNext()) {
-      boolean success = iter.next();
-      assertTrue(success);
-      // extra has next before key read
-      iter.hasNext(); 
-      String key = new String((byte[])iter.getKey()).substring("key-".length());
-      System.out.println(key);
-      keyCounts[Integer.valueOf(key)] ++;
-    }
-    
-    for (int i : keyCounts) {
-      assertEquals(3, i);
-    }
-  }
-  
-  public void test1Hop1BlockIterSkipDeletedHoplogs() throws Exception {
-    FileSystem fs = hdfsStore.getFileSystem();
-    Path path = new Path(testDataDir, "region/0/1-1-1.hop");
-    Hoplog oplog = new HFileSortedOplog(hdfsStore, path,
-        blockCache, stats, storeStats);
-    createHoplog(10, oplog);
-
-    Path[] paths = {path};
-    long[] starts = {0};
-    long[] lengths = {oplog.getSize()};
-    
-    //Delete the Hoplog file
-    fs.delete(path, true);
-    
-    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
-        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-    assertFalse(iter.hasNext());
-    
-  }
-  
-  public void testMRLikeNHopIterSkipDeletedHoplogs() throws Exception {
-    FileSystem fs = hdfsStore.getFileSystem();
-    //Create Hoplogs
-    Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
-    Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
-        blockCache, stats, storeStats);
-    createHoplog(10, oplog);
-    
-    Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
-    oplog = new HFileSortedOplog(hdfsStore, path2,
-        blockCache, stats, storeStats);
-    createHoplog(10, oplog);
-    
-    Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
-    oplog = new HFileSortedOplog(hdfsStore, path3,
-        blockCache, stats, storeStats);
-    createHoplog(10, oplog);
-    
-    Path[] paths = {path1, path2, path3};
-    long[] starts = {0, 0, 0};
-    long[] lengths = {oplog.getSize(), oplog.getSize(), oplog.getSize()};
-    HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
-        hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-    int count = 0;
-    while (iter.hasNext()) {
-      boolean success = iter.next();
-      assertTrue(success);
-      count++;
-    }
-    assertEquals(30, count);
-    
-    for(int i = 0; i < 3; ++i){
-      fs.delete(paths[i], true);
-      iter = HDFSSplitIterator.newInstance(
-          hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
-      count = 0;
-      while (iter.hasNext()) {
-        boolean success = iter.next();
-        assertTrue(success);
-        count++;
-      }
-      assertEquals(20, count);
-      oplog = new HFileSortedOplog(hdfsStore, paths[i],
-          blockCache, stats, storeStats);
-      createHoplog(10, oplog);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
deleted file mode 100644
index a209b6e..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/mapreduce/HoplogUtilJUnitTest.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.BaseHoplogTestCase;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HoplogUtilJUnitTest extends BaseHoplogTestCase {
-  Path regionPath = null;
-  
-  @Override
-  protected void configureHdfsStoreFactory() throws Exception {
-    super.configureHdfsStoreFactory();
-    
-    hsf.setInputFileCountMin(3);
-    hsf.setMinorCompaction(false);
-    hsf.setMajorCompaction(false);
-  }
-  
-  public void testHoplogListingMultiBucket() throws Exception {
-    createHoplogs();
-
-    Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
-        regionPath, hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
-    assertEquals(5, hdfsStore.getFileSystem().listStatus(regionPath).length);
-    assertEquals(15, hoplogs.size());
-  }
-
-  public void testHoplogListingMixFileTypes() throws Exception {
-    createHoplogs();
-
-    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    organizer.getCompactor().compact(false, false);
-
-    Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
-        regionPath, hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
-    assertEquals(7,
-        hdfsStore.getFileSystem().listStatus(new Path(regionPath, "0")).length);
-    assertEquals(15, hoplogs.size());
-  }
-
-  public void testHoplogListingEmptyBucket() throws Exception {
-    createHoplogs();
-
-    hdfsStore.getFileSystem().mkdirs(new Path(regionPath, "100"));
-
-    Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
-        regionPath, hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
-    assertEquals(6, hdfsStore.getFileSystem().listStatus(regionPath).length);
-    assertEquals(15, hoplogs.size());
-  }
-
-  public void testHoplogListingInvalidBucket() throws Exception {
-    createHoplogs();
-
-    hdfsStore.getFileSystem().rename(new Path(regionPath, "0"),
-        new Path(regionPath, "not_a_bucket"));
-
-    Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
-        regionPath, hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
-    assertEquals(5, hdfsStore.getFileSystem().listStatus(regionPath).length);
-    assertEquals(12, hoplogs.size());
-  }
-
-  public void testHoplogListingInvalidFiles() throws Exception {
-    createHoplogs();
-
-    Path bucketPath = new Path(regionPath, "0");
-    FSDataOutputStream stream = hdfsStore.getFileSystem().create(
-        new Path(bucketPath, "not_a_hoplog"));
-    stream.close();
-
-    Collection<FileStatus> hoplogs = HoplogUtil.getAllRegionHoplogs(
-        regionPath, hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
-    assertEquals(4, hdfsStore.getFileSystem().listStatus(bucketPath).length);
-    assertEquals(15, hoplogs.size());
-  }
-
-  public void testTimeRange() throws Exception {
-    createHoplogs();
-    // rename hoplogs for testing purpose
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
-        regionManager, 0);
-    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
-    assertEquals(3, hoplogs.size());
-    hoplogs.get(0).get().rename("0-300-1.hop");
-    hoplogs.get(1).get().rename("0-310-1.hop");
-    hoplogs.get(2).get().rename("0-320-1.hop");
-    organizer.close();
-
-    organizer = new HdfsSortedOplogOrganizer(regionManager, 3);
-    hoplogs = organizer.getSortedOplogs();
-    assertEquals(3, hoplogs.size());
-    hoplogs.get(0).get().rename("0-600-1.hop");
-    hoplogs.get(1).get().rename("0-610-1.hop");
-    hoplogs.get(2).get().rename("0-620-1.hop");
-    organizer.close();
-
-    organizer = new HdfsSortedOplogOrganizer(regionManager, 6);
-    hoplogs = organizer.getSortedOplogs();
-    assertEquals(3, hoplogs.size());
-    hoplogs.get(0).get().rename("0-100-1.hop");
-    hoplogs.get(1).get().rename("0-110-1.hop");
-    hoplogs.get(2).get().rename("0-120-1.hop");
-
-    Collection<FileStatus> filtered = HoplogUtil.getRegionHoplogs(
-        regionPath, hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 300, 305);
-    assertEquals(5, filtered.size());
-    assertTrue(containsHoplogWithName(filtered, "0-300-1.hop"));
-    assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
-    assertTrue(containsHoplogWithName(filtered, "0-600-1.hop"));
-
-    filtered = HoplogUtil.getRegionHoplogs(regionPath,
-        hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 250, 310);
-    assertEquals(6, filtered.size());
-    assertTrue(containsHoplogWithName(filtered, "0-300-1.hop"));
-    assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
-    assertTrue(containsHoplogWithName(filtered, "0-320-1.hop"));
-
-    filtered = HoplogUtil.getRegionHoplogs(regionPath,
-        hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 301, 311);
-    assertEquals(5, filtered.size());
-    assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
-    assertTrue(containsHoplogWithName(filtered, "0-320-1.hop"));
-
-    filtered = HoplogUtil.getRegionHoplogs(regionPath,
-        hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION, 301, 309);
-    assertEquals(4, filtered.size());
-    assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
-    organizer.close();
-  }
-  
-  public void testExcludeSoonCleanedHoplogs() throws Exception {
-    FileSystem fs = hdfsStore.getFileSystem();
-    Path cleanUpIntervalPath = new Path(hdfsStore.getHomeDir(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
-        regionManager, 0);
-    //delete the auto generated clean up interval file   
-    if (fs.exists(cleanUpIntervalPath)){
-      fs.delete(cleanUpIntervalPath, true);
-    }
-    
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    int count = 10;
-    for (int fileCount = 0; fileCount < 3; fileCount++) {
-      items.clear();
-      for (int itemCount = 0; itemCount < count; itemCount++) {
-        items.add(new TestEvent(("key-" + itemCount), "value"));
-      }
-      organizer.flush(items.iterator(), count);
-    }
-    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
-    
-    for(TrackedReference<Hoplog> hoplog : hoplogs) {
-      Path p = new Path(testDataDir, getName() + "/0/" +
-          hoplog.get().getFileName() + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-      fs.createNewFile(p);
-    }
-    Collection<FileStatus> files = HoplogUtil.getAllRegionHoplogs(
-        regionPath, hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(3, files.size());
-    
-    TimeUnit.MINUTES.sleep(2);
-    //No clean up interval file, all expired files will be included
-    files = HoplogUtil.getAllRegionHoplogs(
-        regionPath, hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(3, files.size());
-    
-    
-    long interval = 1 * 60 * 1000;
-    HoplogUtil.exposeCleanupIntervalMillis(fs,cleanUpIntervalPath,interval);
-    
-    files = HoplogUtil.getAllRegionHoplogs(
-        regionPath, hdfsStore.getFileSystem(),
-        AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(0, files.size());
-    organizer.close();  
-  }
-  
-  
-  public void testCheckpointSelection() throws Exception {
-    createHoplogs();
-    // rename hoplogs for testing purpose
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
-        regionManager, 0);
-    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
-    assertEquals(3, hoplogs.size());
-    hoplogs.get(0).get().rename("0-300-1.chop");
-    hoplogs.get(1).get().rename("0-310-1.hop");
-    hoplogs.get(2).get().rename("0-320-1.hop"); // checkpoint file
-    organizer.close();
-    
-    organizer = new HdfsSortedOplogOrganizer(regionManager, 3);
-    hoplogs = organizer.getSortedOplogs();
-    assertEquals(3, hoplogs.size());
-    hoplogs.get(0).get().rename("0-600-1.hop");
-    hoplogs.get(1).get().rename("0-610-1.chop");
-    hoplogs.get(2).get().rename("0-620-1.hop");
-    organizer.close();
-    
-    organizer = new HdfsSortedOplogOrganizer(regionManager, 6);
-    hoplogs = organizer.getSortedOplogs();
-    assertEquals(3, hoplogs.size());
-    hoplogs.get(0).get().rename("0-100-1.hop");
-    hoplogs.get(1).get().rename("0-110-1.hop");
-    hoplogs.get(2).get().rename("0-120-1.chop");
-    
-    Collection<FileStatus> filtered = HoplogUtil.filterHoplogs(
-        hdfsStore.getFileSystem(), regionPath, 290, 305, false);
-    assertEquals(4, filtered.size());
-    assertTrue(containsHoplogWithName(filtered, "0-310-1.hop"));
-    assertTrue(containsHoplogWithName(filtered, "0-600-1.hop"));
-    
-    filtered = HoplogUtil.filterHoplogs(hdfsStore.getFileSystem(),
-        regionPath, 290, 305, true);
-    assertEquals(3, filtered.size());
-    assertTrue(containsHoplogWithName(filtered, "0-300-1.chop"));
-    assertTrue(containsHoplogWithName(filtered, "0-610-1.chop"));
-    assertTrue(containsHoplogWithName(filtered, "0-120-1.chop"));
-    organizer.close();
-  }
-  
-  private boolean containsHoplogWithName(Collection<FileStatus> filtered,
-      String name) {
-    for (FileStatus file : filtered) {
-      if (file.getPath().getName().equals(name)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  private void createHoplogs() throws IOException, Exception {
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    int count = 10;
-    for (int bucketId = 0; bucketId < 15; bucketId += 3) {
-      HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager,
-          bucketId);
-      for (int fileCount = 0; fileCount < 3; fileCount++) {
-        items.clear();
-        for (int itemCount = 0; itemCount < count; itemCount++) {
-          items.add(new TestEvent(("key-" + itemCount), "value"));
-        }
-        organizer.flush(items.iterator(), count);
-      }
-    }
-  }
-  
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    regionPath = new Path(testDataDir, getName());
-  }
-  
-  @Override 
-  protected void tearDown() throws Exception{
-    FileSystem fs = hdfsStore.getFileSystem();
-    Path cleanUpIntervalPath = new Path(hdfsStore.getHomeDir(),HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
-    if (fs.exists(cleanUpIntervalPath)){
-      fs.delete(cleanUpIntervalPath, true);
-    }  
-    super.tearDown();
-  }
-}


[24/50] [abbrv] incubator-geode git commit: GEODE-429: Remove Cache.createHdfsStoreFactory method

Posted by ds...@apache.org.
GEODE-429: Remove Cache.createHdfsStoreFactory method


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f2390a1a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f2390a1a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f2390a1a

Branch: refs/heads/develop
Commit: f2390a1ada2acbcabac28dd4226a67f7baf924ae
Parents: 74c3156
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 15:05:36 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:23 2015 -0700

----------------------------------------------------------------------
 .../gemstone/gemfire/cache/GemFireCache.java    |   8 -
 .../internal/cache/GemFireCacheImpl.java        |   6 -
 .../internal/cache/xmlcache/CacheCreation.java  |   5 -
 .../HDFSRegionMBeanAttributeJUnitTest.java      | 169 -------------------
 4 files changed, 188 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2390a1a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
index d81d25d..b948c5d 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
@@ -267,12 +267,4 @@ public interface GemFireCache extends RegionService {
    * @param name the name of the HDFSStore to find.
    */
   public HDFSStore findHDFSStore(String name);
-
-   /**
-	* Creates a {@link HDFSStoreFactory} for creating a {@link HDFSStore}
-	* 
-	* @return the HDFS store factory
-	*/
-  public HDFSStoreFactory createHDFSStoreFactory();
-  
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2390a1a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
index 0d4961b..78ea6be 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
@@ -5309,12 +5309,6 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
     }
   }
   
-  @Override
-  public HDFSStoreFactory createHDFSStoreFactory() {
-    // TODO Auto-generated method stub
-    return new HDFSStoreFactoryImpl(this);
-  }
-  
   public HDFSStoreFactory createHDFSStoreFactory(HDFSStoreCreation creation) {
     return new HDFSStoreFactoryImpl(this, creation);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2390a1a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
index 0347d67..e4bea7f 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
@@ -1378,11 +1378,6 @@ public class CacheCreation implements InternalCache, Extensible<Cache> {
   }
   
   @Override
-  public HDFSStoreFactory createHDFSStoreFactory() {
-    // TODO Auto-generated method stub
-    return new HDFSStoreFactoryImpl(this);
-  }
-  @Override
   public HDFSStore findHDFSStore(String storeName) {
     return (HDFSStore)this.hdfsStores.get(storeName);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f2390a1a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
deleted file mode 100644
index 14b61e6..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.management.bean.stats;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Set;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHDFSQueuePersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
-import com.gemstone.gemfire.internal.cache.BucketRegion;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.execute.BucketMovedException;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.HFileStoreStatistics;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.cache.versions.DiskVersionTag;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import com.gemstone.gemfire.management.ManagementService;
-import com.gemstone.gemfire.management.RegionMXBean;
-import com.gemstone.gemfire.management.internal.ManagementConstants;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-/**
- * Test for verifying HDFS related MBean attributes
- * @author rishim
- *
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSRegionMBeanAttributeJUnitTest extends TestCase {
-
-  public static final String HDFS_STORE_NAME = "HDFSMBeanJUnitTestStore";
-  public static final String REGION_NAME = "HDFSMBeanJUnitTest_Region";
-  protected Path testDataDir;
-  protected Cache cache;
-
-  protected HDFSStoreFactory hsf;
-  protected HDFSStoreImpl hdfsStore;
-  protected Region<Object, Object> region;
-  SortedOplogStatistics stats;
-  HFileStoreStatistics storeStats;
-  BlockCache blockCache;
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-
-    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
-    testDataDir = new Path("test-case");
-
-    cache = createCache();
-
-    configureHdfsStoreFactory();
-    hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
-
-    RegionFactory<Object, Object> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
-//    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
-
-    // regionfactory.setCompressionCodec("Some");
-    PartitionAttributesFactory fac = new PartitionAttributesFactory();
-    fac.setTotalNumBuckets(10);
-
-    regionfactory.setPartitionAttributes(fac.create());
-    region = regionfactory.create(REGION_NAME);
-
-  }
-
-  protected void configureHdfsStoreFactory() throws Exception {
-    hsf = this.cache.createHDFSStoreFactory();
-    hsf.setHomeDir(testDataDir.toString());
-  }
-
-  protected Cache createCache() {
-    CacheFactory cf = new CacheFactory().set("mcast-port", "0").set("log-level", "info");
-    cache = cf.create();
-    return cache;
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    hdfsStore.getFileSystem().delete(testDataDir, true);
-    cache.close();
-    super.tearDown();
-  }
-
-  public void testStoreUsageStats() throws Exception {
-
-    PartitionedRegion parRegion = (PartitionedRegion)region;
-   
-
-      ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-      for (int i = 0; i < 100; i++) {
-        String key = ("key-" + (i * 100 + i));
-        String value = ("value-" + System.nanoTime());
-        parRegion.put(key, value);
-        
-        items.add(new TestEvent(key, value));
-      }
-
-    //Dont want to create
-    Set<BucketRegion> localPrimaryBucketRegions = parRegion.getDataStore().getAllLocalPrimaryBucketRegions();
-    BucketRegion flushingBucket=  localPrimaryBucketRegions.iterator().next();
-    HoplogOrganizer hoplogOrganizer = getOrganizer(parRegion,flushingBucket.getId());
-    hoplogOrganizer.flush(items.iterator(), 100);
-    
-    GemFireCacheImpl cache = GemFireCacheImpl.getExisting();
-    ManagementService service = ManagementService.getExistingManagementService(cache);
-    RegionMXBean bean = service.getLocalRegionMBean(region.getFullPath());
-    
-
-    //assertTrue(bean.getEntryCount() == ManagementConstants.ZERO);
-    assertTrue(bean.getEntrySize() == ManagementConstants.NOT_AVAILABLE_LONG);
-    assertTrue(0 < bean.getDiskUsage());
-    
-  }
-  
-  
-  private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) {
-    BucketRegion br = region.getDataStore().getLocalBucketById(bucketId);
-    if (br == null) {
-      // got rebalanced or something
-      throw new BucketMovedException("Bucket region is no longer available. BucketId: " + 
-          bucketId +  " RegionPath: "  +  region.getFullPath());
-    }
-
-    return br.getHoplogOrganizer();
-  }
- 
-  
-  public static class TestEvent extends SortedHDFSQueuePersistedEvent implements Serializable {
-    private static final long serialVersionUID = 1L;
-    
-    Object key;
-    
-    public TestEvent(String k, String v) throws Exception {
-      this(k, v, Operation.PUT_IF_ABSENT);
-    }
-
-    public TestEvent(String k, String v, Operation op) throws Exception {
-      super(v, op, (byte) 0x02, false, new DiskVersionTag(), BlobHelper.serializeToBlob(k), 0);
-      this.key = k; 
-    }
-  }
-
-
-}


[22/50] [abbrv] incubator-geode git commit: GEODE-429: Remove HdfsStore Junit and Dunits

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java
deleted file mode 100644
index 011d82b..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCompactionManagerJUnitTest.java
+++ /dev/null
@@ -1,449 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSCompactionManager.CompactionRequest;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSCompactionManagerJUnitTest extends BaseHoplogTestCase {
-  /**
-   * Tests queueing of major and minor compaction requests in respective queues
-   */
-  public void testMinMajCompactionIsolation() throws Exception {
-    // no-op compactor
-    Compactor compactor = new AbstractCompactor() {
-      Object minor = new Object();
-      Object major = new Object();
-      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
-        try {
-          if (isMajor) {
-            synchronized (major) {
-              major.wait();
-            }
-          } else {
-            synchronized (minor) {
-              minor.wait();
-            }
-          }
-        } catch (InterruptedException e) {
-          e.printStackTrace();
-        }
-        return true;
-      }
-    };
-
-    // compaction is disabled. all requests will wait in queue
-    HDFSCompactionManager instance = HDFSCompactionManager.getInstance(hdfsStore);
-    alterMinorCompaction(hdfsStore, true);
-    alterMajorCompaction(hdfsStore, true);
-    
-    assertEquals(0, instance.getMinorCompactor().getActiveCount());
-    assertEquals(0, instance.getMajorCompactor().getActiveCount());
-    
-    //minor request
-    CompactionRequest cr = new CompactionRequest("region", 0, compactor, false);
-    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-    //major request
-    cr = new CompactionRequest("region", 0, compactor, true);
-    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-    
-    //wait for requests to get in queue
-    TimeUnit.MILLISECONDS.sleep(50);
-    assertEquals(1, instance.getMinorCompactor().getActiveCount());
-    assertEquals(1, instance.getMajorCompactor().getActiveCount());
-  }
-
-  /**
-   * Tests compaction pause. Once compaction is stopped, requests will 
-   * start getting rejected
-   */
-  public void testAlterAutoMinorCompaction() throws Exception {
-    // each new compaction execution increments counter by 1. this way track how many pending tasks
-    final AtomicInteger totalExecuted = new AtomicInteger(0);
-    Compactor compactor = new AbstractCompactor() {
-      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
-        totalExecuted.incrementAndGet();
-        return true;
-      }
-    };
-
-    // compaction is enabled. submit requests and after some time counter should be 0
-    alterMinorCompaction(hdfsStore, true);
-    CompactionRequest cr = new CompactionRequest("region", 0, compactor, false);
-    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-    cr = new CompactionRequest("region", 1, compactor, false);
-    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-
-    int totalWait = 20;
-    while (totalWait > 0 && 2 != totalExecuted.get()) {
-      // wait for operations to complete. The execution will terminate as soon as possible
-      System.out.println("waiting one small cycle for dummy request to complete");
-      TimeUnit.MILLISECONDS.sleep(50);
-      totalWait--;
-    }
-    assertEquals(2, totalExecuted.get());
-
-    // so compaction works. now disable comapction and submit large number of requests till rejected
-    // execution counter should not increase
-    alterMinorCompaction(hdfsStore, false);
-    boolean success = false;
-    int i = 0;
-    do {
-      cr = new CompactionRequest("region", ++i, compactor, false);
-      success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
-    } while (success);
-
-    TimeUnit.MILLISECONDS.sleep(500);
-    assertEquals(2, totalExecuted.get());
-  }
-  public void testAlterAutoMajorCompaction() throws Exception {
-    // each new compaction execution increments counter by 1. this way track how many pending tasks
-    final AtomicInteger totalExecuted = new AtomicInteger(0);
-    Compactor compactor = new AbstractCompactor() {
-      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
-        totalExecuted.incrementAndGet();
-        return true;
-      }
-    };
-    
-    // compaction is enabled. submit requests and after some time counter should be 0
-    alterMajorCompaction(hdfsStore, true);
-    CompactionRequest cr = new CompactionRequest("region", 0, compactor, true);
-    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-    cr = new CompactionRequest("region", 1, compactor, true);
-    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-    
-    int totalWait = 20;
-    while (totalWait > 0 && 2 != totalExecuted.get()) {
-      // wait for operations to complete. The execution will terminate as soon as possible
-      System.out.println("waiting one small cycle for dummy request to complete");
-      TimeUnit.MILLISECONDS.sleep(50);
-      totalWait--;
-    }
-    assertEquals(2, totalExecuted.get());
-    
-    // so compaction works. now disable comapction and submit large number of requests till rejected
-    // execution counter should not increase
-    alterMajorCompaction(hdfsStore, false);
-    boolean success = false;
-    int i = 0;
-    do {
-      cr = new CompactionRequest("region", ++i, compactor, true);
-      success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
-      System.out.println("success: " + success);
-    } while (success);
-    
-    TimeUnit.MILLISECONDS.sleep(500);
-    assertEquals(2, totalExecuted.get());
-  }
-  
-  /**
-   * Tests duplicate compaction requests do not cause rejection
-   */
-   public void testDuplicateRequests() throws Exception {
-    final AtomicBoolean barrierOpen = new AtomicBoolean(false);
-    class TestCompactor extends AbstractCompactor {
-      AtomicBoolean busy = new AtomicBoolean(false);
-      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
-        synchronized (barrierOpen) {
-          busy.set(true);
-          if (barrierOpen.get()) {
-            return false;
-          }
-          try {
-            barrierOpen.wait();
-          } catch (InterruptedException e) {
-            return false;
-          }
-          busy.set(false);
-        }
-        return true;
-      }
-      public boolean isBusy(boolean isMajor) {return busy.get();}
-    };
-    
-    System.setProperty(HoplogConfig.COMPCATION_QUEUE_CAPACITY, "10");
-
-    alterMinorCompaction(hdfsStore, true);
-    alterMajorCompaction(hdfsStore, true);
-    // capacity is 10, thread num is 2, so only the first 12 request will be
-    // submitted
-    for (int i = 0; i < 15; i++) {
-      CompactionRequest cr = new CompactionRequest("region", i, new TestCompactor(), true);
-      boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
-      if (success) {
-        assertTrue("failed for " + i, i < 12);
-      } else {
-        assertTrue("failed for " + i, i >= 12);
-      }
-    }
-    
-    synchronized (barrierOpen) {
-      barrierOpen.set(true);
-      barrierOpen.notifyAll();
-    }
-    TimeUnit.MILLISECONDS.sleep(100);
-    barrierOpen.set(false);
-    
-    HDFSCompactionManager.getInstance(hdfsStore).reset();
-    TestCompactor compactor = new TestCompactor();
-    for (int i = 0; i < 10; i++) {
-      TimeUnit.MILLISECONDS.sleep(20);
-      CompactionRequest cr = new CompactionRequest("region", 0, compactor, true);
-      boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
-      if (success) {
-        assertTrue("failed for " + i, i < 2);
-      } else {
-        assertTrue("failed for " + i, i > 0);
-      }
-    }
-  }
-
-  public void testForceCompactionWithAutoDisabled() throws Exception {
-    HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("1"), ("1-1")));
-    organizer.flush(items.iterator(), items.size());
-
-    items.clear();
-    items.add(new TestEvent(("2"), ("2-1")));
-    organizer.flush(items.iterator(), items.size());
-    
-    FileStatus[] files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    assertEquals(0, files.length);
-    
-    CompactionRequest cr = new CompactionRequest(getName(), 0, organizer.getCompactor(), true);
-    HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-    TimeUnit.MILLISECONDS.sleep(500);
-
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    assertEquals(0, files.length);
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(0, files.length);
-    
-    organizer.forceCompaction(true);
-    TimeUnit.MILLISECONDS.sleep(500);
-    
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    assertEquals(1, files.length);
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-  }
-
-  /**
-   * Test force major compaction completes on version upgrade even when there is only one hoplog
-   */
-  public void testForceCompaction() throws Exception {
-    HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("1"), ("1-1")));
-    organizer.flush(items.iterator(), items.size());
-
-    items.clear();
-    items.add(new TestEvent(("2"), ("2-1")));
-    organizer.flush(items.iterator(), items.size());
-    
-    FileStatus[] files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    assertEquals(0, files.length);
-    
-    // isForced is true for user submitted compaction requests (through system procedure)
-    // we do not want to compact an already compacted file
-    CompactionRequest cr = new CompactionRequest(getName(), 0, organizer.getCompactor(), true, true/*isForced*/);
-    Future<CompactionStatus> status = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-    status.get().equals(true);
-
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    assertEquals(1, files.length);
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-
-    // second request to force compact does not do anything
-    status = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-    status.get().equals(false);
-    
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    assertEquals(1, files.length);
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-
-    // upon version upgrade force compaction is allowed
-    cr = new CompactionRequest(getName(), 0, organizer.getCompactor(), true, true, true);
-    status = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr);
-    status.get().equals(true);
-    
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-    files = getBucketHoplogs(getName() + "/0", AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(3, files.length); // + 1 for old major hoplog
-  }
-
-  /**
-   * Test successful sequential submission
-   */
-  public void testSameBucketSeqRequest() throws Exception {
-    final AtomicInteger counter = new AtomicInteger(0);
-    Compactor compactor = new AbstractCompactor() {
-      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
-        counter.set(1);
-        return true;
-      }
-    };
-
-    HDFSCompactionManager.getInstance(hdfsStore).reset();
-    alterMinorCompaction(hdfsStore, true);
-    alterMajorCompaction(hdfsStore, true);
-    CompactionRequest cr = new CompactionRequest("region", 0, compactor, false);
-    assertEquals(0, counter.get());
-    boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
-    assertEquals(true, success);
-    while (!counter.compareAndSet(1, 0)) {
-      TimeUnit.MILLISECONDS.sleep(20);
-    }
-    
-    assertEquals(0, counter.get());
-    success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
-    assertEquals(true, success);
-    for (int i = 0; i < 10; i++) {
-      TimeUnit.MILLISECONDS.sleep(20);
-      if (counter.get() == 1) {
-        break;
-      }
-    }
-    assertEquals(1, counter.get());
-  }
-  
-  public void testAlterMinorThreadsIncrease() throws Exception {
-    doAlterCompactionThreads(false, false);
-  }
-  public void testAlterMinorThreadsDecrease() throws Exception {
-    doAlterCompactionThreads(false, true);
-  }
-  public void testAlterMajorThreadsIncrease() throws Exception {
-    doAlterCompactionThreads(true, false);
-  }
-  public void testAlterMajorThreadsDecrease() throws Exception {
-    doAlterCompactionThreads(true, true);
-  }
-  
-  public void doAlterCompactionThreads(final boolean testMajor, boolean decrease) throws Exception {
-    final AtomicBoolean barrierOpen = new AtomicBoolean(false);
-    final AtomicInteger counter = new AtomicInteger(0);
-    class TestCompactor extends AbstractCompactor {
-      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
-        synchronized (barrierOpen) {
-          if ((testMajor && !isMajor)  || (!testMajor && isMajor)) {
-            return true;
-          }
-          if (barrierOpen.get()) {
-            return false;
-          }
-          try {
-            barrierOpen.wait();
-          } catch (InterruptedException e) {
-            return false;
-          }
-          counter.incrementAndGet();
-        }
-        return true;
-      }
-    };
-    
-    System.setProperty(HoplogConfig.COMPCATION_QUEUE_CAPACITY, "1");
-
-    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-    int defaultThreadCount = 10;
-    if (testMajor) {
-      alterMajorCompaction(hdfsStore, true);
-      defaultThreadCount = 2;
-      mutator.setMajorCompactionThreads(15);
-      if (decrease) {
-        mutator.setMajorCompactionThreads(1);
-      }
-    } else {
-      alterMinorCompaction(hdfsStore, true);
-      mutator.setMinorCompactionThreads(15);
-      if (decrease) {
-        mutator.setMinorCompactionThreads(1);
-      }
-    }
-    
-    // capacity is 1, thread num is 10 or 2, so only the first 11 or 3 request will be
-    // submitted
-    cache.getLogger().info("<ExpectedException action=add>java.util.concurrent.RejectedExecutionException</ExpectedException>");
-    for (int i = 0; i < 15; i++) {
-      CompactionRequest cr = new CompactionRequest("region", i, new TestCompactor(), testMajor);
-      boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
-      if (success) {
-        assertTrue("failed for " + i, i <= defaultThreadCount);
-      } else {
-        assertTrue("failed for " + i, i > defaultThreadCount);
-      }
-    }
-    
-    TimeUnit.MILLISECONDS.sleep(500);
-    assertEquals(0, counter.get());
-    synchronized (barrierOpen) {
-      barrierOpen.set(true);
-      barrierOpen.notifyAll();
-    }
-    TimeUnit.MILLISECONDS.sleep(500);
-    assertEquals(defaultThreadCount, counter.get());
-    
-    hdfsStore.alter(mutator);
-
-    counter.set(0);
-    barrierOpen.set(false);
-    for (int i = 0; i < 15; i++) {
-      TimeUnit.MILLISECONDS.sleep(100);
-      CompactionRequest cr = new CompactionRequest("region", i, new TestCompactor(), testMajor);
-      boolean success = HDFSCompactionManager.getInstance(hdfsStore).submitRequest(cr) != null;
-      if (decrease) {
-        if (i > 3) {
-          assertFalse("failed for " + i, success);
-        }
-      } else {
-        assertTrue("failed for " + i, success);
-      }
-    }
-    TimeUnit.MILLISECONDS.sleep(500);
-    synchronized (barrierOpen) {
-      barrierOpen.set(true);
-      barrierOpen.notifyAll();
-    }
-    TimeUnit.MILLISECONDS.sleep(500);
-    if (decrease) {
-      assertTrue(counter.get() < 4);
-    } else {
-      assertEquals(15, counter.get());
-    }
-
-    cache.getLogger().info("<ExpectedException action=remove>java.util.concurrent.RejectedExecutionException</ExpectedException>");
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java
deleted file mode 100644
index dc7b987..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSRegionDirectorJUnitTest.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.internal.HoplogListenerForRegion;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSRegionDirectorJUnitTest extends BaseHoplogTestCase {
-  public void testDirector() throws Exception {
-    int bucketId = 0;
-
-    HdfsRegionManager mgr = regionManager;
-    
-    // no buckets have been created so far.
-    assertEquals(0, director.getBucketCount("/" + getName()));
-
-    // one bucket created
-    mgr.create(bucketId);
-    assertEquals(1, director.getBucketCount("/" + getName()));
-
-    // close bucket test
-    mgr.close(bucketId);
-    
-    // all buckets have been closed.
-    assertEquals(0, director.getBucketCount("/" + getName()));
-
-    mgr.create(bucketId);
-    assertEquals(1, director.getBucketCount("/" + getName()));
-    director.clear("/" + getName());
-    try {
-      assertEquals(0, director.getBucketCount("/" + getName()));
-      fail("The region is no longer managed, hence an exception is expected");
-    } catch (IllegalStateException e) {
-      // exception expected as the region is no longer managed
-    }
-  }
-  
-  public void testCompactionEvents() throws Exception {
-    final AtomicInteger counter = new AtomicInteger(0);
-    HoplogListener myListener = new HoplogListener() {
-      public void hoplogDeleted(String regionFolder, int bucketId, Hoplog... oplogs)
-          throws IOException {
-      }
-      public void hoplogCreated(String regionFolder, int bucketId, Hoplog... oplogs)
-          throws IOException {
-      }
-      public void compactionCompleted(String region, int bucket, boolean isMajor) {
-        counter.incrementAndGet();
-      }
-    };
-
-    HoplogListenerForRegion listenerManager = ((LocalRegion)region).getHoplogListener();
-    listenerManager.addListener(myListener);
-    
-    HoplogOrganizer bucket = regionManager.create(0);
-    // #1
-    ArrayList<PersistedEventImpl> items = new ArrayList<PersistedEventImpl>();
-    items.add(new TestEvent("1", "1"));
-    bucket.flush(items.iterator(), items.size());
-
-    // #2
-    items.clear();
-    items.add(new TestEvent("2", "1"));
-    bucket.flush(items.iterator(), items.size());
-
-    // #3
-    items.clear();
-    items.add(new TestEvent("3", "1"));
-    bucket.flush(items.iterator(), items.size());
-    
-    // #4
-    items.clear();
-    items.add(new TestEvent("4", "1"));
-    bucket.flush(items.iterator(), items.size());
-    
-    bucket.getCompactor().compact(false, false);
-    assertEquals(1, counter.get());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java
deleted file mode 100644
index 1d17232..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSStatsJUnitTest.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.util.ArrayList;
-
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSStatsJUnitTest extends BaseHoplogTestCase {
-  public void testStoreUsageStats() throws Exception {
-    HoplogOrganizer bucket = regionManager.create(0);
-    
-    long oldUsage = 0;
-    assertEquals(oldUsage, stats.getStoreUsageBytes());
-
-    for (int j = 0; j < 5; j++) {
-      ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-      for (int i = 0; i < 100; i++) {
-        String key = ("key-" + (j * 100 + i));
-        String value = ("value-" + System.nanoTime());
-        items.add(new TestEvent(key, value));
-      }
-      bucket.flush(items.iterator(), 100);
-    }
-    
-    assertTrue(0 < stats.getStoreUsageBytes());
-    oldUsage = stats.getStoreUsageBytes();
-    
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    assertEquals(2, stats.getStoreUsageBytes() / oldUsage);
-    
-    organizer.close();
-    assertEquals(1, stats.getStoreUsageBytes() / oldUsage);
-  }
-  
-  public void testWriteStats() throws Exception {
-    HoplogOrganizer bucket = regionManager.create(0);
-
-    // validate flush stats
-    // flush and create many hoplogs and execute one compaction cycle also
-    // 5 hoplogs, total 500 keys
-    assertEquals(0, stats.getFlush().getCount());
-    assertEquals(0, stats.getFlush().getBytes());
-    assertEquals(0, stats.getActiveFileCount());
-    int bytesSent = 0;
-    for (int j = 0; j < 5; j++) {
-      ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-      for (int i = 0; i < 100; i++) {
-        String key = ("key-" + (j * 100 + i));
-        String value = ("value-" + System.nanoTime());
-        items.add(new TestEvent(key, value));
-        bytesSent += (key.getBytes().length + value.getBytes().length);
-      }
-      bucket.flush(items.iterator(), 100);
-
-      // verify stats show
-      assertEquals(j + 1, stats.getFlush().getCount());
-      assertTrue(stats.getFlush().getBytes() > bytesSent);
-      assertEquals(j + 1, stats.getActiveFileCount());
-    }
-
-    // verify compaction stats
-    assertEquals(0, stats.getMinorCompaction().getCount());
-    assertEquals(0, stats.getMinorCompaction().getBytes());
-    assertEquals(0, stats.getInactiveFileCount());
-    bucket.getCompactor().compact(false, false);
-    assertEquals(1, stats.getMinorCompaction().getCount());
-    assertEquals(1, stats.getActiveFileCount());
-    assertEquals(0, stats.getInactiveFileCount());
-    assertEquals(stats.getMinorCompaction().getBytes(), stats.getFlush()
-        .getBytes());
-  }
-  
-  public void testInactiveFileStats() throws Exception {
-    // steps 
-    // create files -> validate active and inactive file count
-    // -> increment reference by using scanner-> compact -> verify active and inactive file count 
-    HoplogOrganizer bucket = regionManager.create(0);
-    assertEquals(0, stats.getActiveFileCount());
-    assertEquals(0, stats.getInactiveFileCount());
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int j = 0; j < 5; j++) {
-      items.clear();
-      for (int i = 0; i < 100; i++) {
-        String key = ("key-" + (j * 100 + i));
-        String value = ("value-" + System.nanoTime());
-        items.add(new TestEvent(key, value));
-      }
-      bucket.flush(items.iterator(), 100);
-    }
-    
-    assertEquals(5, stats.getActiveFileCount());
-    assertEquals(0, stats.getInactiveFileCount());
-    
-    HoplogIterator<byte[], PersistedEventImpl> scanner = bucket.scan();
-    bucket.getCompactor().compact(true, false);
-    assertEquals(1, stats.getActiveFileCount());
-    assertEquals(5, stats.getInactiveFileCount());
-    
-    scanner.close();
-    assertEquals(1, stats.getActiveFileCount());
-    assertEquals(0, stats.getInactiveFileCount());
-  }
-
-  public void testReadStats() throws Exception {
-    HoplogOrganizer<SortedHoplogPersistedEvent> bucket = regionManager.create(0);
-
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < 100; i++) {
-      items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
-    }
-    bucket.flush(items.iterator(), 100);
-    
-    // validate read stats
-    assertEquals(0, stats.getRead().getCount());
-    assertEquals(0, stats.getRead().getBytes());
-    // number of bytes read must be greater than size of key and value and must be increasing
-    int bytesRead = "key-1".getBytes().length + "value=1233232".getBytes().length;
-    for (int i = 0; i < 5; i++) {
-      long previousRead = stats.getRead().getBytes();
-      PersistedEventImpl e = bucket.read(BlobHelper.serializeToBlob("key-" + i));
-      assertNotNull(e);
-      assertEquals(i + 1, stats.getRead().getCount());
-      assertTrue( (bytesRead + previousRead) < stats.getRead().getBytes());
-    }
-    
-    //Make sure the block cache stats are being updated.
-//    assertTrue(storeStats.getBlockCache().getMisses() > 0);
-//    assertTrue(storeStats.getBlockCache().getBytesCached() > 0);
-//    assertTrue(storeStats.getBlockCache().getCached() > 0);
-    
-    //Do a duplicate read to make sure we get a hit in the cache
-//    bucket.read(BlobHelper.serializeToBlob("key-" + 0));
-//    assertTrue(storeStats.getBlockCache().getHits() > 0);
-  }
-
-  public void testBloomStats() throws Exception {
-    HoplogOrganizer bucket = regionManager.create(0);
-
-    // create 10 hoplogs
-    for (int j = 0; j < 5; j++) {
-      ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-      for (int i = 0; i < 100; i++) {
-        String key = ("key-" + (j * 100 + i));
-        String value = ("value-" + System.nanoTime());
-        items.add(new TestEvent(key, value));
-      }
-      bucket.flush(items.iterator(), 100);
-    }
-
-    // initially bloom stat will be zero
-    // reading key in first hop will increase bloom hit by 1 (key 0 to 99)
-    // reading key in 5 hoplog will increase bloom hit by 5 (key 400 to 499)
-    assertEquals(0, stats.getBloom().getCount());
-    bucket.read(BlobHelper.serializeToBlob("key-450"));
-    assertEquals(1, stats.getBloom().getCount());
-    bucket.read(BlobHelper.serializeToBlob("key-50"));
-    assertEquals(6, stats.getBloom().getCount());
-  }
-  
-  public void testScanStats() throws Exception {
-    HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(
-          testDataDir, "H-1-1.hop"),blockCache, stats, storeStats);
-    createHoplog(5, hoplog);
-    
-    // initially scan stats will be zero. creating a scanner should increase
-    // scan iteration stats and bytes. On scanner close scan count should be
-    // incremented
-    assertEquals(0, stats.getScan().getCount());
-    assertEquals(0, stats.getScan().getBytes());
-    assertEquals(0, stats.getScan().getTime());
-    assertEquals(0, stats.getScan().getIterations());
-    assertEquals(0, stats.getScan().getIterationTime());
-    
-    HoplogIterator<byte[], byte[]> scanner = hoplog.getReader().scan();
-    assertEquals(0, stats.getScan().getCount());
-    int count = 0;
-    for (byte[] bs = null; scanner.hasNext(); ) {
-      bs = scanner.next();
-      count += bs.length + scanner.getValue().length;
-    }
-    assertEquals(count, stats.getScan().getBytes());
-    assertEquals(5, stats.getScan().getIterations());
-    assertTrue(0 < stats.getScan().getIterationTime());
-    // getcount will be 0 as scanner.close is not being called
-    assertEquals(0, stats.getScan().getCount());
-    assertEquals(0, stats.getScan().getTime());
-    assertEquals(1, stats.getScan().getInProgress());
-    
-    scanner.close();
-    assertEquals(1, stats.getScan().getCount());
-    assertTrue(0 < stats.getScan().getTime());
-    assertTrue(stats.getScan().getIterationTime() <= stats.getScan().getTime());
-  }
-  
-  /**
-   * Validates two buckets belonging to same region update the same stats
-   */
-  public void testRegionBucketShareStats() throws Exception {
-    HoplogOrganizer bucket1 = regionManager.create(0);
-    HoplogOrganizer bucket2 = regionManager.create(1);
-
-    // validate flush stats
-    assertEquals(0, stats.getFlush().getCount());
-    assertEquals(0, stats.getActiveFileCount());
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < 100; i++) {
-      items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
-    }
-    bucket1.flush(items.iterator(), 100);
-    assertEquals(1, stats.getFlush().getCount());
-    assertEquals(1, stats.getActiveFileCount());
-    items.clear();
-
-    for (int i = 0; i < 100; i++) {
-      items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
-    }
-    bucket2.flush(items.iterator(), 100);
-    assertEquals(2, stats.getFlush().getCount());
-    assertEquals(2, stats.getActiveFileCount());
-  }
-
-  @Override
-  protected Cache createCache() {
-    CacheFactory cf = new CacheFactory().set("mcast-port", "0")
-        .set("log-level", "info")
-        .set("enable-time-statistics", "true")
-//        .set("statistic-archive-file", "statArchive.gfs")
-        ;
-    cache = cf.create();
-
-    return cache;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java
deleted file mode 100644
index ab1ccac..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSUnsortedHoplogOrganizerJUnitTest.java
+++ /dev/null
@@ -1,297 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.UnsortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.SequenceFileHoplog.SequenceFileIterator;
-import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * Test class to test hoplog functionality for streaming ingest 
- * 
- * @author hemantb
- *
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSUnsortedHoplogOrganizerJUnitTest extends BaseHoplogTestCase {
- 
-  /**
-   * Tests flush operation
-   */
-  public void testFlush() throws Exception {
-    int count = 10;
-    int bucketId = (int) System.nanoTime();
-    HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, bucketId);
-
-    // flush and create hoplog
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < count; i++) {
-      items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
-    }
-    
-    organizer.flush(items.iterator(), count);
-    organizer.closeCurrentWriter();
-    
-    // check file existence in bucket directory
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
-                      HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
-
-    // only one hoplog should exists
-    assertEquals(1, hoplogs.length);
-    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
-  }
-  
-  public void testAlterRollOverInterval() throws Exception {
-    HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, 0);
-    
-    // flush 4 times with small delays. Only one seq file will be created
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int j = 0; j < 3; j++) {
-      items.clear();
-      for (int i = 0; i < 10; i++) {
-        items.add(new TestEvent(("key-" + (i + 10 * j)), ("value-" + System.nanoTime())));
-      }
-      organizer.flush(items.iterator(), 10);
-      TimeUnit.MILLISECONDS.sleep(1100);
-    }
-    organizer.closeCurrentWriter();
-    
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + 0,
-        HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
-    
-    // only one hoplog should exists
-    assertEquals(1, hoplogs.length);
-    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
-    
-    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-    mutator.setWriteOnlyFileRolloverInterval(1);
-    hdfsStore.alter(mutator);
-    
-    TimeUnit.MILLISECONDS.sleep(1100);
-    for (int j = 0; j < 2; j++) {
-      items.clear();
-      for (int i = 0; i < 10; i++) {
-        items.add(new TestEvent(("key-" + (i + 10 * j)), ("value-" + System.nanoTime())));
-      }
-      organizer.flush(items.iterator(), 10);
-      TimeUnit.MILLISECONDS.sleep(1100);
-    }
-    organizer.closeCurrentWriter();
-    hoplogs = getBucketHoplogs(getName() + "/" + 0,
-        HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
-    assertEquals(3, hoplogs.length);
-  }
-  
-  public void testSequenceFileScan() throws Exception {
-    int count = 10000;
-    int bucketId = (int) System.nanoTime();
-    HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, bucketId);
-
-    // flush and create hoplog
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < count; i++) {
-      items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
-    }
-    
-    organizer.flush(items.iterator(), count);
-    organizer.closeCurrentWriter();
-    
-    // check file existence in bucket directory
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
-                      HdfsSortedOplogOrganizer.SEQ_HOPLOG_EXTENSION);
-
-    // only one hoplog should exists
-    assertEquals(1, hoplogs.length);
-    
-    SequenceFileDetails sfd = getSequenceFileDetails(hdfsStore.getFileSystem(), hoplogs[0].getPath());
-    
-    // End position is before a sync. Should read until sync.
-    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0, sfd.indexOfKeyBeforeSecondSync ,
-        0, sfd.posBeforeSecondSync);
-    
-    // Start position is inside header. Should start from first key and go to next sync point. 
-    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0, sfd.indexOfKeyBeforeSecondSync, 
-        10, sfd.posAfterFirstSync);
-    
-    // Start and end position are between two sync markers. Should not read any keys.    
-    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 29, 28, 
-        sfd.posAfterFirstSync, sfd.posBeforeSecondSync - sfd.posAfterFirstSync);
-    
-    // Start position is after a sync and End position is beyond the file size. 
-    //Should read all the records after the next sync.
-    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), sfd.indexOfKeyAfterFirstSync, 9999, 
-        sfd.posBeforeFirstSync, 10000000);
-    
-    // Should read all the records. 
-    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0, 9999, 0, -1);
-  }
-  
-  class SequenceFileDetails {
-    public int posBeforeFirstSync;
-    public int indexOfKeyBeforeFirstSync;
-    
-    public int posAfterFirstSync;
-    public int indexOfKeyAfterFirstSync; 
-    
-    public int posBeforeSecondSync;
-    public int indexOfKeyBeforeSecondSync;
-  }
-  
-  public SequenceFileDetails getSequenceFileDetails(FileSystem inputFS, Path sequenceFileName) throws Exception {
-    SequenceFileDetails fd = new SequenceFileDetails();
-    SequenceFileHoplog hoplog = new SequenceFileHoplog(inputFS, sequenceFileName, null);
-      
-    SequenceFileIterator iter = (SequenceFileIterator)hoplog.getReader().scan();;
-    int currentkeyStartPos = 0;
-    int cursorPos = 0;
-    String currentKey = null;
-    boolean firstSyncSeen = false; 
-    try {
-      while (iter.hasNext()) {
-        iter.next();
-        currentkeyStartPos = cursorPos;
-        currentKey = ((String)CacheServerHelper.deserialize(iter.getKey()));
-        cursorPos = (int)iter.getPosition();
-        if (iter.syncSeen()){
-          if (firstSyncSeen) {
-            
-            fd.posBeforeSecondSync = currentkeyStartPos;
-            fd.indexOfKeyBeforeSecondSync = Integer.parseInt(currentKey.substring(4));
-            break;
-          } else {
-            fd.posBeforeFirstSync = currentkeyStartPos;
-            fd.indexOfKeyBeforeFirstSync = Integer.parseInt(currentKey.substring(4));
-            
-            fd.posAfterFirstSync = cursorPos;
-            fd.indexOfKeyAfterFirstSync = Integer.parseInt(currentKey.substring(4)) + 1;
-            firstSyncSeen = true;
-          }
-        }
-      }
-
-    } catch (Exception e) {
-      assertTrue(e.toString(), false);
-    }
-    iter.close();
-    hoplog.close();
-    return fd;
-  }
-  
-  public void testClear() throws Exception {
-    int count = 10;
-    int bucketId = (int) System.nanoTime();
-    HDFSUnsortedHoplogOrganizer organizer = new HDFSUnsortedHoplogOrganizer(regionManager, bucketId);
-
-    // flush and create hoplog
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < count; i++) {
-      items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
-    }
-    organizer.flush(items.iterator(), count);
-    organizer.closeCurrentWriter();
-    // check file existence in bucket directory
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
-                      AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION);
-    assertEquals(1, hoplogs.length);
-    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
-    
-    
-    // write another batch but do not close the data. 
-    organizer.flush(items.iterator(), count);
-    
-    organizer.clear();
-    
-    hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
-        AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION);
-    // check file existence in bucket directory
-    FileStatus[] expiredhoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
-                      AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    
-    // two expired hoplog should exists
-    assertEquals(2, expiredhoplogs.length);
-    assertEquals(2, hoplogs.length);
-    // check the expired hops name should be same 
-    assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) || 
-        expiredhoplogs[1].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
-    assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) || 
-        expiredhoplogs[1].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
-    
-    // Test that second time clear should be harmless and should not result in extra files. 
-    organizer.clear();
-    hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
-        AbstractHoplogOrganizer.SEQ_HOPLOG_EXTENSION);
-    // check file existence in bucket directory
-    expiredhoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
-                      AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    
-    // two expired hoplog should exists
-    assertEquals(2, expiredhoplogs.length);
-    assertEquals(2, hoplogs.length);
-    // check the expired hops name should be same 
-    assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) || 
-        expiredhoplogs[1].getPath().getName().equals(hoplogs[0].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
-    assertTrue(expiredhoplogs[0].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) || 
-        expiredhoplogs[1].getPath().getName().equals(hoplogs[1].getPath().getName()+ AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION) );
-    
-    
-    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[0].getPath(), 0);
-    readSequenceFile(hdfsStore.getFileSystem(), hoplogs[1].getPath(), 0);
-  }
-  
-  public void readSequenceFile(FileSystem inputFS, Path sequenceFileName, int index)  throws IOException{
-    readSequenceFile(inputFS, sequenceFileName, index, -1, 0, -1);
-  }
-  /**
-   * Reads the sequence file assuming that it has keys and values starting from index that 
-   * is specified as parameter. 
-   * 
-   */
-  public void readSequenceFile(FileSystem inputFS, Path sequenceFileName, int index, int endIndex,
-      int startoffset, int length) throws IOException {
-    SequenceFileHoplog hoplog = new SequenceFileHoplog(inputFS, sequenceFileName, null);
-    
-    HoplogIterator<byte[], byte[]> iter = null;
-    if (length == -1){
-      iter = hoplog.getReader().scan();
-    }
-    else {
-      iter = hoplog.getReader().scan(startoffset, length);
-    }
-    
-    try {
-      while (iter.hasNext()) {
-        iter.next();
-        PersistedEventImpl te = UnsortedHoplogPersistedEvent.fromBytes(iter.getValue());
-        String stringkey = ((String)CacheServerHelper.deserialize(iter.getKey()));
-        assertTrue("Expected key: key-" + index + ". Actual key: " + stringkey , ((String)stringkey).equals("key-" + index));
-        index++;
-      }
-      if (endIndex != -1)
-      assertTrue ("The keys should have been until key-"+ endIndex + " but they are until key-"+ (index-1),  index == endIndex + 1) ;
-    } catch (Exception e) {
-      assertTrue(e.toString(), false);
-    }
-    iter.close();
-    hoplog.close();
- }
-
-}



[36/50] [abbrv] incubator-geode git commit: [GEODE-140]: QueryUsingFunctionContextDUnitTest.testQueriesWithFilterKeysOnPRWithRebalancing failed due to suspect string Test was failing due to server being down before function executes This is "expected" as

Posted by ds...@apache.org.
[GEODE-140]: QueryUsingFunctionContextDUnitTest.testQueriesWithFilterKeysOnPRWithRebalancing failed due to suspect string
Test was failing due to server being down before function executes
This is "expected" as it can occur based on this test.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/e87e3b7f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/e87e3b7f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/e87e3b7f

Branch: refs/heads/develop
Commit: e87e3b7fc93a6da74ff077291c354c5b4d6685db
Parents: b490257
Author: Jason Huynh <hu...@gmail.com>
Authored: Tue Oct 20 10:55:57 2015 -0700
Committer: Jason Huynh <hu...@gmail.com>
Committed: Thu Oct 22 15:35:49 2015 -0700

----------------------------------------------------------------------
 .../QueryUsingFunctionContextDUnitTest.java     | 25 +++++++++++++-------
 .../query/functional/StructSetOrResultsSet.java |  4 ++--
 2 files changed, 18 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e87e3b7f/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
index 6ed01a2..22fe767 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
@@ -26,6 +26,7 @@ import com.gemstone.gemfire.cache.RegionShortcut;
 import com.gemstone.gemfire.cache.client.ClientCache;
 import com.gemstone.gemfire.cache.client.ClientCacheFactory;
 import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
+import com.gemstone.gemfire.cache.client.ServerConnectivityException;
 import com.gemstone.gemfire.cache.execute.Function;
 import com.gemstone.gemfire.cache.execute.FunctionAdapter;
 import com.gemstone.gemfire.cache.execute.FunctionContext;
@@ -34,7 +35,6 @@ import com.gemstone.gemfire.cache.execute.FunctionService;
 import com.gemstone.gemfire.cache.execute.RegionFunctionContext;
 import com.gemstone.gemfire.cache.execute.ResultCollector;
 import com.gemstone.gemfire.cache.query.Query;
-import com.gemstone.gemfire.cache.query.QueryInvalidException;
 import com.gemstone.gemfire.cache.query.QueryInvocationTargetException;
 import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.SelectResults;
@@ -430,8 +430,9 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
  public void testQueriesWithFilterKeysOnPRWithRebalancing() {
    addExpectedException("QueryInvocationTargetException");
    addExpectedException("java.net.SocketException");
-   Object[][] r = new Object[queries.length][2];
-   Set filter =  new HashSet();
+   addExpectedException("ServerConnectivityException");
+   addExpectedException("FunctionException");
+   addExpectedException("IOException");
 
    // Close cache on server1
    server1.invoke(new CacheSerializableRunnable("Set QueryObserver in cache on server1") {
@@ -478,9 +479,16 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
            //Should not come here, an exception is expected from above function call.
            fail("Function call did not fail for query with function context");
          } catch (FunctionException ex) {
-           //ex.printStackTrace();
-           if (!(ex.getCause() instanceof QueryInvocationTargetException)) {
-             fail("Should have received an QueryInvocationTargetException but recieved"+ex.getMessage());
+           if (!((ex.getCause() instanceof QueryInvocationTargetException) || (ex.getCause() instanceof ServerConnectivityException))) {
+             if (ex.getCause() instanceof FunctionException) {
+               FunctionException fe = (FunctionException)ex.getCause();
+               if (!fe.getMessage().startsWith("IOException")) {
+                 fail("Should have received an QueryInvocationTargetException but recieved"+ex.getMessage());
+               }
+             }
+             else {
+               fail("Should have received an QueryInvocationTargetException but recieved"+ex.getMessage());
+             }
            }
          }
        }//For loop ends here.
@@ -497,9 +505,8 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
 
  }
 
- // DISABLED due to high rate of failure in unit test runs.
- // See internal ticket #52270
- public void disabledtestNonColocatedRegionQueries() {
+ 
+ public void testNonColocatedRegionQueries() {
    addExpectedException("UnsupportedOperationException");
    client.invoke(new CacheSerializableRunnable("Test query on non-colocated regions on server") {
      @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e87e3b7f/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/StructSetOrResultsSet.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/StructSetOrResultsSet.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/StructSetOrResultsSet.java
index 7f3e14d..c4e6f8f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/StructSetOrResultsSet.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/StructSetOrResultsSet.java
@@ -368,8 +368,8 @@ public class StructSetOrResultsSet extends TestCase {
   private void checkResultSizes(Collection r1, Collection r2, String query) {
     if (r1.size() != r2.size()) {
       fail("FAILED:SelectResults size is different in both the cases. Size1="
-          + ((SelectResults) r1).size() + " Size2 = "
-          + ((SelectResults) r2).size() + "; failed query=" + query);
+          + r1.size() + " Size2 = "
+          + r2.size() + "; failed query=" + query);
     }
   }
   


[17/50] [abbrv] incubator-geode git commit: GEODE-429: Remove test category HoplogTests

Posted by ds...@apache.org.
GEODE-429: Remove test category HoplogTests


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/8fb5edd3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/8fb5edd3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/8fb5edd3

Branch: refs/heads/develop
Commit: 8fb5edd349ac388fec2d5f665119f26244343703
Parents: f2390a1
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 15:08:18 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:23 2015 -0700

----------------------------------------------------------------------
 .../cache/hdfs/internal/SignalledFlushObserverJUnitTest.java  | 7 +++----
 .../cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java | 7 +++----
 .../gemstone/gemfire/test/junit/categories/HoplogTest.java    | 7 -------
 3 files changed, 6 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8fb5edd3/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
index e6b7aa8..92328f8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
@@ -5,15 +5,14 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.junit.experimental.categories.Category;
 
-import junit.framework.TestCase;
-
 import com.gemstone.gemfire.cache.hdfs.internal.FlushObserver.AsyncFlushResult;
 import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest
 ;
 
-@Category({IntegrationTest.class, HoplogTest.class})
+import junit.framework.TestCase;
+
+@Category({IntegrationTest.class})
 public class SignalledFlushObserverJUnitTest extends TestCase {
   private AtomicInteger events;
   private AtomicInteger delivered;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8fb5edd3/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
index 6fa1ff1..0acaf8e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
@@ -17,8 +17,6 @@ import java.util.concurrent.ConcurrentSkipListSet;
 
 import org.junit.experimental.categories.Category;
 
-import junit.framework.TestCase;
-
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.Operation;
@@ -41,16 +39,17 @@ import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
 import com.gemstone.gemfire.internal.cache.wan.GatewaySenderAttributes;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest
 ;
 
+import junit.framework.TestCase;
+
 /**
  * A test class for testing whether the functionalities of sorted Aysync Queue.
  * 
  * @author Hemant Bhanawat
  */
-@Category({IntegrationTest.class, HoplogTest.class})
+@Category({IntegrationTest.class})
 public class SortedListForAsyncQueueJUnitTest extends TestCase {
   
   public SortedListForAsyncQueueJUnitTest() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8fb5edd3/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java
----------------------------------------------------------------------
diff --git a/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java b/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java
deleted file mode 100644
index 08987a5..0000000
--- a/gemfire-junit/src/test/java/com/gemstone/gemfire/test/junit/categories/HoplogTest.java
+++ /dev/null
@@ -1,7 +0,0 @@
-package com.gemstone.gemfire.test.junit.categories;
-/**
- * JUnit Test Category that specifies a test with very narrow and well defined
- * scope. Any complex dependencies and interactions are stubbed or mocked.
- */
-public interface HoplogTest {
-}


[13/50] [abbrv] incubator-geode git commit: GEODE-429: Remove hdfsStore gfsh commands

Posted by ds...@apache.org.
GEODE-429: Remove hdfsStore gfsh commands


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7f251978
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7f251978
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7f251978

Branch: refs/heads/develop
Commit: 7f251978c9730c403534a62fb385e922eecc8e5b
Parents: 7bcc1e4
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 14:01:01 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:22 2015 -0700

----------------------------------------------------------------------
 .../gemfire/internal/redis/RegionProvider.java  |   2 +-
 .../gemfire/management/cli/ConverterHint.java   |   1 -
 .../CreateAlterDestroyRegionCommands.java       |  12 +-
 .../cli/commands/HDFSStoreCommands.java         | 695 ---------------
 .../cli/converters/HdfsStoreNameConverter.java  |  88 --
 .../cli/functions/AlterHDFSStoreFunction.java   | 228 -----
 .../cli/functions/CreateHDFSStoreFunction.java  | 124 ---
 .../cli/functions/DestroyHDFSStoreFunction.java | 100 ---
 .../cli/functions/ListHDFSStoresFunction.java   | 102 ---
 .../cli/functions/RegionFunctionArgs.java       |  66 +-
 .../internal/cli/i18n/CliStrings.java           | 112 ---
 .../HDFSStoreCommandsController.java            | 229 -----
 .../controllers/ShellCommandsController.java    |  28 +-
 .../commands/HDFSStoreCommandsJUnitTest.java    | 838 -------------------
 .../AlterHDFSStoreFunctionJUnitTest.java        | 324 -------
 .../CreateHDFSStoreFunctionJUnitTest.java       | 307 -------
 .../DescribeHDFSStoreFunctionJUnitTest.java     | 364 --------
 .../DestroyHDFSStoreFunctionJUnitTest.java      | 305 -------
 .../ListHDFSStoresFunctionJUnitTest.java        | 319 -------
 19 files changed, 17 insertions(+), 4227 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
index 0240a4c..a01858e 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
@@ -378,7 +378,7 @@ public class RegionProvider implements Closeable {
     r = cache.getRegion(key);
     if (r != null) return r;
     do {
-      Result result = cliCmds.createRegion(key, defaultRegionType, null, null, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);
+      Result result = cliCmds.createRegion(key, defaultRegionType, null, null, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);
       r = cache.getRegion(key);
       if (result.getStatus() == Status.ERROR && r == null) {
         String err = "";

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
index f295983..afe8c76 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
@@ -41,5 +41,4 @@ public interface ConverterHint {
   public static final String LOG_LEVEL             = "converter.hint.log.levels";
 
   public static final String STRING_DISABLER       = "converter.hint.disable-string-converter";
-  public static final String HDFSSTORE_ALL         = "converter.hint.cluster.hdfsstore";
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
index 919d6fe..41cf531 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
@@ -202,14 +202,6 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
                   help = CliStrings.CREATE_REGION__GATEWAYSENDERID__HELP)
       @CliMetaData (valueSeparator = ",") 
       String[] gatewaySenderIds,
-      @CliOption (key = CliStrings.CREATE_REGION__HDFSSTORE_NAME,
-                  help = CliStrings.CREATE_REGION__HDFSSTORE_NAME__HELP ,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE)
-      String hdfsStoreName,
-      @CliOption (key = CliStrings.CREATE_REGION__HDFSSTORE_WRITEONLY,      
-                  help = CliStrings.CREATE_REGION__HDFSSTORE_WRITEONLY__HELP,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE)
-      Boolean hdfsWriteOnly,      
       @CliOption (key = CliStrings.CREATE_REGION__KEYCONSTRAINT,
                   help = CliStrings.CREATE_REGION__KEYCONSTRAINT__HELP)
       String keyConstraint,
@@ -319,7 +311,7 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
             prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
             prRedundantCopies, prStartupRecoveryDelay,
             prTotalMaxMemory, prTotalNumBuckets,
-            offHeap, hdfsStoreName , hdfsWriteOnly,  regionAttributes);
+            offHeap, regionAttributes);
         
 
         if (regionAttributes.getPartitionAttributes() == null && regionFunctionArgs.hasPartitionAttributes()) {
@@ -339,7 +331,7 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
           concurrencyChecksEnabled, cloningEnabled, concurrencyLevel, 
           prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
           prRedundantCopies, prStartupRecoveryDelay,
-          prTotalMaxMemory, prTotalNumBuckets, null,compressor, offHeap , hdfsStoreName , hdfsWriteOnly);
+          prTotalMaxMemory, prTotalNumBuckets, null,compressor, offHeap);
         
         if (!regionShortcut.name().startsWith("PARTITION") && regionFunctionArgs.hasPartitionAttributes()) {
           throw new IllegalArgumentException(

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
deleted file mode 100644
index 6e573f1..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommands.java
+++ /dev/null
@@ -1,695 +0,0 @@
-package com.gemstone.gemfire.management.internal.cli.commands;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Set;
-
-import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
-import org.springframework.shell.core.annotation.CliCommand;
-import org.springframework.shell.core.annotation.CliOption;
-
-import com.gemstone.gemfire.SystemFailure;
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.execute.Execution;
-import com.gemstone.gemfire.cache.execute.FunctionInvocationTargetException;
-import com.gemstone.gemfire.cache.execute.ResultCollector;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.execute.AbstractExecution;
-import com.gemstone.gemfire.internal.lang.ClassUtils;
-import com.gemstone.gemfire.management.cli.CliMetaData;
-import com.gemstone.gemfire.management.cli.ConverterHint;
-import com.gemstone.gemfire.management.cli.Result;
-import com.gemstone.gemfire.management.cli.Result.Status;
-import com.gemstone.gemfire.management.internal.cli.CliUtil;
-import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction.AlterHDFSStoreAttributes;
-import com.gemstone.gemfire.management.internal.cli.functions.CliFunctionResult;
-import com.gemstone.gemfire.management.internal.cli.functions.CreateHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.DestroyHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction.HdfsStoreDetails;
-import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
-import com.gemstone.gemfire.management.internal.cli.result.CommandResultException;
-import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData;
-import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
-import com.gemstone.gemfire.management.internal.cli.result.ResultDataException;
-import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
-import com.gemstone.gemfire.management.internal.cli.util.HDFSStoreNotFoundException;
-import com.gemstone.gemfire.management.internal.cli.util.MemberNotFoundException;
-import com.gemstone.gemfire.management.internal.configuration.SharedConfigurationWriter;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-
-/**
- * The HdfsStoreCommands class encapsulates all GemFire Hdfs Store commands in Gfsh.
- *  </p>
- *  
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.management.internal.cli.commands.AbstractCommandsSupport
- */
-
-
-public class HDFSStoreCommands   extends AbstractCommandsSupport {  
-  @CliCommand (value = CliStrings.CREATE_HDFS_STORE, help = CliStrings.CREATE_HDFS_STORE__HELP)
-  @CliMetaData (relatedTopic = CliStrings.TOPIC_GEMFIRE_HDFSSTORE, writesToSharedConfiguration = true)
-  public Result createHdfsStore(      
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__NAME,                  
-                  mandatory = true,
-                  optionContext = ConverterHint.HDFSSTORE_ALL, 
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__NAME__HELP)
-      String hdfsUniqueName,
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__NAMENODE,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__NAMENODE__HELP)
-      String namenode, 
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__HOMEDIR,
-                  optionContext = ConverterHint.DIR_PATHSTRING,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__HOMEDIR__HELP)
-      String homeDir,
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__BATCHSIZE,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__BATCHSIZE__HELP)
-      Integer batchSize,
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL__HELP)
-      Integer batchInterval,
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__READCACHESIZE,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__READCACHESIZE__HELP)
-      Float readCacheSize,
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS,
-          mandatory = false,
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS__HELP)
-      Integer dispatcherThreads,
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAXMEMORY,
-                  mandatory = false,
-                  unspecifiedDefaultValue =CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__MAXMEMORY__HELP)
-      Integer maxMemory,
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT__HELP)
-      Boolean bufferPersistent,
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE__HELP)
-      Boolean syncDiskWrite,
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__DISKSTORENAME,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__DISKSTORENAME__HELP)
-      String diskStoreName,
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MINORCOMPACT,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__MINORCOMPACT__HELP)
-      Boolean minorCompact,            
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP)
-      Integer minorCompactionThreads,
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT__HELP)
-      Boolean majorCompact,   
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP)
-      Integer majorCompactionInterval, 
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP)
-      Integer majorCompactionThreads,  
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL__HELP)
-      Integer purgeInterval,  
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE__HELP)
-      Integer maxWriteonlyFileSize,  
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL__HELP)
-      Integer fileRolloverInterval,  
-      @CliOption (key = CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE,
-                  optionContext = ConverterHint.FILE_PATHSTRING,
-                  mandatory = false,
-                  unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-                  help = CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE__HELP)      
-      String clientConfigFile,
-      @CliOption(key=CliStrings.CREATE_HDFS_STORE__GROUP,
-                 help=CliStrings.CREATE_HDFS_STORE__GROUP__HELP,
-                 optionContext=ConverterHint.MEMBERGROUP)
-      @CliMetaData (valueSeparator = ",")
-       String[] groups ) {
-    try {
-      
-      return getCreatedHdfsStore(groups, hdfsUniqueName, namenode, homeDir, clientConfigFile, fileRolloverInterval,
-          maxWriteonlyFileSize, minorCompact, majorCompact, batchSize, batchInterval, diskStoreName, bufferPersistent,
-          dispatcherThreads, syncDiskWrite, readCacheSize, majorCompactionInterval, majorCompactionThreads,
-          minorCompactionThreads, purgeInterval, maxMemory);
-      
-    } catch (VirtualMachineError e) {
-      SystemFailure.initiateFailure(e);
-      throw e;
-
-    } catch (Throwable th) {
-      String formattedErrString = CliStrings.format(CliStrings.CREATE_HDFS_STORE__ERROR_WHILE_CREATING_REASON_0,
-          new Object[] { th.getMessage() });
-      SystemFailure.checkFailure();
-      return ResultBuilder.createGemFireErrorResult(formattedErrString);
-    }
-  }
-
-  public Result getCreatedHdfsStore(String[] groups, String hdfsUniqueName, String namenode, String homeDir,
-      String clientConfigFile, Integer fileRolloverInterval, Integer maxWriteonlyFileSize, Boolean minorCompact,
-      Boolean majorCompact, Integer batchSize, Integer batchInterval, String diskStoreName, Boolean bufferPersistent,
-      Integer dispatcherThreads, Boolean syncDiskWrite, Float readCacheSize, Integer majorCompactionInterval,
-      Integer majorCompactionThreads, Integer minorCompactionThreads, Integer purgeInterval, Integer maxMemory) {
-
-    XmlEntity xmlEntity = null;
-
-    Set<DistributedMember> targetMembers = null;
-
-    try {
-      targetMembers = getGroupMembers(groups);
-    } catch (CommandResultException cre) {
-      return cre.getResult();
-    }
-
-    HDFSStoreConfigHolder configHolder = new HDFSStoreConfigHolder();
-    configHolder.setName(hdfsUniqueName);
-    if (readCacheSize != null)
-      configHolder.setBlockCacheSize(readCacheSize);
-
-    if (fileRolloverInterval != null)
-      configHolder.setWriteOnlyFileRolloverInterval(fileRolloverInterval);
-    if (clientConfigFile != null)
-      configHolder.setHDFSClientConfigFile(clientConfigFile);
-    if (homeDir != null)
-      configHolder.setHomeDir(homeDir);
-    if (maxWriteonlyFileSize != null)
-      configHolder.setWriteOnlyFileRolloverSize(maxWriteonlyFileSize);
-    if (namenode != null)
-      configHolder.setNameNodeURL(namenode);
-
-    if (minorCompact != null)
-      configHolder.setMinorCompaction(minorCompact);
-    if (majorCompact != null)
-      configHolder.setMajorCompaction(majorCompact);
-    if (majorCompactionInterval != null)
-      configHolder.setMajorCompactionInterval(majorCompactionInterval);
-    if (majorCompactionThreads != null)
-      configHolder.setMajorCompactionThreads(majorCompactionThreads);
-    if (minorCompactionThreads != null)
-      configHolder.setMinorCompactionThreads(minorCompactionThreads);
-    if (purgeInterval != null)
-      configHolder.setPurgeInterval(purgeInterval);
-
-    if (batchSize != null)
-      configHolder.setBatchSize(batchSize);
-    if (batchInterval != null)
-      configHolder.setBatchInterval(batchInterval);
-    if (diskStoreName != null)
-      configHolder.setDiskStoreName(diskStoreName);
-    if (syncDiskWrite != null)
-      configHolder.setSynchronousDiskWrite(syncDiskWrite);
-    if (dispatcherThreads != null)
-      configHolder.setDispatcherThreads(dispatcherThreads);
-    if (maxMemory != null)
-      configHolder.setMaxMemory(maxMemory);
-    if (bufferPersistent != null)
-      configHolder.setBufferPersistent(bufferPersistent);
-
-    ResultCollector<?, ?> resultCollector = getMembersFunctionExecutor(targetMembers)
-    .withArgs(configHolder).execute(new CreateHDFSStoreFunction());
-    
-    List<CliFunctionResult> hdfsStoreCreateResults = CliFunctionResult.cleanResults((List<?>)resultCollector
-        .getResult());
-
-    TabularResultData tabularResultData = ResultBuilder.createTabularResultData();
-
-    Boolean accumulatedData = false;
-
-    for (CliFunctionResult hdfsStoreCreateResult : hdfsStoreCreateResults) {
-      if (hdfsStoreCreateResult.getThrowable() != null) {
-        String memberId = hdfsStoreCreateResult.getMemberIdOrName();
-        String errorMsg = hdfsStoreCreateResult.getThrowable().getMessage();
-        String errClass = hdfsStoreCreateResult.getThrowable().getClass().getName();
-        tabularResultData.accumulate("Member", memberId);
-        tabularResultData.accumulate("Result", "ERROR: " + errClass + ": " + errorMsg);
-        accumulatedData = true;
-        tabularResultData.setStatus(Status.ERROR);
-      }
-      else if (hdfsStoreCreateResult.isSuccessful()) {
-        String memberId = hdfsStoreCreateResult.getMemberIdOrName();
-        String successMsg = hdfsStoreCreateResult.getMessage();
-        tabularResultData.accumulate("Member", memberId);
-        tabularResultData.accumulate("Result", successMsg);
-        if (xmlEntity == null) {
-          xmlEntity = hdfsStoreCreateResult.getXmlEntity();
-        }
-        accumulatedData = true;
-      }
-    }
-
-    if (!accumulatedData) {
-      return ResultBuilder.createInfoResult("Unable to create hdfs store:" + hdfsUniqueName);
-    }
-
-    Result result = ResultBuilder.buildResult(tabularResultData);
-    if (xmlEntity != null) {
-      result.setCommandPersisted((new SharedConfigurationWriter()).addXmlEntity(xmlEntity, groups));
-    }
-
-    return ResultBuilder.buildResult(tabularResultData);
-  }
-  
-  
-  @CliCommand(value = CliStrings.DESCRIBE_HDFS_STORE, help = CliStrings.DESCRIBE_HDFS_STORE__HELP)
-  @CliMetaData(shellOnly = false, relatedTopic = { CliStrings.TOPIC_GEMFIRE_HDFSSTORE})
-  public Result describeHdfsStore(
-      @CliOption(key = CliStrings.DESCRIBE_HDFS_STORE__MEMBER, 
-                 mandatory = true, optionContext = ConverterHint.MEMBERIDNAME, 
-                 help = CliStrings.DESCRIBE_HDFS_STORE__MEMBER__HELP)
-      final String memberName,
-      @CliOption(key = CliStrings.DESCRIBE_HDFS_STORE__NAME, 
-                 mandatory = true, 
-                 optionContext = ConverterHint.HDFSSTORE_ALL, 
-                 help = CliStrings.DESCRIBE_HDFS_STORE__NAME__HELP)
-      final String hdfsStoreName) {
-    try{
-      return toCompositeResult(getHDFSStoreDescription(memberName , hdfsStoreName));
-      
-      }catch (HDFSStoreNotFoundException e){
-         return ResultBuilder.createShellClientErrorResult(((HDFSStoreNotFoundException)e).getMessage());
-      } 
-      catch (FunctionInvocationTargetException ignore) {
-      return ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
-          CliStrings.DESCRIBE_HDFS_STORE));
-      
-    } catch (MemberNotFoundException e) {
-      return ResultBuilder.createShellClientErrorResult(e.getMessage());
-      
-    } catch (VirtualMachineError e) {
-      SystemFailure.initiateFailure(e);
-      throw e;
-      
-    } catch (Throwable t) {
-      SystemFailure.checkFailure();
-      return ResultBuilder.createGemFireErrorResult(String.format(CliStrings.DESCRIBE_HDFS_STORE__ERROR_MESSAGE,
-          memberName, hdfsStoreName, t));
-    }
-  }        
-  
-  public HDFSStoreConfigHolder getHDFSStoreDescription(String memberName, String hdfsStoreName) {
-
-    final DistributedMember member = getMember(getCache(), memberName);
-    
-    ResultCollector<?, ?> resultCollector = getMembersFunctionExecutor(Collections.singleton(member))
-    .withArgs(hdfsStoreName).execute(new DescribeHDFSStoreFunction());
-    
-    Object result = ((List<?>)resultCollector.getResult()).get(0);
-
-    if (result instanceof HDFSStoreConfigHolder) {
-      return (HDFSStoreConfigHolder)result;
-    }
-    if (result instanceof HDFSStoreNotFoundException) {
-      throw (HDFSStoreNotFoundException)result;
-    }
-    else {
-      final Throwable cause = (result instanceof Throwable ? (Throwable)result : null);
-      throw new RuntimeException(CliStrings.format(CliStrings.UNEXPECTED_RETURN_TYPE_EXECUTING_COMMAND_ERROR_MESSAGE,
-          ClassUtils.getClassName(result), CliStrings.DESCRIBE_HDFS_STORE), cause);
-
-    }
-  }
-  
-  public Result toCompositeResult(final HDFSStoreConfigHolder storePrms) {
-    final CompositeResultData hdfsStoreCompositeResult = ResultBuilder.createCompositeResultData();
-    final CompositeResultData.SectionResultData hdfsStoreSection = hdfsStoreCompositeResult.addSection();
-
-    hdfsStoreSection.addData("Hdfs Store Name", storePrms.getName());
-    hdfsStoreSection.addData("Name Node URL", storePrms.getNameNodeURL());
-    hdfsStoreSection.addData("Home Dir", storePrms.getHomeDir());
-    hdfsStoreSection.addData("Block Cache", storePrms.getBlockCacheSize());
-    hdfsStoreSection.addData("File RollOver Interval", storePrms.getWriteOnlyFileRolloverInterval());
-    hdfsStoreSection.addData("Max WriteOnly File Size", storePrms.getWriteOnlyFileRolloverSize());
-
-    hdfsStoreSection.addData("Client Configuration File", storePrms.getHDFSClientConfigFile());
-
-    hdfsStoreSection.addData("Disk Store Name", storePrms.getDiskStoreName());
-    hdfsStoreSection.addData("Batch Size In MB", storePrms.getBatchSize());
-    hdfsStoreSection.addData("Batch Interval Time", storePrms.getBatchInterval());
-    hdfsStoreSection.addData("Maximum Memory", storePrms.getMaxMemory());
-    hdfsStoreSection.addData("Dispatcher Threads", storePrms.getDispatcherThreads());
-    hdfsStoreSection.addData("Buffer Persistence", storePrms.getBufferPersistent());
-    hdfsStoreSection.addData("Synchronous Persistence", storePrms.getSynchronousDiskWrite());
-
-    hdfsStoreSection.addData("Major Compaction Enabled", storePrms.getMajorCompaction());
-    hdfsStoreSection.addData("Major Compaction Threads", storePrms.getMajorCompactionThreads());
-    hdfsStoreSection.addData("Major compaction Interval", storePrms.getMajorCompactionInterval());
-    hdfsStoreSection.addData("Minor Compaction Enabled", storePrms.getMinorCompaction());
-    hdfsStoreSection.addData("Minor Compaction Threads", storePrms.getMinorCompactionThreads());
-    hdfsStoreSection.addData("Purge Interval", storePrms.getPurgeInterval());
-
-    return ResultBuilder.buildResult(hdfsStoreCompositeResult);
-  } 
-  
-  @CliCommand(value = CliStrings.LIST_HDFS_STORE, help = CliStrings.LIST_HDFS_STORE__HELP)
-  @CliMetaData(shellOnly = false, relatedTopic = { CliStrings.TOPIC_GEMFIRE_HDFSSTORE })
-  public Result listHdfsStore() {  
-    try {
-      Set<DistributedMember> dataMembers = getNormalMembers(getCache());
-      if (dataMembers.isEmpty()) {
-        return ResultBuilder.createInfoResult(CliStrings.NO_CACHING_MEMBERS_FOUND_MESSAGE);
-      }
-      return toTabularResult(getHdfsStoreListing(dataMembers));
-
-    } catch (FunctionInvocationTargetException ignore) {
-      return ResultBuilder.createGemFireErrorResult(
-          CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
-          CliStrings.LIST_HDFS_STORE));
-
-    } catch (VirtualMachineError e) {
-      SystemFailure.initiateFailure(e);
-      throw e;
-
-    } catch (Throwable t) {
-      SystemFailure.checkFailure();
-      return ResultBuilder.createGemFireErrorResult(
-          String.format(CliStrings.LIST_HDFS_STORE__ERROR_MESSAGE, t.getMessage()));
-    }
-  }
-  
-  protected List<HdfsStoreDetails> getHdfsStoreListing(Set<DistributedMember> members) {
-
-    final Execution membersFunctionExecutor = getMembersFunctionExecutor(members);
-
-    if (membersFunctionExecutor instanceof AbstractExecution) {
-      ((AbstractExecution)membersFunctionExecutor).setIgnoreDepartedMembers(true);
-    }
-
-    final ResultCollector<?, ?> resultCollector = membersFunctionExecutor.execute(new ListHDFSStoresFunction());
-    final List<?> results = (List<?>)resultCollector.getResult();
-    final List<HdfsStoreDetails> hdfsStoreList = new ArrayList<HdfsStoreDetails>(results.size());
-
-    for (final Object result : results) {
-      if (result instanceof Set) { // ignore FunctionInvocationTargetExceptions and other Exceptions...
-        hdfsStoreList.addAll((Set<HdfsStoreDetails>)result);
-      }
-    }
-
-    Collections.sort(hdfsStoreList, new Comparator<HdfsStoreDetails>() {
-      public <T extends Comparable<T>> int compare(final T obj1, final T obj2) {
-        return (obj1 == null && obj2 == null ? 0 : (obj1 == null ? 1 : (obj2 == null ? -1 : obj1.compareTo(obj2))));
-      }
-
-      @Override
-      public int compare(HdfsStoreDetails store1, HdfsStoreDetails store2) {
-        int comparisonValue = compare(store1.getMemberName(), store2.getMemberName());
-        comparisonValue = (comparisonValue != 0 ? comparisonValue : compare(store1.getMemberId(), store2.getMemberId()));
-        return (comparisonValue != 0 ? comparisonValue : store1.getStoreName().compareTo(store2.getStoreName()));
-      }
-    });
-
-    return hdfsStoreList;
-  }
-  
-
-  protected Result toTabularResult(final List<HdfsStoreDetails> hdfsStoreList) throws ResultDataException {
-    if (!hdfsStoreList.isEmpty()) {
-      final TabularResultData hdfsStoreData = ResultBuilder.createTabularResultData();
-      for (final HdfsStoreDetails hdfsStoreDetails : hdfsStoreList) {
-        hdfsStoreData.accumulate("Member Name", hdfsStoreDetails.getMemberName());
-        hdfsStoreData.accumulate("Member Id", hdfsStoreDetails.getMemberId());
-        hdfsStoreData.accumulate("Hdfs Store Name", hdfsStoreDetails.getStoreName());
-      }
-      return ResultBuilder.buildResult(hdfsStoreData);
-    }
-    else {
-      return ResultBuilder.createInfoResult(CliStrings.LIST_HDFS_STORE__HDFS_STORES_NOT_FOUND_MESSAGE);
-    }
-  }
-  
-
-  @CliCommand(value=CliStrings.DESTROY_HDFS_STORE, help=CliStrings.DESTROY_HDFS_STORE__HELP)
-  @CliMetaData(shellOnly=false, relatedTopic={CliStrings.TOPIC_GEMFIRE_HDFSSTORE}, writesToSharedConfiguration=true)
-  public Result destroyHdfstore(
-      @CliOption  (key=CliStrings.DESTROY_HDFS_STORE__NAME, 
-                   optionContext=ConverterHint.HDFSSTORE_ALL,
-                   mandatory=true,
-                   help=CliStrings.DESTROY_HDFS_STORE__NAME__HELP)
-        String hdfsStoreName,
-      @CliOption(key=CliStrings.DESTROY_HDFS_STORE__GROUP,
-                 help=CliStrings.DESTROY_HDFS_STORE__GROUP__HELP,
-                 optionContext=ConverterHint.MEMBERGROUP)
-      @CliMetaData (valueSeparator = ",")
-        String[] groups) {
-    try{      
-       return destroyStore(hdfsStoreName,groups);
- 
-    } catch (FunctionInvocationTargetException ignore) {
-      return ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
-          CliStrings.DESTROY_HDFS_STORE));
-      
-    } catch (VirtualMachineError e) {
-      SystemFailure.initiateFailure(e);
-      throw e;
-      
-    } catch (Throwable th) {
-      SystemFailure.checkFailure();
-      return ResultBuilder.createGemFireErrorResult(CliStrings.format(
-          CliStrings.DESTROY_HDFS_STORE__ERROR_WHILE_DESTROYING_REASON_0, new Object[] { th.getMessage() }));
-    }
- }
-  
-  protected Result destroyStore(String hdfsStoreName , String[] groups){
-      TabularResultData tabularData = ResultBuilder.createTabularResultData();
-      boolean accumulatedData = false;
-
-      Set<DistributedMember> targetMembers = null;
-      try {
-        targetMembers = getGroupMembers(groups);
-      } catch (CommandResultException cre) {
-        return cre.getResult();
-      }
-      
-      ResultCollector<?, ?> rc = getMembersFunctionExecutor(targetMembers)
-      .withArgs(hdfsStoreName).execute(new DestroyHDFSStoreFunction());
-      
-      List<CliFunctionResult> results = CliFunctionResult.cleanResults((List<?>)rc.getResult());
-
-      XmlEntity xmlEntity = null;
-      for (CliFunctionResult result : results) {
-        
-        if (result.getThrowable() != null) {
-          tabularData.accumulate("Member", result.getMemberIdOrName());
-          tabularData.accumulate("Result", "ERROR: " + result.getThrowable().getClass().getName() + ": "
-              + result.getThrowable().getMessage());
-          accumulatedData = true;
-          tabularData.setStatus(Status.ERROR);
-        }
-        else if (result.getMessage() != null) {
-          tabularData.accumulate("Member", result.getMemberIdOrName());
-          tabularData.accumulate("Result", result.getMessage());
-          accumulatedData = true;
-          
-          if (xmlEntity == null) {
-            xmlEntity = result.getXmlEntity();
-          }
-        }
-      }
-      
-      if (!accumulatedData) {
-        return ResultBuilder.createInfoResult("No matching hdfs stores found.");
-      }
-      
-      Result result = ResultBuilder.buildResult(tabularData);
-      if (xmlEntity != null) {
-        result.setCommandPersisted((new SharedConfigurationWriter()).deleteXmlEntity(xmlEntity, groups));
-      }
-      
-      return result;
-  }
-  @CliCommand(value=CliStrings.ALTER_HDFS_STORE, help=CliStrings.ALTER_HDFS_STORE__HELP)
-  @CliMetaData(shellOnly=false, relatedTopic={CliStrings.TOPIC_GEMFIRE_HDFSSTORE}, writesToSharedConfiguration=true)
-  public Result alterHdfstore(
-      @CliOption (key = CliStrings.ALTER_HDFS_STORE__NAME,                  
-          mandatory = true,
-          optionContext = ConverterHint.HDFSSTORE_ALL, 
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.ALTER_HDFS_STORE__NAME__HELP)
-      String hdfsUniqueName,     
-      @CliOption (key = CliStrings.ALTER_HDFS_STORE__BATCHSIZE,
-          mandatory = false,
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.ALTER_HDFS_STORE__BATCHSIZE__HELP)
-      Integer batchSize,
-      @CliOption (key = CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL,
-          mandatory = false,
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL__HELP)
-      Integer batchInterval,      
-      @CliOption (key = CliStrings.ALTER_HDFS_STORE__MINORCOMPACT,
-          mandatory = false,
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.ALTER_HDFS_STORE__MINORCOMPACT__HELP)
-      Boolean minorCompact,                                                                                                         
-      @CliOption (key = CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS,
-          mandatory = false,
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP)
-      Integer minorCompactionThreads,
-      @CliOption (key = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT,
-          mandatory = false,
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT__HELP)
-      Boolean majorCompact,   
-      @CliOption (key = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL,
-          mandatory = false,
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP)
-      Integer majorCompactionInterval, 
-      @CliOption (key = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS,
-          mandatory = false,
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP)
-      Integer majorCompactionThreads,  
-      @CliOption (key = CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL,
-          mandatory = false,
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL__HELP)
-      Integer purgeInterval,        
-      @CliOption (key = CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL,
-          mandatory = false,
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL__HELP)
-      Integer fileRolloverInterval,
-      @CliOption (key = CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE,
-          mandatory = false,
-          unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE__HELP)
-      Integer maxWriteonlyFileSize,  
-      @CliOption(key=CliStrings.ALTER_HDFS_STORE__GROUP,
-         help=CliStrings.ALTER_HDFS_STORE__GROUP__HELP,
-         optionContext=ConverterHint.MEMBERGROUP)
-      @CliMetaData (valueSeparator = ",")
-      String[] groups){
-    try {                         
-      
-      return getAlteredHDFSStore(groups, hdfsUniqueName, batchSize, batchInterval, minorCompact,
-          minorCompactionThreads, majorCompact, majorCompactionInterval, majorCompactionThreads, purgeInterval,
-          fileRolloverInterval, maxWriteonlyFileSize);
-      
-    } catch (FunctionInvocationTargetException ignore) {
-      return ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN,
-          CliStrings.ALTER_HDFS_STORE));
-      
-    } catch (VirtualMachineError e) {
-      SystemFailure.initiateFailure(e);
-      throw e;
-      
-    } catch (Throwable th) {
-      SystemFailure.checkFailure();
-      return ResultBuilder.createGemFireErrorResult(CliStrings.format(
-          CliStrings.ALTER_HDFS_STORE__ERROR_WHILE_ALTERING_REASON_0, new Object[] { th.getMessage() }));
-    }
- }
-  
-  
-  protected Result getAlteredHDFSStore(String[] groups, String hdfsUniqueName, Integer batchSize,
-      Integer batchInterval, Boolean minorCompact, Integer minorCompactionThreads, Boolean majorCompact,
-      Integer majorCompactionInterval, Integer majorCompactionThreads, Integer purgeInterval,
-      Integer fileRolloverInterval, Integer maxWriteonlyFileSize) {
-    
-    Set<DistributedMember> targetMembers = null;
-    try {
-      targetMembers = getGroupMembers(groups);
-    } catch (CommandResultException cre) {
-      return cre.getResult();
-    }
-    
-    TabularResultData tabularData = ResultBuilder.createTabularResultData();
-    
-	AlterHDFSStoreAttributes alterAttributes = new AlterHDFSStoreAttributes(
-				hdfsUniqueName, batchSize, batchInterval, minorCompact,
-				majorCompact, minorCompactionThreads, majorCompactionInterval,
-				majorCompactionThreads, purgeInterval, fileRolloverInterval,
-				maxWriteonlyFileSize);
-	
-    ResultCollector<?, ?> rc = getMembersFunctionExecutor(targetMembers)
-    .withArgs(alterAttributes).execute(new AlterHDFSStoreFunction());
-    
-    List<CliFunctionResult> results = CliFunctionResult.cleanResults((List<?>)rc.getResult());
-
-    XmlEntity xmlEntity = null;
-
-    for (CliFunctionResult result : results) {
-      if (result.getThrowable() != null) {
-        tabularData.accumulate("Member", result.getMemberIdOrName());
-        tabularData.accumulate("Result", "ERROR: " + result.getThrowable().getClass().getName() + ": "
-            + result.getThrowable().getMessage());
-        tabularData.setStatus(Status.ERROR);
-      }
-      else if (result.getMessage() != null) {
-        tabularData.accumulate("Member", result.getMemberIdOrName());
-        tabularData.accumulate("Result", result.getMessage());
-
-        if (xmlEntity == null) {
-          xmlEntity = result.getXmlEntity();
-        }
-      }
-    }
-    
-    Result result = ResultBuilder.buildResult(tabularData);
-    
-    if (xmlEntity != null) {
-      result.setCommandPersisted((new SharedConfigurationWriter()).deleteXmlEntity(xmlEntity, groups));
-    }
-    
-    return result;
-  }
-  @CliAvailabilityIndicator({CliStrings.CREATE_HDFS_STORE, CliStrings.LIST_HDFS_STORE,
-    CliStrings.DESCRIBE_HDFS_STORE, CliStrings.ALTER_HDFS_STORE, CliStrings.DESTROY_HDFS_STORE})
-  public boolean hdfsStoreCommandsAvailable() {
-    // these hdfs store commands are always available in GemFire
-    return (!CliUtil.isGfshVM() || (getGfsh() != null && getGfsh().isConnectedAndReady()));
-  }  
-  
-  @Override
-  protected Set<DistributedMember> getMembers(final Cache cache) {
-    return CliUtil.getAllMembers(cache);
-  }
-  
-  protected Set<DistributedMember> getNormalMembers(final Cache cache) {
-    return CliUtil.getAllNormalMembers(cache);
-  }
-  
-  protected Set<DistributedMember> getGroupMembers(String[] groups) throws CommandResultException {    
-      return  CliUtil.findAllMatchingMembers(groups, null); 
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java
deleted file mode 100644
index e595c77..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/converters/HdfsStoreNameConverter.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * =========================================================================
- *  Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- *  This product is protected by U.S. and international copyright
- *  and intellectual property laws. Pivotal products are covered by
- *  more patents listed at http://www.pivotal.io/patents.
- * ========================================================================
- */
-package com.gemstone.gemfire.management.internal.cli.converters;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import com.gemstone.gemfire.management.cli.ConverterHint;
-import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
-
-import org.springframework.shell.core.Completion;
-import org.springframework.shell.core.Converter;
-import org.springframework.shell.core.MethodTarget;
-
-/**
- * 
- * @author Namrata Thanvi
- * 
- */
-
-public class HdfsStoreNameConverter implements Converter<String> {
-
-  @Override
-  public boolean supports(Class<?> type, String optionContext) {
-    return String.class.equals(type) && ConverterHint.HDFSSTORE_ALL.equals(optionContext);
-  }
-
-  @Override
-  public String convertFromText(String value, Class<?> targetType, String optionContext) {
-    return value;
-  }
-
-  @Override
-  public boolean getAllPossibleValues(List<Completion> completions, Class<?> targetType, String existingData,
-      String optionContext, MethodTarget target) {
-    if (String.class.equals(targetType) && ConverterHint.HDFSSTORE_ALL.equals(optionContext)) {
-      Set<String> hdfsStoreNames = getHdfsStoreNames();
-
-      for (String hdfsStoreName : hdfsStoreNames) {
-        if (existingData != null) {
-          if (hdfsStoreName.startsWith(existingData)) {
-            completions.add(new Completion(hdfsStoreName));
-          }
-        }
-        else {
-          completions.add(new Completion(hdfsStoreName));
-        }
-      }
-    }
-
-    return !completions.isEmpty();
-  }
-
-  private Set<String> getHdfsStoreNames() {
-    SortedSet<String> hdfsStoreNames = new TreeSet<String>();
-    Gfsh gfsh = Gfsh.getCurrentInstance();
-
-    if (gfsh != null && gfsh.isConnectedAndReady()) {
-      Map<String, String[]> hdfsStoreInfo = gfsh.getOperationInvoker().getDistributedSystemMXBean()
-          .listMemberHDFSStore();
-      if (hdfsStoreInfo != null) {
-        Set<Entry<String, String[]>> entries = hdfsStoreInfo.entrySet();
-
-        for (Entry<String, String[]> entry : entries) {
-          String[] value = entry.getValue();
-          if (value != null) {
-            hdfsStoreNames.addAll(Arrays.asList(value));
-          }
-        }
-
-      }
-    }
-
-    return hdfsStoreNames;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
deleted file mode 100644
index b5b5341..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunction.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import java.io.Serializable;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.SystemFailure;
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.execute.FunctionAdapter;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreMutatorImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-
-/**
- * Function used by the 'alter hdfs-store' gfsh command to alter a hdfs store on
- * each member.
- * 
- * @author Namrata Thanvi
- */
-
-public class AlterHDFSStoreFunction extends FunctionAdapter implements InternalEntity {
-  private static final Logger logger = LogService.getLogger();
-
-  private static final String ID = AlterHDFSStoreFunction.class.getName();
-
-  private static final long serialVersionUID = 1L;
-
-  @Override
-  public void execute(FunctionContext context) {
-    String memberId = "";
-
-    try {
-      final AlterHDFSStoreAttributes alterAttributes = (AlterHDFSStoreAttributes)context.getArguments();      
-      GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
-      DistributedMember member = getDistributedMember(cache);
-
-      memberId = member.getId();
-      // If they set a name use it instead
-      if (!member.getName().equals("")) {
-        memberId = member.getName();
-      }      
-      HDFSStore hdfsStore = cache.findHDFSStore(alterAttributes.getHdfsUniqueName());      
-      CliFunctionResult result;
-      if (hdfsStore != null) {
-        // TODO - Need to verify what all attributes needs to be persisted in
-        // cache.xml
-        XmlEntity xmlEntity = getXMLEntity(hdfsStore.getName());
-        alterHdfsStore(hdfsStore, alterAttributes);
-        result = new CliFunctionResult(memberId, xmlEntity, "Success");
-      }
-      else {
-        result = new CliFunctionResult(memberId, false, "Hdfs store not found on this member");
-      }
-      context.getResultSender().lastResult(result);
-
-    } catch (CacheClosedException cce) {
-      CliFunctionResult result = new CliFunctionResult(memberId, false, null);
-      context.getResultSender().lastResult(result);
-
-    } catch (VirtualMachineError e) {
-      SystemFailure.initiateFailure(e);
-      throw e;
-
-    } catch (Throwable th) {
-      SystemFailure.checkFailure();
-      logger.error("Could not alter hdfs store: {}", th.getMessage(), th);
-
-      CliFunctionResult result = new CliFunctionResult(memberId, th, null);
-      context.getResultSender().lastResult(result);
-    }
-
-  }
-
-  @Override
-  public String getId() {
-    return ID;
-  }
-
-  /**
-   * Alter HDFSStore with given configuration.
-   * 
-   * @param hdfsStore
-   * @param alterAttributes
-   * @return HDFSStore
-   */
-
-  protected HDFSStore alterHdfsStore(HDFSStore hdfsStore, AlterHDFSStoreAttributes alterAttributes) {
-    HDFSStoreMutator storeMutator = new HDFSStoreMutatorImpl(hdfsStore);
-  
-		if (alterAttributes.getFileRolloverInterval() != null)
-			storeMutator.setWriteOnlyFileRolloverInterval(alterAttributes
-					.getFileRolloverInterval());
-
-		if (alterAttributes.getMaxWriteonlyFileSize() != null)
-			storeMutator.setWriteOnlyFileRolloverSize(alterAttributes.getMaxWriteonlyFileSize());
-
-		if (alterAttributes.getMinorCompact() != null)
-			storeMutator.setMinorCompaction(alterAttributes.getMinorCompact());
-
-		if (alterAttributes.getMajorCompact() != null)
-		  storeMutator.setMajorCompaction(alterAttributes.getMajorCompact());
-
-		if (alterAttributes.getMajorCompactionInterval() != null)
-		  storeMutator.setMajorCompactionInterval(alterAttributes.getMajorCompactionInterval());
-
-		if (alterAttributes.getMajorCompactionThreads() != null)
-		  storeMutator.setMajorCompactionThreads(alterAttributes.getMajorCompactionThreads());
-
-		if (alterAttributes.getMajorCompactionThreads() != null)
-		  storeMutator.setMinorCompactionThreads(alterAttributes.getMajorCompactionThreads());
-
-		if (alterAttributes.getPurgeInterval() != null)
-			storeMutator.setPurgeInterval(alterAttributes.getPurgeInterval());
-
-		if (alterAttributes.getBatchSize() != null)
-		  storeMutator.setBatchSize(alterAttributes.getBatchSize());
-
-		if (alterAttributes.getBatchInterval() != null)
-		  storeMutator.setBatchInterval(alterAttributes.getBatchInterval());
-
-		hdfsStore.alter(storeMutator);
-		return hdfsStore;
-  }
-  
-  
-  public static class AlterHDFSStoreAttributes implements Serializable {
-	private static final long serialVersionUID = 1L;
-	String hdfsUniqueName;
-      Integer batchSize , batchInterval;
-      Boolean minorCompact,  majorCompact;
-      Integer minorCompactionThreads, majorCompactionInterval, majorCompactionThreads, purgeInterval;
-      Integer fileRolloverInterval, maxWriteonlyFileSize;
-      
-	public AlterHDFSStoreAttributes(String hdfsUniqueName, Integer batchSize,
-			Integer batchInterval, Boolean minorCompact, Boolean majorCompact,
-			Integer minorCompactionThreads, Integer majorCompactionInterval,
-			Integer majorCompactionThreads, Integer purgeInterval,
-			Integer fileRolloverInterval, Integer maxWriteonlyFileSize) {
-		this.hdfsUniqueName = hdfsUniqueName;
-		this.batchSize = batchSize;
-		this.batchInterval = batchInterval;
-		this.minorCompact = minorCompact;
-		this.majorCompact = majorCompact;
-		this.minorCompactionThreads = minorCompactionThreads;
-		this.majorCompactionInterval = majorCompactionInterval;
-		this.majorCompactionThreads = majorCompactionThreads;
-		this.purgeInterval = purgeInterval;
-		this.fileRolloverInterval = fileRolloverInterval;
-		this.maxWriteonlyFileSize = maxWriteonlyFileSize;
-	}
-
-	public String getHdfsUniqueName() {
-		return hdfsUniqueName;
-	}
-
-	public Integer getBatchSize() {
-		return batchSize;
-	}
-
-	public Integer getBatchInterval() {
-		return batchInterval;
-	}
-
-	public Boolean getMinorCompact() {
-		return minorCompact;
-	}
-
-	public Boolean getMajorCompact() {
-		return majorCompact;
-	}
-
-	public Integer getMinorCompactionThreads() {
-		return minorCompactionThreads;
-	}
-
-	public Integer getMajorCompactionInterval() {
-		return majorCompactionInterval;
-	}
-
-	public Integer getMajorCompactionThreads() {
-		return majorCompactionThreads;
-	}
-
-	public Integer getPurgeInterval() {
-		return purgeInterval;
-	}
-
-	public Integer getFileRolloverInterval() {
-		return fileRolloverInterval;
-	}
-
-	public Integer getMaxWriteonlyFileSize() {
-		return maxWriteonlyFileSize;
-	}
-	  
-	
-  }
-  
-  
-  protected Cache getCache() {
-    return CacheFactory.getAnyInstance();
-  }
-  
-  protected DistributedMember getDistributedMember(Cache cache){
-    return ((InternalCache)cache).getMyId();
-  }
-  
-  protected XmlEntity getXMLEntity(String storeName){
-    return new XmlEntity(CacheXml.HDFS_STORE, "name", storeName);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
deleted file mode 100644
index b4e5033..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunction.java
+++ /dev/null
@@ -1,124 +0,0 @@
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import org.apache.logging.log4j.Logger;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.SystemFailure;
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.execute.FunctionAdapter;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
-
-import com.gemstone.gemfire.management.internal.cli.CliUtil;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-
-
-/**
- * Function used by the 'create hdfs-store' gfsh command to create a hdfs store
- * on each member.
- * 
- * @author Namrata Thanvi
- */
-
-public class CreateHDFSStoreFunction extends FunctionAdapter implements InternalEntity {
-  
-  private static final long serialVersionUID = 1L;
-
-  private static final Logger logger = LogService.getLogger();
-
-  public static final CreateHDFSStoreFunction INSTANCE = new CreateHDFSStoreFunction();
-
-  private static final String ID = CreateHDFSStoreFunction.class.getName();
-
-  @Override
-  public void execute(FunctionContext context) {
-    String memberId = "";
-    try {
-      Cache cache = getCache();      
-      DistributedMember member = getDistributedMember(cache);
-      
-      memberId = member.getId();
-      if (!member.getName().equals("")) {
-        memberId = member.getName();
-      }
-      HDFSStoreConfigHolder configHolder = (HDFSStoreConfigHolder)context.getArguments();
-     
-      HDFSStore hdfsStore = createHdfsStore(cache, configHolder);
-      // TODO - Need to verify what all attributes needs to be persisted in
-      // cache.xml
-      XmlEntity xmlEntity = getXMLEntity(hdfsStore.getName());
-      context.getResultSender().lastResult(new CliFunctionResult(memberId, xmlEntity, "Success"));
-
-    } catch (CacheClosedException cce) {
-      context.getResultSender().lastResult(new CliFunctionResult(memberId, false, null));
-
-    } catch (VirtualMachineError e) {
-      SystemFailure.initiateFailure(e);
-      throw e;
-
-    } catch (Throwable th) {
-      SystemFailure.checkFailure();
-      logger.error("Could not create hdfs store: {}", CliUtil.stackTraceAsString(th), th);
-      context.getResultSender().lastResult(new CliFunctionResult(memberId, th, th.getMessage()));
-    }
-  }
-
-  @Override
-  public String getId() {
-    return ID;
-  } 
-  
-  /**
-   * Creates the HDFSStore with given configuration.
-   * 
-   * @param cache
-   * @param configHolder
-   * @return HDFSStore
-   */
-
-  protected HDFSStore createHdfsStore(Cache cache, HDFSStoreConfigHolder configHolder) {    
-    HDFSStoreFactory hdfsStoreFactory = cache.createHDFSStoreFactory();
-    hdfsStoreFactory.setName(configHolder.getName());
-    hdfsStoreFactory.setNameNodeURL(configHolder.getNameNodeURL());
-    hdfsStoreFactory.setBlockCacheSize(configHolder.getBlockCacheSize());
-    hdfsStoreFactory.setWriteOnlyFileRolloverInterval(configHolder.getWriteOnlyFileRolloverInterval());
-    hdfsStoreFactory.setHomeDir(configHolder.getHomeDir());
-    hdfsStoreFactory.setHDFSClientConfigFile(configHolder.getHDFSClientConfigFile());
-    hdfsStoreFactory.setWriteOnlyFileRolloverSize(configHolder.getWriteOnlyFileRolloverSize());
-    hdfsStoreFactory.setMajorCompaction(configHolder.getMajorCompaction());
-    hdfsStoreFactory.setMajorCompactionInterval(configHolder.getMajorCompactionInterval());
-    hdfsStoreFactory.setMajorCompactionThreads(configHolder.getMajorCompactionThreads());
-    hdfsStoreFactory.setMinorCompaction(configHolder.getMinorCompaction());
-    hdfsStoreFactory.setMaxMemory(configHolder.getMaxMemory());
-    hdfsStoreFactory.setBatchSize(configHolder.getBatchSize());
-    hdfsStoreFactory.setBatchInterval(configHolder.getBatchInterval());
-    hdfsStoreFactory.setDiskStoreName(configHolder.getDiskStoreName());
-    hdfsStoreFactory.setDispatcherThreads(configHolder.getDispatcherThreads());
-    hdfsStoreFactory.setMinorCompactionThreads(configHolder.getMinorCompactionThreads());
-    hdfsStoreFactory.setPurgeInterval(configHolder.getPurgeInterval());
-    hdfsStoreFactory.setSynchronousDiskWrite(configHolder.getSynchronousDiskWrite());
-    hdfsStoreFactory.setBufferPersistent(configHolder.getBufferPersistent());
-    
-    return hdfsStoreFactory.create(configHolder.getName());   
-  }
-  
-  protected Cache getCache() {
-    return CacheFactory.getAnyInstance();
-  }
-  
-  protected DistributedMember getDistributedMember(Cache cache){
-    return ((InternalCache)cache).getMyId();
-  }
-  
-  protected XmlEntity getXMLEntity(String storeName){
-    return new XmlEntity(CacheXml.HDFS_STORE, "name", storeName);
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java
deleted file mode 100644
index 83f6740..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunction.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.SystemFailure;
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.execute.FunctionAdapter;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-
-/**
- * Function used by the 'destroy hdfs-store' gfsh command to destroy a hdfs
- * store on each member.
- * 
- * @author Namrata Thanvi
- */
-
-public class DestroyHDFSStoreFunction extends FunctionAdapter implements InternalEntity {
-  private static final Logger logger = LogService.getLogger();
-
-  private static final String ID = DestroyHDFSStoreFunction.class.getName();
-
-  private static final long serialVersionUID = 1L;
-
-  @Override
-  public void execute(FunctionContext context) {
-    String memberId = "";
-    try {
-      final String hdfsStoreName = (String)context.getArguments();
-      GemFireCacheImpl cache = (GemFireCacheImpl)getCache();      
-      DistributedMember member = getDistributedMember(cache);     
-      CliFunctionResult result;
-      
-      memberId = member.getId();
-      if (!member.getName().equals("")) {
-        memberId = member.getName();
-      }
-      
-      HDFSStoreImpl hdfsStore = cache.findHDFSStore(hdfsStoreName);
-      
-      if (hdfsStore != null) {
-        hdfsStore.destroy();
-        // TODO - Need to verify what all attributes needs to be persisted in cache.xml and how
-        XmlEntity xmlEntity = getXMLEntity(hdfsStoreName); 
-        result = new CliFunctionResult(memberId, xmlEntity, "Success");
-      }
-      else {
-        result = new CliFunctionResult(memberId, false, "Hdfs store not found on this member");
-      }
-      context.getResultSender().lastResult(result);   
-
-    } catch (CacheClosedException cce) {
-      CliFunctionResult result = new CliFunctionResult(memberId, false, null);
-      context.getResultSender().lastResult(result);
-
-    } catch (VirtualMachineError e) {
-      SystemFailure.initiateFailure(e);
-      throw e;
-
-    } catch (Throwable th) {
-      SystemFailure.checkFailure();
-      logger.error("Could not destroy hdfs store: {}", th.getMessage(), th);
-      CliFunctionResult result = new CliFunctionResult(memberId, th, null);
-      context.getResultSender().lastResult(result);
-    }
-  }
-
-  @Override
-  public String getId() {
-    return ID;
-  }
-  
-  protected Cache getCache() {
-    return CacheFactory.getAnyInstance();
-  }
-  
-  protected DistributedMember getDistributedMember(Cache cache){
-    return ((InternalCache)cache).getMyId();
-  }
-  
-  protected XmlEntity getXMLEntity(String storeName){
-    return new XmlEntity(CacheXml.HDFS_STORE, "name", storeName);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java
deleted file mode 100644
index fb947ae..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunction.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * more patents listed at http://www.pivotal.io/patents.
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import java.io.Serializable;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.logging.log4j.Logger;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.execute.FunctionAdapter;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.InternalEntity;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.internal.logging.LogService;
-
-/**
- * Function used by the 'list hdfs-stores' gfsh command to determine all the
- * Hdfs stores that exist for the entire cache, distributed across the GemFire distributed system.
- * on each member.
- * 
- * @author Namrata Thanvi
- */
-
-public class ListHDFSStoresFunction extends FunctionAdapter implements InternalEntity {
-
-  private static final long serialVersionUID = 1L;
-
-  private static final String ID = ListHDFSStoresFunction.class.getName();
-
-  private static final Logger logger = LogService.getLogger();
-
-  protected Cache getCache() {
-    return CacheFactory.getAnyInstance();
-  }
-  
-  protected DistributedMember getDistributedMemberId(Cache cache){
-    return ((InternalCache)cache).getMyId();
-  }
-  
-  public void execute(final FunctionContext context) {
-    Set<HdfsStoreDetails>  hdfsStores = new HashSet<HdfsStoreDetails>();
-    try {
-      final Cache cache = getCache();     
-      if (cache instanceof GemFireCacheImpl) {    
-        final GemFireCacheImpl gemfireCache = (GemFireCacheImpl)cache;
-        final DistributedMember member = getDistributedMemberId(cache);        
-        for (final HDFSStore store : gemfireCache.getHDFSStores()) {  
-          hdfsStores.add(new HdfsStoreDetails (store.getName() , member.getId() , member.getName()));      
-        }             
-      }
-      context.getResultSender().lastResult(hdfsStores);
-    } catch (Exception e) {
-      context.getResultSender().sendException(e);
-    }
-  } 
-  
-  @Override
-  public String getId() {
-    return ID;
-  }
-
-  
-  public static class HdfsStoreDetails implements Serializable {
-    private static final long serialVersionUID = 1L;
-    private String storeName;
-    private String memberId, memberName;
-    
-    public HdfsStoreDetails(String storeName, String memberId, String memberName) {
-      super();
-      this.storeName = storeName;
-      this.memberId = memberId;
-      this.memberName = memberName;
-    }
-    
-    public String getStoreName() {
-      return storeName;
-    }
-   
-    public String getMemberId() {
-      return memberId;
-    }
-   
-    public String getMemberName() {
-      return memberName;
-    }
-
-}
-}
-
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
index bd5e196..f7b2b4d 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
@@ -70,48 +70,9 @@ public class RegionFunctionArgs implements Serializable {
   private final boolean isSetCompressor;
   private Boolean offHeap;
   private final boolean isSetOffHeap;
-  private String hdfsStoreName;
-  private Boolean isSetHdfsWriteOnly = false;
-  private Boolean hdfsWriteOnly;
-
   private RegionAttributes<?, ?> regionAttributes;
 
   public RegionFunctionArgs(String regionPath,
-	      RegionShortcut regionShortcut, String useAttributesFrom,
-	      boolean skipIfExists, String keyConstraint, String valueConstraint,
-	      Boolean statisticsEnabled, 
-	      RegionFunctionArgs.ExpirationAttrs entryExpirationIdleTime, 
-	      RegionFunctionArgs.ExpirationAttrs entryExpirationTTL, 
-	      RegionFunctionArgs.ExpirationAttrs regionExpirationIdleTime, 
-	      RegionFunctionArgs.ExpirationAttrs regionExpirationTTL, String diskStore,
-	      Boolean diskSynchronous, Boolean enableAsyncConflation,
-	      Boolean enableSubscriptionConflation, String[] cacheListeners,
-	      String cacheLoader, String cacheWriter, String[] asyncEventQueueIds,
-	      String[] gatewaySenderIds, Boolean concurrencyChecksEnabled,
-	      Boolean cloningEnabled, Integer concurrencyLevel, String prColocatedWith,
-	      Integer prLocalMaxMemory, Long prRecoveryDelay,
-	      Integer prRedundantCopies, Long prStartupRecoveryDelay,
-	      Long prTotalMaxMemory, Integer prTotalNumBuckets, Integer evictionMax,
-	      String compressor, Boolean offHeap , String hdfsStoreName , Boolean hdfsWriteOnly) {	
-		this(regionPath, regionShortcut, useAttributesFrom, skipIfExists,
-				keyConstraint, valueConstraint, statisticsEnabled,
-				entryExpirationIdleTime, entryExpirationTTL,
-				regionExpirationIdleTime, regionExpirationTTL, diskStore,
-				diskSynchronous, enableAsyncConflation,
-				enableSubscriptionConflation, cacheListeners, cacheLoader,
-				cacheWriter, asyncEventQueueIds, gatewaySenderIds,
-				concurrencyChecksEnabled, cloningEnabled, concurrencyLevel,
-				prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
-				prRedundantCopies, prStartupRecoveryDelay, prTotalMaxMemory,
-				prTotalNumBuckets, evictionMax, compressor, offHeap);	
-		this.isSetHdfsWriteOnly = hdfsWriteOnly != null;
-		if (isSetHdfsWriteOnly) {
-			this.hdfsWriteOnly = hdfsWriteOnly;
-		}
-		if (hdfsStoreName != null )
-		  this.hdfsStoreName = hdfsStoreName;
-  }
-  public RegionFunctionArgs(String regionPath,
       RegionShortcut regionShortcut, String useAttributesFrom,
       boolean skipIfExists, String keyConstraint, String valueConstraint,
       Boolean statisticsEnabled, 
@@ -219,8 +180,7 @@ public class RegionFunctionArgs implements Serializable {
       Integer prLocalMaxMemory, Long prRecoveryDelay,
       Integer prRedundantCopies, Long prStartupRecoveryDelay,
       Long prTotalMaxMemory, Integer prTotalNumBuckets, 
-      Boolean offHeap, String hdfsStoreName , Boolean hdfsWriteOnly , 
-      RegionAttributes<?, ?> regionAttributes) {   
+      Boolean offHeap, RegionAttributes<?, ?> regionAttributes) {   
     this(regionPath, null, useAttributesFrom, skipIfExists, keyConstraint,
         valueConstraint, statisticsEnabled, entryExpirationIdleTime,
         entryExpirationTTL, regionExpirationIdleTime, regionExpirationTTL,
@@ -230,7 +190,7 @@ public class RegionFunctionArgs implements Serializable {
         concurrencyChecksEnabled, cloningEnabled, concurrencyLevel, 
         prColocatedWith, prLocalMaxMemory, prRecoveryDelay,
         prRedundantCopies, prStartupRecoveryDelay,
-        prTotalMaxMemory, prTotalNumBuckets, null, null, offHeap , hdfsStoreName , hdfsWriteOnly);
+        prTotalMaxMemory, prTotalNumBuckets, null, null, offHeap);
     this.regionAttributes = regionAttributes;
   }
 
@@ -277,28 +237,6 @@ public class RegionFunctionArgs implements Serializable {
   }  
 
   /**
-   * @return the hdfsStoreName
-   */
-  public String getHDFSStoreName() {
-    return this.hdfsStoreName;
-  }  
-
-  /**
-   * @return the hdfsWriteOnly
-   */
-  public Boolean getHDFSWriteOnly() {
-    return this.hdfsWriteOnly;
-  }
-  
-  /**
-   * @return the isSetHDFSWriteOnly
-   */
-  public Boolean isSetHDFSWriteOnly() {
-    return this.isSetHdfsWriteOnly;
-  }
-  
-  
-  /**
    * @return the valueConstraint
    */
   public String getValueConstraint() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
index a4561bf..5ae8e82 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
@@ -11,10 +11,7 @@ package com.gemstone.gemfire.management.internal.cli.i18n;
 import java.text.MessageFormat;
 
 import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
 import com.gemstone.gemfire.cache.server.CacheServer;
-import com.gemstone.gemfire.cache.wan.GatewayEventFilter;
-import com.gemstone.gemfire.cache.wan.GatewayEventSubstitutionFilter;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
@@ -104,8 +101,6 @@ public class CliStrings {
   public static final String TOPIC_SHARED_CONFIGURATION = "Cluster Configuration";
   public static final String TOPIC_SHARED_CONFIGURATION_HELP = "Configuration for cluster and various groups. It consists of cache.xml, gemfire properties and deployed jars.\nChanges due to gfshs command are persisted to the locator hosting the cluster configuration service.";
   public static final String TOPIC_CHANGELOGLEVEL = "User can change the log-level for a  memeber run time and generate log contents as per the need";
-  public static final String TOPIC_GEMFIRE_HDFSSTORE = "Hdfs Store";
-  public static final String TOPIC_GEMFIRE_HDFSSTORE__DESC = "Hdfs stores are used to persist data to hadoop distributed file system as a backup to your in-memory copy or as overflow storage when eviction criteria is specified.";
 
   /*-*************************************************************************
    * ********* String Constants other than command name, options & help ******
@@ -680,114 +675,7 @@ public class CliStrings {
 
   public static final String CREATE_REGION__OFF_HEAP = "off-heap";
   public static final String CREATE_REGION__OFF_HEAP__HELP = "Causes the values of the region to be stored in off-heap memory. The default is on heap.";
-  public static final String CREATE_REGION__HDFSSTORE_NAME = "hdfs-store";
-  public static final String CREATE_REGION__HDFSSTORE_NAME__HELP = "HDFS Store to be used by this region. \"list hdfs-store\" can be used to display existing HDFSStores.";
-  public static final String CREATE_REGION__HDFSSTORE_WRITEONLY = "hdfs-write-only";
-  public static final String CREATE_REGION__HDFSSTORE_WRITEONLY__HELP = "HDFS write-only mode will be used. All data will be persisted in the HDFS store, and user can access the stored data only through the MapReduce API";
-  /* hdfsstore commands  */  
-  public static final String CREATE_HDFS_STORE ="create hdfs-store";
-  public static final String CREATE_HDFS_STORE__HELP = "Create a hdfsstore and persist region data on the specified hadoop cluster.";
-  public static final String CREATE_HDFS_STORE__NAME = "name";
-  public static final String CREATE_HDFS_STORE__NAME__HELP = "Name of the store.";
-  public static final String CREATE_HDFS_STORE__NAMENODE = "namenode";
-  public static final String CREATE_HDFS_STORE__NAMENODE__HELP = "The URL of the Hadoop NameNode for your HD cluster.HDFSStore persists data on a HDFS cluster identified by cluster's NameNode URL or NameNode Service URL.NameNode URL can also be provided via hdfs-site.xml";
-  public static final String CREATE_HDFS_STORE__HOMEDIR = "home-dir";
-  public static final String CREATE_HDFS_STORE__HOMEDIR__HELP ="The HDFS directory path in which HDFSStore stores files. The value must not contain the NameNode URL";
-  public static final String CREATE_HDFS_STORE__READCACHESIZE= "read-cache-size";
-  public static final String CREATE_HDFS_STORE__READCACHESIZE__HELP ="The maximum amount of memory in megabytes used by HDFSStore read cache.";  
-  public static final String CREATE_HDFS_STORE__BATCHSIZE = "batch-size";
-  public static final String CREATE_HDFS_STORE__BATCHSIZE__HELP ="HDFSStore buffer data is persisted on HDFS in batches, and the BatchSize defines the maximum size (in megabytes) of each batch that is written to HDFS.";
-  public static final String CREATE_HDFS_STORE__BATCHINTERVAL = "batch-interval";
-  public static final String CREATE_HDFS_STORE__BATCHINTERVAL__HELP ="It defines the maximum time that can elapse between writing batches to HDFS. ";
-  public static final String CREATE_HDFS_STORE__MAXMEMORY = "max-memory";
-  public static final String CREATE_HDFS_STORE__MAXMEMORY__HELP ="The maximum amount of memory in megabytes used by HDFSStore";
-  public static final String CREATE_HDFS_STORE__DISPATCHERTHREADS = "dispatcher-threads";
-  public static final String CREATE_HDFS_STORE__DISPATCHERTHREADS__HELP ="The maximum number of threads (per region) used to write batches of HDFS.";
-  public static final String CREATE_HDFS_STORE__BUFFERPERSISTENT = "buffer-persistent";
-  public static final String CREATE_HDFS_STORE__BUFFERPERSISTENT__HELP ="Configure if HDFSStore in-memory buffer data, that has not been persisted on HDFS yet, should be persisted to a local disk to buffer prevent data loss";
-  public static final String CREATE_HDFS_STORE__SYNCDISKWRITE = "synchronous-disk-write";
-  public static final String CREATE_HDFS_STORE__SYNCDISKWRITE__HELP ="Enable or disable synchronous writes to the local DiskStore.";
-  public static final String CREATE_HDFS_STORE__DISKSTORENAME = "disk-store-name";
-  public static final String CREATE_HDFS_STORE__DISKSTORENAME__HELP ="The named DiskStore to use for any local disk persistence needs of HDFSStore.";
-  public static final String CREATE_HDFS_STORE__MINORCOMPACT= "minor-compact";
-  public static final String CREATE_HDFS_STORE__MINORCOMPACT__HELP ="Minor compaction reorganizes data in files to optimize read performance and reduce number of files created on HDFS.";
-  
-  public static final String CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS = "minor-compaction-threads";
-  public static final String CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP ="The maximum number of threads that GemFire uses to perform minor compaction in this HDFS store.";
-  public static final String CREATE_HDFS_STORE__MAJORCOMPACT= "major-compact";
-  public static final String CREATE_HDFS_STORE__MAJORCOMPACT__HELP ="Major compaction removes old values of a key and deleted records from the HDFS files.";
-  public static final String CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL= "major-compaction-interval";
-  public static final String CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP ="Interval Between two major compactions.";
-  public static final String CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS = "major-compaction-threads";
-  public static final String CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP ="The maximum number of threads that GemFire uses to perform major compaction in this HDFS store.";
-  public static final String CREATE_HDFS_STORE__PURGEINTERVAL = "purge-interval";
-  public static final String CREATE_HDFS_STORE__PURGEINTERVAL__HELP ="PurgeInterval defines the amount of time old files remain available for MapReduce jobs. After this interval has passed, old files are deleted.";
-  public static final String CREATE_HDFS_STORE__WRITEONLYFILESIZE = "max-write-only-file-size";
-  public static final String CREATE_HDFS_STORE__WRITEONLYFILESIZE__HELP ="For HDFS write-only regions, this defines the maximum size (in megabytes) that an HDFS log file can reach before HDFSStore closes the file and begins writing to a new file.";
-  public static final String CREATE_HDFS_STORE__FILEROLLOVERINTERVAL = "write-only-file-rollover-interval";
-  public static final String CREATE_HDFS_STORE__FILEROLLOVERINTERVAL__HELP ="For HDFS write-only regions, this defines the maximum time that can elapse before HDFSStore closes an HDFS file and begins writing to a new file.";  
-  public static final String CREATE_HDFS_STORE__CLIENTCONFIGFILE = "client-config-files";
-  public static final String CREATE_HDFS_STORE__CLIENTCONFIGFILE__HELP ="The full path to the HDFS client configuration file that the store uses.The full path to the HDFS client configuration files, for e.g. hdfs-site.xml and core-site.xml. These files must be accessible to any node where an instance of this HDFSStore will be created";
-  public static final String CREATE_HDFS_STORE__ERROR_WHILE_CREATING_REASON_0 = "An error occurred while creating the hdfs store: \"{0}\"";
-  public static final String CREATE_HDFS_STORE__GROUP = "group";
-  public static final String CREATE_HDFS_STORE__GROUP__HELP = "Group(s) of members on which the hdfs store will be created. If no group is specified the hdfs store will be created on all members.";
-    
-  /*HDFS describe command*/
-  public static final String DESCRIBE_HDFS_STORE = "describe hdfs-store";
-  public static final String DESCRIBE_HDFS_STORE__HELP = "Display information about a hdfs store.";
-  public static final String DESCRIBE_HDFS_STORE__NAME = "name";
-  public static final String DESCRIBE_HDFS_STORE__NAME__HELP = "name of the hdfs store";
-  public static final String DESCRIBE_HDFS_STORE__MEMBER = "member";
-  public static final String DESCRIBE_HDFS_STORE__MEMBER__HELP = "Name/Id of the member with the hdfs store to be described.";
-  public static final String DESCRIBE_HDFS_STORE__ERROR_MESSAGE = "An error occurred while getting information about the hdfs store: \"{0}\"";
-  
-  /*HDFS list command*/
-  public static final String LIST_HDFS_STORE = "list hdfs-stores";
-  public static final String LIST_HDFS_STORE__HELP = "Display hdfs stores for all members.";
-  public static final String LIST_HDFS_STORE__NAME__HELP = "name of the hdfs store";
-  public static final String LIST_HDFS_STORE__ERROR_MESSAGE = "An error occurred while collecting Hdfs Store information for all members across the GemFire cluster: %1$s";
-  public static final String LIST_HDFS_STORE__HDFS_STORES_NOT_FOUND_MESSAGE = "No Hdfs Stores Found";
-  
-  
-  /* 'destroy hdfs-store' command */
-  public static final String DESTROY_HDFS_STORE = "destroy hdfs-store";
-  public static final String DESTROY_HDFS_STORE__HELP = "Destroy a hdfs store";
-  public static final String DESTROY_HDFS_STORE__NAME = "name";
-  public static final String DESTROY_HDFS_STORE__NAME__HELP = "Name of the hdfs store that will be destroyed.";
-  public static final String DESTROY_HDFS_STORE__GROUP = "group";
-  public static final String DESTROY_HDFS_STORE__GROUP__HELP = "Group(s) of members on which the hdfs store will be destroyed. If no group is specified the hdfs store will be destroyed on all members.";
-  public static final String DESTROY_HDFS_STORE__ERROR_WHILE_DESTROYING_REASON_0 = "An error occurred while destroying the hdfs store: \"{0}\"";
 
-  
-  /* 'alter hdfs-store' command */
-  public static final String ALTER_HDFS_STORE = "alter hdfs-store";
-  public static final String ALTER_HDFS_STORE__HELP = "Alter a hdfs store";
-  public static final String ALTER_HDFS_STORE__NAME = "name";
-  public static final String ALTER_HDFS_STORE__NAME__HELP = "Name of the hdfs store that will be Altered.";
-  public static final String ALTER_HDFS_STORE__GROUP = "group";  
-  public static final String ALTER_HDFS_STORE__GROUP__HELP = "Group(s) of members on which the hdfs store will be altered. If no group is specified the hdfs store will be altered on all members.";
-  public static final String ALTER_HDFS_STORE__ERROR_WHILE_ALTERING_REASON_0 = "An error occurred while altering the hdfs store: \"{0}\"";
-  public static final String ALTER_HDFS_STORE__BATCHSIZE = "batch-size";
-  public static final String ALTER_HDFS_STORE__BATCHSIZE__HELP ="HDFSStore buffer data is persisted on HDFS in batches, and the BatchSize defines the maximum size (in megabytes) of each batch that is written to HDFS.";
-  public static final String ALTER_HDFS_STORE__BATCHINTERVAL = "batch-interval";
-  public static final String ALTER_HDFS_STORE__BATCHINTERVAL__HELP ="It defines the maximum time that can elapse between writing batches to HDFS. ";
-  public static final String ALTER_HDFS_STORE__MINORCOMPACT= "minor-compact";
-  public static final String ALTER_HDFS_STORE__MINORCOMPACT__HELP ="Minor compaction reorganizes data in files to optimize read performance and reduce number of files created on HDFS.";  
-  public static final String ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS = "minor-compaction-threads";
-  public static final String ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS__HELP ="The maximum number of threads that GemFire uses to perform minor compaction in this HDFS store.";
-  public static final String ALTER_HDFS_STORE__MAJORCOMPACT= "major-compact";
-  public static final String ALTER_HDFS_STORE__MAJORCOMPACT__HELP ="Major compaction removes old values of a key and deleted records from the HDFS files.";
-  public static final String ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL= "major-compaction-interval";
-  public static final String ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL__HELP ="Interval Between two major compactions.";
-  public static final String ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS = "major-compaction-threads";
-  public static final String ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS__HELP ="The maximum number of threads that GemFire uses to perform major compaction in this HDFS store.";
-  public static final String ALTER_HDFS_STORE__PURGEINTERVAL = "purge-interval";
-  public static final String ALTER_HDFS_STORE__PURGEINTERVAL__HELP ="PurgeInterval defines the amount of time old files remain available for MapReduce jobs. After this interval has passed, old files are deleted.";
-  public static final String ALTER_HDFS_STORE__FILEROLLOVERINTERVAL = "write-only-file-rollover-interval";
-  public static final String ALTER_HDFS_STORE__FILEROLLOVERINTERVAL__HELP = "For HDFS write-only regions, this defines the maximum time that can elapse before HDFSStore closes an HDFS file and begins writing to a new file.";  
-  public static final String ALTER_HDFS_STORE__WRITEONLYFILESIZE = "max-write-only-file-size";
-  public static final String ALTER_HDFS_STORE__WRITEONLYFILESIZE__HELP ="For HDFS write-only regions, this defines the maximum size (in megabytes) that an HDFS log file can reach before HDFSStore closes the file and begins writing to a new file.";
-  
   /* debug command */
   public static final String DEBUG = "debug";
   public static final String DEBUG__HELP = "Enable/Disable debugging output in GFSH.";



[34/50] [abbrv] incubator-geode git commit: GEODE-468: Using 1.8 generated files for AnalyzeSerializablesJUnitTest

Posted by ds...@apache.org.
GEODE-468: Using 1.8 generated files for AnalyzeSerializablesJUnitTest

This test was passing every time with 1.8 because of a short circuit in
the test. I've regenerated the files with 1.8 and updated the test to
use junit Assume to ignore the test with the wrong JDK - at least it
will get reported as ignored.

The test was failing due to HDFS code removal. The newly generated
files also have the HDFS fields removed.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/37f77a90
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/37f77a90
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/37f77a90

Branch: refs/heads/develop
Commit: 37f77a90a8567c1e4612c01dbffa6ce659a83eb2
Parents: 593d176
Author: Dan Smith <up...@apache.org>
Authored: Thu Oct 22 13:09:49 2015 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu Oct 22 13:12:59 2015 -0700

----------------------------------------------------------------------
 .../AnalyzeSerializablesJUnitTest.java          |   26 +-
 .../sanctionedDataSerializables.txt             | 1755 ++++++++----------
 .../codeAnalysis/sanctionedSerializables.txt    |   27 +-
 3 files changed, 797 insertions(+), 1011 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/37f77a90/gemfire-core/src/test/java/com/gemstone/gemfire/codeAnalysis/AnalyzeSerializablesJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/codeAnalysis/AnalyzeSerializablesJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/codeAnalysis/AnalyzeSerializablesJUnitTest.java
index 7cd0ca7..2f02de4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/codeAnalysis/AnalyzeSerializablesJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/codeAnalysis/AnalyzeSerializablesJUnitTest.java
@@ -20,8 +20,10 @@ import java.util.List;
 import java.util.Map;
 
 import org.junit.AfterClass;
+import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -40,7 +42,6 @@ import com.gemstone.gemfire.util.test.TestUtil;
 public class AnalyzeSerializablesJUnitTest {
   /** all loaded classes */
   protected static Map<String, CompiledClass> classes = new HashMap<String, CompiledClass>();
-  protected static boolean DISABLED = true;
   private static boolean ClassesNotFound;
   
   public AnalyzeSerializablesJUnitTest() {
@@ -48,17 +49,15 @@ public class AnalyzeSerializablesJUnitTest {
   
   @Before
   public void loadClasses() throws Exception {
-    if (classes.size() > 0) {
-      return;
-    }
-    System.out.println("loadClasses starting");
     String version = System.getProperty("java.runtime.version");
-    if (version == null || !version.startsWith("1.7")) {
+    boolean jdk17 = version != null && version.startsWith("1.8");
       // sanctioned info is based on a 1.7 compiler
-      System.out.println("AnalyzeSerializables requires a Java 7 but tests are running with v"+version);
-      DISABLED=true;
+    Assume.assumeTrue("AnalyzeSerializables requires a Java 7 but tests are running with v"+version, jdk17);
+    if (classes.size() > 0) {
       return;
     }
+    System.out.println("loadClasses starting");
+    
     List<String> excludedClasses = loadExcludedClasses(new File(TestUtil.getResourcePath(AnalyzeSerializablesJUnitTest.class, "excludedClasses.txt")));
     List<String> openBugs = loadOpenBugs(new File(TestUtil.getResourcePath(AnalyzeSerializablesJUnitTest.class, "openBugs.txt")));
     excludedClasses.addAll(openBugs);
@@ -94,7 +93,6 @@ public class AnalyzeSerializablesJUnitTest {
     else {
       fail("unable to find geode classes");
     }
-    DISABLED = false;
   }
   
   @AfterClass
@@ -129,7 +127,6 @@ public class AnalyzeSerializablesJUnitTest {
     else {
       fail("unable to find jgroups jar");
     }
-    DISABLED = false;
   }
   
   protected static List<String> loadExcludedClasses(File exclusionsFile) throws Exception {
@@ -162,7 +159,6 @@ public class AnalyzeSerializablesJUnitTest {
         if (line.length() > 0 && !line.startsWith("#")) {
           String[] split = line.split(",");
           if (split.length != 2) {
-            DISABLED = true; // don't run the other tests
             fail("unable to load classes due to misformatted line in openBugs.txt: " + line);
           }
           excludedClasses.add(line.split(",")[1].trim());
@@ -189,10 +185,6 @@ public class AnalyzeSerializablesJUnitTest {
       System.out.println("... test not run due to not being able to locate product class files");
       return;
     }
-    if (DISABLED) {
-      System.out.println("... test is disabled");
-      return;
-    }
     String compareToFileName = TestUtil.getResourcePath(getClass(), "sanctionedDataSerializables.txt");
 
     String storeInFileName = "actualDataSerializables.dat";
@@ -233,10 +225,6 @@ public class AnalyzeSerializablesJUnitTest {
       System.out.println("... test not run due to not being able to locate product class files");
       return;
     }
-    if (DISABLED) {
-      System.out.println("... test is disabled");
-      return;
-    }
     String compareToFileName = TestUtil.getResourcePath(getClass(), "sanctionedSerializables.txt");
     File compareToFile = new File(compareToFileName);
 


[10/50] [abbrv] incubator-geode git commit: GEODE-429: Remove RegionFactory.setHdfsStore

Posted by ds...@apache.org.
GEODE-429: Remove RegionFactory.setHdfsStore


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7bcc1e44
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7bcc1e44
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7bcc1e44

Branch: refs/heads/develop
Commit: 7bcc1e44cb7f0f69381c06d583b058926ca85331
Parents: b3f838e
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 13:41:31 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:22 2015 -0700

----------------------------------------------------------------------
 .../gemstone/gemfire/cache/RegionFactory.java   | 25 --------------------
 .../cli/functions/RegionCreateFunction.java     |  8 -------
 .../hdfs/internal/HDFSConfigJUnitTest.java      | 16 ++++++++-----
 .../hdfs/internal/HDFSEntriesSetJUnitTest.java  |  3 ++-
 .../internal/hoplog/BaseHoplogTestCase.java     |  2 +-
 .../HdfsSortedOplogOrganizerJUnitTest.java      |  2 +-
 ...FSQueueRegionOperationsOffHeapJUnitTest.java |  2 +-
 .../cache/HDFSRegionOperationsJUnitTest.java    |  4 ++--
 .../HDFSRegionOperationsOffHeapJUnitTest.java   |  2 +-
 .../HDFSRegionMBeanAttributeJUnitTest.java      |  2 +-
 10 files changed, 19 insertions(+), 47 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
index 40041cb..72a0a44 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
@@ -902,31 +902,6 @@ public class RegionFactory<K,V>
     this.attrsFactory.addAsyncEventQueueId(asyncEventQueueId);
     return this;
   }
-  /**
-   * Sets the HDFSStore name attribute.
-   * This causes the region to belong to the HDFSStore.
-   * @param name the name of the hdfsstore
-   * @return a reference to this RegionFactory object
-   * 
-   * @see AttributesFactory#setHDFSStoreName
-   * @since 9.0
-   */
-  public RegionFactory<K,V> setHDFSStoreName(String name) {
-    this.attrsFactory.setHDFSStoreName(name);
-    return this;
-  }
-  
-  /**
-   * Sets the HDFS write only attribute. if the region
-   * is configured to be write only to HDFS, events that have 
-   * been evicted from memory cannot be read back from HDFS.
-   * Events are written to HDFS in the order in which they occurred.
-   * @since 9.0
-   */
-  public RegionFactory<K,V> setHDFSWriteOnly(boolean writeOnly) {
-    this.attrsFactory.setHDFSWriteOnly(writeOnly);
-    return this;
-  }
 
   /**
    * Set the compressor to be used by this region for compressing

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
index 74afc47..3bf8b3f 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
@@ -309,14 +309,6 @@ public class RegionCreateFunction extends FunctionAdapter implements InternalEnt
     
     String regionName = regionPathData.getName();
     
-    final String hdfsStoreName = regionCreateArgs.getHDFSStoreName();
-	if (hdfsStoreName != null && !hdfsStoreName.isEmpty()) {
-		factory.setHDFSStoreName(hdfsStoreName);		
-	}
-	if (regionCreateArgs.isSetHDFSWriteOnly()) {
-		factory.setHDFSWriteOnly(regionCreateArgs.getHDFSWriteOnly());
-	}
-	  
     if (parentRegion != null) {
       createdRegion = factory.createSubregion(parentRegion, regionName);
     } else {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
index b0c6520..26e6c73 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
@@ -72,7 +72,8 @@ public class HDFSConfigJUnitTest extends TestCase {
         HDFSStoreFactory hsf = this.c.createHDFSStoreFactory();
         HDFSStore store = hsf.create("myHDFSStore");
         RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
-        Region r1 = rf1.setHDFSStoreName("myHDFSStore").create("r1");
+//        rf1.setHDFSStoreName("myHDFSStore");
+        Region r1 = rf1.create("r1");
        
         r1.put("k1", "v1");
         
@@ -89,8 +90,9 @@ public class HDFSConfigJUnitTest extends TestCase {
         hsf = this.c.createHDFSStoreFactory();
         hsf.create("myHDFSStore");
         
-        r1 = this.c.createRegionFactory(RegionShortcut.PARTITION).setHDFSStoreName("myHDFSStore")
-              .create("r1");
+        RegionFactory<Object, Object> rf = this.c.createRegionFactory(RegionShortcut.PARTITION);
+//        rf.setHDFSStoreName("myHDFSStore");
+        r1 = rf.create("r1");
        
         r1.put("k1", "v1");
         assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
@@ -126,8 +128,9 @@ public class HDFSConfigJUnitTest extends TestCase {
         hsf.create("myHDFSStore");
         
         
-        r1 = this.c.createRegionFactory(RegionShortcut.PARTITION).setHDFSStoreName("myHDFSStore")
-            .setHDFSWriteOnly(true).create("r1");
+        rf = this.c.createRegionFactory(RegionShortcut.PARTITION);
+//        rf.setHDFSStoreName("myHDFSStore").setHDFSWriteOnly(true);
+        r1 = rf.create("r1");
        
         r1.put("k1", "v1");
         store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
@@ -469,7 +472,8 @@ public class HDFSConfigJUnitTest extends TestCase {
       HDFSStoreImpl store = (HDFSStoreImpl) hsf.create("myHDFSStore");
       RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
       //Create a region that evicts everything
-      LocalRegion r1 = (LocalRegion) rf1.setHDFSStoreName("myHDFSStore").setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1)).create("r1");
+//      rf1.setHDFSStoreName("myHDFSStore");
+      LocalRegion r1 = (LocalRegion) rf1.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1)).create("r1");
      
       //Populate about many times our block cache size worth of data
       //We want to try to cache at least 5 blocks worth of index and metadata

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
index f864176..3085a66 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
@@ -73,7 +73,8 @@ public class HDFSEntriesSetJUnitTest extends TestCase {
     paf.setTotalNumBuckets(1);
     
     RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION);
-    region = (PartitionedRegion) rf.setHDFSStoreName("test").setPartitionAttributes(paf.create()).create("test");
+//    rf.setHDFSStoreName("test");
+    region = (PartitionedRegion) rf.setPartitionAttributes(paf.create()).create("test");
     
     // prime the region so buckets get created
     region.put("test", "test");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
index b35f756..07d9f77 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
@@ -90,7 +90,7 @@ public abstract class BaseHoplogTestCase extends TestCase {
     hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
 
     regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
-    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
+//    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
     region = regionfactory.create(getName());
     
     // disable compaction by default and clear existing queues

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
index 4529067..e6a1229 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
@@ -744,7 +744,7 @@ public class HdfsSortedOplogOrganizerJUnitTest extends BaseHoplogTestCase {
     }
       
     // create region with store
-    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
+//    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
     Region<Object, Object> region1 = regionfactory.create("region-1");
     ExpectedException ex = DistributedTestCase.addExpectedException("CorruptHFileException");
     try {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
index 4565568..24cd1dc 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
@@ -37,7 +37,7 @@ public class HDFSQueueRegionOperationsOffHeapJUnitTest extends HDFSQueueRegionOp
     PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
     rf.setPartitionAttributes(prAttr);
     rf.setOffHeap(true);
-    rf.setHDFSStoreName(hdfsStore.getName());
+//    rf.setHDFSStoreName(hdfsStore.getName());
     Region<Integer, String> r = rf.create(regionName);
 //    addListener(r);
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
index b24ee5d..d96e31b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
@@ -108,7 +108,7 @@ public class HDFSRegionOperationsJUnitTest extends TestCase {
     RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
     PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
     rf.setPartitionAttributes(prAttr);
-    rf.setHDFSStoreName(hdfsStore.getName());
+//    rf.setHDFSStoreName(hdfsStore.getName());
     Region<Integer, String> r = rf.create(regionName);
     
     ((PartitionedRegion) r).setQueryHDFS(true);
@@ -265,7 +265,7 @@ public class HDFSRegionOperationsJUnitTest extends TestCase {
 
   public void test050LRURegionAttributesForPR() {
     RegionFactory<Integer, String> rf = cache.createRegionFactory();
-    rf.setHDFSStoreName(hdfsStore.getName());
+//    rf.setHDFSStoreName(hdfsStore.getName());
     rf.setDataPolicy(DataPolicy.HDFS_PARTITION);
     verifyLRURegionAttributesForPR(rf.create(getName()));
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
index f9c96a2..de2aae3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
@@ -59,7 +59,7 @@ public class HDFSRegionOperationsOffHeapJUnitTest extends HDFSRegionOperationsJU
     PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
     rf.setPartitionAttributes(prAttr);
     rf.setOffHeap(true);
-    rf.setHDFSStoreName(hdfsStore.getName());
+//    rf.setHDFSStoreName(hdfsStore.getName());
     Region<Integer, String> r = rf.create(regionName);
 //    addListener(r);
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7bcc1e44/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
index c563d5a..14b61e6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
@@ -77,7 +77,7 @@ public class HDFSRegionMBeanAttributeJUnitTest extends TestCase {
     hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
 
     RegionFactory<Object, Object> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
-    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
+//    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
 
     // regionfactory.setCompressionCodec("Some");
     PartitionAttributesFactory fac = new PartitionAttributesFactory();


[45/50] [abbrv] incubator-geode git commit: [GEODE-480] ConcurrentIndexUpdateWithoutWLDUnitTest.testRangeIndex Stat in abstractIndex should be atomic or in a synchronized block

Posted by ds...@apache.org.
[GEODE-480] ConcurrentIndexUpdateWithoutWLDUnitTest.testRangeIndex
The stat in AbstractIndex should be atomic or updated in a synchronized block


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f437106e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f437106e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f437106e

Branch: refs/heads/develop
Commit: f437106ecde4414b04d9e21e0af4df41812b82b6
Parents: f0bd8b0
Author: Jason Huynh <jh...@pivotal.io>
Authored: Fri Oct 23 12:40:54 2015 -0700
Committer: Jason Huynh <jh...@pivotal.io>
Committed: Fri Oct 23 12:40:54 2015 -0700

----------------------------------------------------------------------
 .../query/internal/index/AbstractIndex.java     | 23 ++++++++++----------
 1 file changed, 12 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f437106e/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/AbstractIndex.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/AbstractIndex.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/AbstractIndex.java
index 6c0d0bf..aca4078 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/AbstractIndex.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/AbstractIndex.java
@@ -17,6 +17,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -1744,7 +1745,7 @@ public abstract class AbstractIndex implements IndexProtocol
   {
     protected Map map;
     private boolean useList;
-    private volatile int numValues = 0;
+    private AtomicInteger numValues = new AtomicInteger(0);
 
     RegionEntryToValuesMap(boolean useList) {
       this.map = new ConcurrentHashMap(2, 0.75f, 1);
@@ -1793,7 +1794,7 @@ public abstract class AbstractIndex implements IndexProtocol
         coll.add(value);
         map.put(entry, coll);
       }
-      this.numValues++;
+      numValues.incrementAndGet();
     }
 
     public void addAll(RegionEntry entry, Collection values)
@@ -1803,7 +1804,7 @@ public abstract class AbstractIndex implements IndexProtocol
         Collection coll = useList?new ArrayList(values.size()):new IndexConcurrentHashSet(values.size(), 0.75f, 1);
         coll.addAll(values);
         map.put(entry, coll);
-        this.numValues = this.numValues + values.size();
+        numValues.addAndGet(values.size());
       } else if (object instanceof Collection) {
         Collection coll = (Collection) object;
         // If its a list query might get ConcurrentModificationException.
@@ -1822,7 +1823,7 @@ public abstract class AbstractIndex implements IndexProtocol
         coll.add(object);
         map.put(entry, coll);
       }
-      this.numValues = this.numValues + values.size();
+      numValues.addAndGet(values.size());
     }
 
     public Object get(RegionEntry entry)
@@ -1860,14 +1861,14 @@ public abstract class AbstractIndex implements IndexProtocol
           if (coll.size() == 0) {
             map.remove(entry);
           }
-          this.numValues--;
+          numValues.decrementAndGet();
         }
       }
       else {
         if (object.equals(value)) {
           map.remove(entry);
         }
-        this.numValues--;
+        this.numValues.decrementAndGet();
       }
     }
 
@@ -1875,8 +1876,8 @@ public abstract class AbstractIndex implements IndexProtocol
     {
       Object retVal = map.remove(entry);
       if (retVal != null) {
-            this.numValues = (retVal instanceof Collection) ? this.numValues
-                - ((Collection) retVal).size() : this.numValues - 1;
+            numValues.addAndGet((retVal instanceof Collection) ?
+              - ((Collection) retVal).size() : -1 );
       }
       return retVal;
     }
@@ -1896,7 +1897,7 @@ public abstract class AbstractIndex implements IndexProtocol
 
     public int getNumValues()
     {
-      return this.numValues;
+      return this.numValues.get();
     }
 
     public int getNumEntries()
@@ -2158,7 +2159,7 @@ public abstract class AbstractIndex implements IndexProtocol
     public void clear()
     {
       map.clear();
-      this.numValues = 0;
+      this.numValues.set(0);
     }
 
     public Set entrySet()
@@ -2175,7 +2176,7 @@ public abstract class AbstractIndex implements IndexProtocol
     public void replace(RegionEntry entry, Object values) {
       int numOldValues = getNumValues(entry);
       this.map.put(entry, values);
-      this.numValues += (((values instanceof Collection) ? ((Collection) values)
+      this.numValues.addAndGet(((values instanceof Collection) ? ((Collection) values)
           .size() : 1) - numOldValues);
     }
   }


[30/50] [abbrv] incubator-geode git commit: GEODE-445: fix race in unit test

Posted by ds...@apache.org.
GEODE-445: fix race in unit test

The test now uses a WaitCriterion when checking
the member and profile counts.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/9390a624
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/9390a624
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/9390a624

Branch: refs/heads/develop
Commit: 9390a6248d2a92c0028275dac3711c49869f03ce
Parents: ec307d2
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Wed Oct 21 14:51:34 2015 -0700
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Wed Oct 21 16:52:41 2015 -0700

----------------------------------------------------------------------
 .../offheap/OutOfOffHeapMemoryDUnitTest.java    | 27 +++++++++++++++++---
 1 file changed, 23 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9390a624/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OutOfOffHeapMemoryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OutOfOffHeapMemoryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OutOfOffHeapMemoryDUnitTest.java
index a7921ee..76bfde7 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OutOfOffHeapMemoryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OutOfOffHeapMemoryDUnitTest.java
@@ -293,10 +293,29 @@ public class OutOfOffHeapMemoryDUnitTest extends CacheTestCase {
           final int countMembersPlusLocator = vmCount+1-1; // +1 for locator, -1 for OOOHME member
           final int countOtherMembers = vmCount-1-1; // -1 for self, -1 for OOOHME member
           
-          assertEquals(countMembersPlusLocator, ((InternalDistributedSystem)OutOfOffHeapMemoryDUnitTest
-              .system.get()).getDistributionManager().getDistributionManagerIds().size());
-          assertEquals(countOtherMembers, ((DistributedRegion)OutOfOffHeapMemoryDUnitTest
-              .cache.get().getRegion(name)).getDistributionAdvisor().getNumProfiles());
+          final WaitCriterion waitForDisconnect = new WaitCriterion() {
+            public boolean done() {
+              InternalDistributedSystem ids = (InternalDistributedSystem)OutOfOffHeapMemoryDUnitTest.system.get();
+              DistributedRegion dr = (DistributedRegion)OutOfOffHeapMemoryDUnitTest.cache.get().getRegion(name);
+              return countMembersPlusLocator == ids.getDistributionManager().getDistributionManagerIds().size()
+                  && countOtherMembers == dr.getDistributionAdvisor().getNumProfiles();
+            }
+            public String description() {
+              String msg = "";
+              InternalDistributedSystem ids = (InternalDistributedSystem)OutOfOffHeapMemoryDUnitTest.system.get();
+              int currentMemberCount = ids.getDistributionManager().getDistributionManagerIds().size();
+              if (countMembersPlusLocator != currentMemberCount) {
+                msg += " expected " + countMembersPlusLocator + " members but found " + currentMemberCount;
+              }
+              DistributedRegion dr = (DistributedRegion)OutOfOffHeapMemoryDUnitTest.cache.get().getRegion(name);
+              int profileCount = dr.getDistributionAdvisor().getNumProfiles();
+              if (countOtherMembers != profileCount) {
+                msg += " expected " + countOtherMembers + " profiles but found " + profileCount;
+              }
+              return msg;
+            }
+          };
+          waitForCriterion(waitForDisconnect, 30*1000, 10, true);
         }
       });
     }


[38/50] [abbrv] incubator-geode git commit: [fixes GEODE-414] Add a retry so that PRMetaData will be updated with correct data.

Posted by ds...@apache.org.
[fixes GEODE-414] Add a retry so that PRMetaData will be updated with
correct data.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a73dc1b7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a73dc1b7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a73dc1b7

Branch: refs/heads/develop
Commit: a73dc1b7dc1b17429e73c7e97b5dceff203eed82
Parents: 1e6cc66
Author: eshu <es...@pivotal.io>
Authored: Thu Oct 22 17:08:47 2015 -0700
Committer: eshu <es...@pivotal.io>
Committed: Thu Oct 22 17:08:47 2015 -0700

----------------------------------------------------------------------
 .../PartitionedRegionSingleHopDUnitTest.java     | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a73dc1b7/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
index 278c477..0e2ec72 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
@@ -830,6 +830,25 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     assertTrue(regionMetaData.containsKey(region.getFullPath()));
     
     final ClientPartitionAdvisor prMetaData = regionMetaData.get(region.getFullPath()); 
+    
+    //Fixes a race condition in GEODE-414 by retrying as 
+    //region.clientMetaDataLock.tryLock() may prevent fetching the 
+    //metadata through functional calls as only limited functions are executed in the test.
+    long start = System.currentTimeMillis();
+    do {
+      if ((prMetaData.getBucketServerLocationsMap_TEST_ONLY().size() !=4)) {
+        //waiting if there is another thread holding the lock
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          //ignored.
+        }
+        cms.getClientPRMetadata((LocalRegion)region);
+      } else {
+        break;
+      }
+    } while (System.currentTimeMillis() - start < 60000);
+    
     wc = new WaitCriterion() {
       public boolean done() {
         return (prMetaData.getBucketServerLocationsMap_TEST_ONLY().size() == 4);


[07/50] [abbrv] incubator-geode git commit: GEODE-346: Split into UnitTests and IntegrationTests

Posted by ds...@apache.org.
GEODE-346: Split into UnitTests and IntegrationTests

Extract IntegrationTests from AutoBalancerJUnitTest and move some
common code to setup and teardown methods. Fix GEODE-346 by making
teardown wait for HostStatSampler statThread to terminate.

Isolate test with sleep to its own test class and document. The next
step should involve refactoring of AutoBalancer to break dependencies
to allow for correct mocking.

Add Awaitility version 1.6.5 to dependencies (GEODE-217).

Add check to see if HostStatSampler statThread is alive or not.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f801d1cf
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f801d1cf
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f801d1cf

Branch: refs/heads/develop
Commit: f801d1cfc654772fc399f9baae22d32abbe50ac6
Parents: a23c33c
Author: Kirk Lund <kl...@pivotal.io>
Authored: Tue Oct 20 15:44:40 2015 -0700
Committer: Kirk Lund <kl...@pivotal.io>
Committed: Tue Oct 20 15:59:01 2015 -0700

----------------------------------------------------------------------
 build.gradle                                    |   1 +
 .../gemfire/internal/HostStatSampler.java       |   4 +-
 ...erAuditorInvocationIntegrationJUnitTest.java |  80 ++++++
 .../util/AutoBalancerIntegrationJUnitTest.java  | 279 +++++++++++++++++++
 .../cache/util/AutoBalancerJUnitTest.java       | 273 +-----------------
 5 files changed, 375 insertions(+), 262 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f801d1cf/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index f2aa561..33e73a5 100755
--- a/build.gradle
+++ b/build.gradle
@@ -285,6 +285,7 @@ subprojects {
     compile 'org.springframework:spring-webmvc:3.2.12.RELEASE'
 
     testCompile 'com.github.stefanbirkner:system-rules:1.12.1'
+    testCompile 'com.jayway.awaitility:awaitility:1.6.5'
     testCompile 'edu.umd.cs.mtc:multithreadedtc:1.01'
     testCompile 'junit:junit:4.12'
     testCompile 'org.assertj:assertj-core:2.1.0'

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f801d1cf/gemfire-core/src/main/java/com/gemstone/gemfire/internal/HostStatSampler.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/HostStatSampler.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/HostStatSampler.java
index eadfc34..91b2abe 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/HostStatSampler.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/HostStatSampler.java
@@ -327,7 +327,9 @@ public abstract class HostStatSampler
   }
   
   public final boolean isAlive() {
-    return statThread.isAlive();
+    synchronized (HostStatSampler.class) {
+      return statThread != null && statThread.isAlive();
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f801d1cf/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerAuditorInvocationIntegrationJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerAuditorInvocationIntegrationJUnitTest.java b/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerAuditorInvocationIntegrationJUnitTest.java
new file mode 100755
index 0000000..bd6a3ff
--- /dev/null
+++ b/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerAuditorInvocationIntegrationJUnitTest.java
@@ -0,0 +1,80 @@
+package com.gemstone.gemfire.cache.util;
+
+import static org.junit.Assert.*;
+
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
+import org.jmock.Expectations;
+import org.jmock.Mockery;
+import org.jmock.lib.concurrent.Synchroniser;
+import org.jmock.lib.legacy.ClassImposteriser;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.util.AutoBalancer.OOBAuditor;
+import com.gemstone.gemfire.cache.util.AutoBalancer.TimeProvider;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * IntegrationTest for AuditorInvocation in AutoBalancer. 
+ * 
+ * <p>AutoBalancer should:<br>
+ * 1) be refactored to extract out all inner-classes and inner-interfaces<br>
+ * 2) have constructor changed to accept every collaborator as an argument<br>
+ * 3) then this test can correctly use mocking without any real threads to wait on
+ * 
+ * <p>Extracted from AutoBalancerJUnitTest
+ */
+@Category(IntegrationTest.class)
+public class AutoBalancerAuditorInvocationIntegrationJUnitTest {
+
+  Mockery mockContext;
+
+  @Before
+  public void setupMock() {
+    mockContext = new Mockery() {
+      {
+        setImposteriser(ClassImposteriser.INSTANCE);
+        setThreadingPolicy(new Synchroniser());
+      }
+    };
+  }
+
+  @After
+  public void validateMock() {
+    mockContext.assertIsSatisfied();
+    mockContext = null;
+  }
+
+  @Test
+  public void testAuditorInvocation() throws InterruptedException {
+    int count = 0;
+
+    final OOBAuditor mockAuditor = mockContext.mock(OOBAuditor.class);
+    final TimeProvider mockClock = mockContext.mock(TimeProvider.class);
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockAuditor).init(with(any(Properties.class)));
+        exactly(2).of(mockAuditor).execute();
+        allowing(mockClock).currentTimeMillis();
+        will(returnValue(950L));
+      }
+    });
+
+    Properties props = AutoBalancerJUnitTest.getBasicConfig();
+
+    assertEquals(0, count);
+    AutoBalancer autoR = new AutoBalancer();
+    autoR.setOOBAuditor(mockAuditor);
+    autoR.setTimeProvider(mockClock);
+
+    // the trigger should get invoked after 50 milliseconds
+    autoR.init(props);
+    
+    // TODO: this sleep should NOT be here -- use Awaitility to await a condition instead or use mocking to avoid this altogether
+    TimeUnit.MILLISECONDS.sleep(120); // removal causes failure in validateMock
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f801d1cf/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerIntegrationJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerIntegrationJUnitTest.java b/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerIntegrationJUnitTest.java
new file mode 100755
index 0000000..38b7bf9
--- /dev/null
+++ b/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerIntegrationJUnitTest.java
@@ -0,0 +1,279 @@
+package com.gemstone.gemfire.cache.util;
+
+import static com.jayway.awaitility.Awaitility.*;
+import static java.util.concurrent.TimeUnit.*;
+import static org.junit.Assert.*;
+import static org.hamcrest.Matchers.*;
+
+import java.io.ByteArrayInputStream;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.jmock.Expectations;
+import org.jmock.Mockery;
+import org.jmock.api.Invocation;
+import org.jmock.lib.action.CustomAction;
+import org.jmock.lib.concurrent.Synchroniser;
+import org.jmock.lib.legacy.ClassImposteriser;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.GemFireConfigException;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.util.AutoBalancer.CacheOperationFacade;
+import com.gemstone.gemfire.cache.util.AutoBalancer.GeodeCacheFacade;
+import com.gemstone.gemfire.distributed.DistributedLockService;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.distributed.internal.locks.DLockService;
+import com.gemstone.gemfire.internal.HostStatSampler;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.PRHARedundancyProvider;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.partitioned.InternalPRInfo;
+import com.gemstone.gemfire.internal.cache.partitioned.LoadProbe;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+/**
+ * IntegrationTests for AutoBalancer that include usage of Cache, StatSampler 
+ * and DistributedLockService. Some collaborators may be mocked while others
+ * are real.
+ * 
+ * <p>Extracted from AutoBalancerJUnitTest
+ */
+@Category(IntegrationTest.class)
+public class AutoBalancerIntegrationJUnitTest {
+  
+  private static final int TIMEOUT_SECONDS = 5;
+
+  private GemFireCacheImpl cache;
+  private Mockery mockContext;
+
+  @Before
+  public void setupMock() {
+    mockContext = new Mockery() {
+      {
+        setImposteriser(ClassImposteriser.INSTANCE);
+        setThreadingPolicy(new Synchroniser());
+      }
+    };
+  }
+  
+  @Before
+  public void setUpCacheAndDLS() {
+    cache = createBasicCache();
+  }
+
+  @After
+  public void destroyCacheAndDLS() {
+    if (DLockService.getServiceNamed(AutoBalancer.AUTO_BALANCER_LOCK_SERVICE_NAME) != null) {
+      DLockService.destroy(AutoBalancer.AUTO_BALANCER_LOCK_SERVICE_NAME);
+    }
+
+    if (cache != null && !cache.isClosed()) {
+      try {
+        final HostStatSampler statSampler = ((InternalDistributedSystem)cache.getDistributedSystem()).getStatSampler();
+        cache.close();
+        // wait for the stat sampler to stand down
+        await().atMost(TIMEOUT_SECONDS, SECONDS).until(isAlive(statSampler), equalTo(false));
+      } finally {
+        cache = null;
+      }
+    }
+  }
+  
+  @After
+  public void validateMock() {
+    mockContext.assertIsSatisfied();
+    mockContext = null;
+  }
+
+  @Test
+  public void testAutoRebalaceStatsOnLockSuccess() throws InterruptedException {
+    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockCacheFacade).acquireAutoBalanceLock();
+        will(returnValue(true));
+        oneOf(mockCacheFacade).incrementAttemptCounter();
+        will(new CustomAction("increment stat") {
+          public Object invoke(Invocation invocation) throws Throwable {
+            new GeodeCacheFacade().incrementAttemptCounter();
+            return null;
+          }
+        });
+        allowing(mockCacheFacade);
+      }
+    });
+
+    assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
+    AutoBalancer balancer = new AutoBalancer();
+    balancer.setCacheOperationFacade(mockCacheFacade);
+    balancer.getOOBAuditor().execute();
+    
+    assertEquals(1, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
+  }
+
+  @Test
+  public void testAutoRebalaceStatsOnLockFailure() throws InterruptedException {
+    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockCacheFacade).acquireAutoBalanceLock();
+        will(returnValue(false));
+      }
+    });
+
+    assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
+    AutoBalancer balancer = new AutoBalancer();
+    balancer.setCacheOperationFacade(mockCacheFacade);
+    balancer.getOOBAuditor().execute();
+
+    assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
+  }
+  
+  @Test
+  public void testAutoBalanceStatUpdate() {
+    assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
+    new GeodeCacheFacade().incrementAttemptCounter();
+    
+    assertEquals(1, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
+  }
+  
+  @Test
+  public void testLockSuccess() throws InterruptedException {
+    final AtomicBoolean acquiredAutoBalanceLock = new AtomicBoolean(true);
+    
+    Thread thread = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        CacheOperationFacade cacheFacade = new GeodeCacheFacade();
+        acquiredAutoBalanceLock.set(cacheFacade.acquireAutoBalanceLock());
+      }
+    });
+    thread.start();
+    
+    await().atMost(TIMEOUT_SECONDS, SECONDS).untilTrue(acquiredAutoBalanceLock);
+    
+    DistributedLockService dls = new GeodeCacheFacade().getDLS();
+    assertFalse(dls.lock(AutoBalancer.AUTO_BALANCER_LOCK, 0, -1));
+  }
+
+  @Test
+  public void testLockAlreadyTakenElsewhere() throws InterruptedException {
+    DistributedLockService dls = new GeodeCacheFacade().getDLS();
+    assertTrue(dls.lock(AutoBalancer.AUTO_BALANCER_LOCK, 0, -1));
+
+    final AtomicBoolean success = new AtomicBoolean(true);
+    
+    Thread thread = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        CacheOperationFacade cacheFacade = new GeodeCacheFacade();
+        success.set(cacheFacade.acquireAutoBalanceLock());
+      }
+    });
+    thread.start();
+    thread.join();
+    
+    assertFalse(success.get());
+  }
+
+  @Test
+  public void testInitializerCacheXML() {
+    String configStr = "<cache xmlns=\"http://schema.pivotal.io/gemfire/cache\"                          "
+        + " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"                                      "
+        + " xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\""
+        + " version=\"9.0\">                                                                             "
+        + "   <initializer>                                                                              "
+        + "     <class-name>com.gemstone.gemfire.cache.util.AutoBalancer</class-name>                    "
+        + "     <parameter name=\"schedule\">                                                            "
+        + "       <string>* * * * * ? </string>                                                          "
+        + "     </parameter>                                                                             "
+        + "   </initializer>                                                                             "
+        + " </cache>";
+
+    cache.loadCacheXml(new ByteArrayInputStream(configStr.getBytes()));
+  }
+
+  @Test(expected = GemFireConfigException.class)
+  public void testInitFailOnMissingScheduleConf() {
+    String configStr = "<cache xmlns=\"http://schema.pivotal.io/gemfire/cache\"                          "
+        + " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"                                      "
+        + " xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\""
+        + " version=\"9.0\">                                                                             "
+        + "   <initializer>                                                                              "
+        + "     <class-name>com.gemstone.gemfire.cache.util.AutoBalancer</class-name>                    "
+        + "   </initializer>                                                                             "
+        + " </cache>";
+
+    cache.loadCacheXml(new ByteArrayInputStream(configStr.getBytes()));
+  }
+
+  @Test
+  public void testFacadeCollectMemberDetails2Regions() {
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class);
+
+    final PartitionedRegion mockR1 = mockContext.mock(PartitionedRegion.class, "r1");
+    final PartitionedRegion mockR2 = mockContext.mock(PartitionedRegion.class, "r2");
+    final HashSet<PartitionedRegion> regions = new HashSet<>();
+    regions.add(mockR1);
+    regions.add(mockR2);
+
+    final PRHARedundancyProvider mockRedundancyProviderR1 = mockContext.mock(PRHARedundancyProvider.class, "prhaR1");
+    final InternalPRInfo mockR1PRInfo = mockContext.mock(InternalPRInfo.class, "prInforR1");
+
+    final PRHARedundancyProvider mockRedundancyProviderR2 = mockContext.mock(PRHARedundancyProvider.class, "prhaR2");
+    final InternalPRInfo mockR2PRInfo = mockContext.mock(InternalPRInfo.class, "prInforR2");
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockCache).getPartitionedRegions();
+        will(returnValue(regions));
+        exactly(2).of(mockCache).getResourceManager();
+        will(returnValue(cache.getResourceManager()));
+        allowing(mockR1).getFullPath();
+        oneOf(mockR1).getRedundancyProvider();
+        will(returnValue(mockRedundancyProviderR1));
+        allowing(mockR2).getFullPath();
+        oneOf(mockR2).getRedundancyProvider();
+        will(returnValue(mockRedundancyProviderR2));
+
+        oneOf(mockRedundancyProviderR1).buildPartitionedRegionInfo(with(true), with(any(LoadProbe.class)));
+        will(returnValue(mockR1PRInfo));
+
+        oneOf(mockRedundancyProviderR2).buildPartitionedRegionInfo(with(true), with(any(LoadProbe.class)));
+        will(returnValue(mockR2PRInfo));
+      }
+    });
+
+    GeodeCacheFacade facade = new GeodeCacheFacade() {
+      @Override
+      GemFireCacheImpl getCache() {
+        return mockCache;
+      }
+    };
+
+    Map<PartitionedRegion, InternalPRInfo> map = facade.getRegionMemberDetails();
+    assertNotNull(map);
+    assertEquals(2, map.size());
+    assertEquals(map.get(mockR1), mockR1PRInfo);
+    assertEquals(map.get(mockR2), mockR2PRInfo);
+  }
+
+  private GemFireCacheImpl createBasicCache() {
+    return (GemFireCacheImpl) new CacheFactory().set("mcast-port", "0").create();
+  }
+
+  private Callable<Boolean> isAlive(final HostStatSampler statSampler) {
+    return new Callable<Boolean>() {
+      @Override
+      public Boolean call() throws Exception {
+        return statSampler.isAlive();
+      }
+    };
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f801d1cf/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerJUnitTest.java b/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerJUnitTest.java
index f0bcded..1eca3c2 100644
--- a/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerJUnitTest.java
+++ b/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerJUnitTest.java
@@ -1,25 +1,15 @@
 package com.gemstone.gemfire.cache.util;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
-import java.io.ByteArrayInputStream;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Properties;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.jmock.Expectations;
 import org.jmock.Mockery;
 import org.jmock.Sequence;
-import org.jmock.api.Invocation;
-import org.jmock.lib.action.CustomAction;
 import org.jmock.lib.concurrent.Synchroniser;
 import org.jmock.lib.legacy.ClassImposteriser;
 import org.junit.After;
@@ -28,7 +18,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.GemFireConfigException;
-import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.control.RebalanceFactory;
 import com.gemstone.gemfire.cache.control.RebalanceOperation;
 import com.gemstone.gemfire.cache.control.RebalanceResults;
@@ -38,20 +27,17 @@ import com.gemstone.gemfire.cache.util.AutoBalancer.CacheOperationFacade;
 import com.gemstone.gemfire.cache.util.AutoBalancer.GeodeCacheFacade;
 import com.gemstone.gemfire.cache.util.AutoBalancer.OOBAuditor;
 import com.gemstone.gemfire.cache.util.AutoBalancer.SizeBasedOOBAuditor;
-import com.gemstone.gemfire.cache.util.AutoBalancer.TimeProvider;
-import com.gemstone.gemfire.distributed.DistributedLockService;
-import com.gemstone.gemfire.distributed.internal.locks.DLockService;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.PRHARedundancyProvider;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.partitioned.InternalPRInfo;
-import com.gemstone.gemfire.internal.cache.partitioned.LoadProbe;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
+/**
+ * UnitTests for AutoBalancer. All collaborators should be mocked.
+ */
 @Category(UnitTest.class)
 public class AutoBalancerJUnitTest {
-  GemFireCacheImpl cache;
   Mockery mockContext;
 
   @Before
@@ -65,129 +51,13 @@ public class AutoBalancerJUnitTest {
   }
 
   @After
-  public void destroyCacheAndDLS() {
-    if (DLockService.getServiceNamed(AutoBalancer.AUTO_BALANCER_LOCK_SERVICE_NAME) != null) {
-      DLockService.destroy(AutoBalancer.AUTO_BALANCER_LOCK_SERVICE_NAME);
-    }
-
-    if (cache != null && !cache.isClosed()) {
-      cache.close();
-      cache = null;
-    }
-  }
-
-  @After
   public void validateMock() {
     mockContext.assertIsSatisfied();
     mockContext = null;
   }
 
-  @Test(expected = IllegalStateException.class)
-  public void testNoCacheError() {
-    AutoBalancer balancer = new AutoBalancer();
-    OOBAuditor auditor = balancer.getOOBAuditor();
-    auditor.execute();
-  }
-
-  @Test
-  public void testAutoRebalaceStatsOnLockSuccess() throws InterruptedException {
-    cache = createBasicCache();
-
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCacheFacade).acquireAutoBalanceLock();
-        will(returnValue(true));
-        oneOf(mockCacheFacade).incrementAttemptCounter();
-        will(new CustomAction("increment stat") {
-          public Object invoke(Invocation invocation) throws Throwable {
-            new GeodeCacheFacade().incrementAttemptCounter();
-            return null;
-          }
-        });
-        allowing(mockCacheFacade);
-      }
-    });
-
-    assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
-    AutoBalancer balancer = new AutoBalancer();
-    balancer.setCacheOperationFacade(mockCacheFacade);
-    balancer.getOOBAuditor().execute();
-    assertEquals(1, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
-  }
-
-  @Test
-  public void testAutoRebalaceStatsOnLockFailure() throws InterruptedException {
-    cache = createBasicCache();
-
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCacheFacade).acquireAutoBalanceLock();
-        will(returnValue(false));
-      }
-    });
-
-    assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
-    AutoBalancer balancer = new AutoBalancer();
-    balancer.setCacheOperationFacade(mockCacheFacade);
-    balancer.getOOBAuditor().execute();
-    assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
-  }
-
-  @Test
-  public void testAutoBalanceStatUpdate() {
-    cache = createBasicCache();
-    assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
-    new GeodeCacheFacade().incrementAttemptCounter();
-    assertEquals(1, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
-  }
-
-  @Test
-  public void testLockSuccess() throws InterruptedException {
-    cache = createBasicCache();
-
-    final CountDownLatch locked = new CountDownLatch(1);
-    final AtomicBoolean success = new AtomicBoolean(true);
-    Thread thread = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        CacheOperationFacade cacheFacade = new GeodeCacheFacade();
-        success.set(cacheFacade.acquireAutoBalanceLock());
-        locked.countDown();
-      }
-    });
-    thread.start();
-    locked.await(1, TimeUnit.SECONDS);
-    assertTrue(success.get());
-    DistributedLockService dls = new GeodeCacheFacade().getDLS();
-    assertFalse(dls.lock(AutoBalancer.AUTO_BALANCER_LOCK, 0, -1));
-  }
-
-  @Test
-  public void testLockAlreadyTakenElsewhere() throws InterruptedException {
-    cache = createBasicCache();
-
-    DistributedLockService dls = new GeodeCacheFacade().getDLS();
-    assertTrue(dls.lock(AutoBalancer.AUTO_BALANCER_LOCK, 0, -1));
-
-    final AtomicBoolean success = new AtomicBoolean(true);
-    Thread thread = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        CacheOperationFacade cacheFacade = new GeodeCacheFacade();
-        success.set(cacheFacade.acquireAutoBalanceLock());
-      }
-    });
-    thread.start();
-    thread.join();
-    assertFalse(success.get());
-  }
-
   @Test
   public void testLockStatExecuteInSequence() throws InterruptedException {
-    cache = createBasicCache();
-
     final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
     final Sequence sequence = mockContext.sequence("sequence");
     mockContext.checking(new Expectations() {
@@ -210,8 +80,6 @@ public class AutoBalancerJUnitTest {
 
   @Test
   public void testReusePreAcquiredLock() throws InterruptedException {
-    cache = createBasicCache();
-
     final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
     mockContext.checking(new Expectations() {
       {
@@ -231,8 +99,6 @@ public class AutoBalancerJUnitTest {
 
   @Test
   public void testAcquireLockAfterReleasedRemotely() throws InterruptedException {
-    cache = createBasicCache();
-
     final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
     final Sequence sequence = mockContext.sequence("sequence");
     mockContext.checking(new Expectations() {
@@ -257,8 +123,6 @@ public class AutoBalancerJUnitTest {
 
   @Test
   public void testFailExecuteIfLockedElsewhere() throws InterruptedException {
-    cache = createBasicCache();
-
     final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
     mockContext.checking(new Expectations() {
       {
@@ -282,8 +146,6 @@ public class AutoBalancerJUnitTest {
 
   @Test
   public void testFailExecuteIfBalanced() throws InterruptedException {
-    cache = createBasicCache();
-
     final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
     mockContext.checking(new Expectations() {
       {
@@ -307,6 +169,13 @@ public class AutoBalancerJUnitTest {
     balancer.getOOBAuditor().execute();
   }
 
+  @Test(expected = IllegalStateException.class)
+  public void testNoCacheError() {
+    AutoBalancer balancer = new AutoBalancer();
+    OOBAuditor auditor = balancer.getOOBAuditor();
+    auditor.execute();
+  }
+
   @Test
   public void testOOBWhenBelowSizeThreshold() {
     final long totalSize = 1000L;
@@ -419,71 +288,6 @@ public class AutoBalancerJUnitTest {
     assertTrue(auditor.needsRebalancing());
   }
 
-  @Test
-  public void testInitializerCacheXML() {
-    String configStr = "<cache xmlns=\"http://schema.pivotal.io/gemfire/cache\"                          "
-        + " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"                                      "
-        + " xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\""
-        + " version=\"9.0\">                                                                             "
-        + "   <initializer>                                                                              "
-        + "     <class-name>com.gemstone.gemfire.cache.util.AutoBalancer</class-name>                    "
-        + "     <parameter name=\"schedule\">                                                            "
-        + "       <string>* * * * * ? </string>                                                          "
-        + "     </parameter>                                                                             "
-        + "   </initializer>                                                                             "
-        + " </cache>";
-
-    cache = createBasicCache();
-    cache.loadCacheXml(new ByteArrayInputStream(configStr.getBytes()));
-  }
-
-  private GemFireCacheImpl createBasicCache() {
-    return (GemFireCacheImpl) new CacheFactory().set("mcast-port", "0").create();
-  }
-
-  @Test(expected = GemFireConfigException.class)
-  public void testInitFailOnMissingScheduleConf() {
-    String configStr = "<cache xmlns=\"http://schema.pivotal.io/gemfire/cache\"                          "
-        + " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"                                      "
-        + " xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\""
-        + " version=\"9.0\">                                                                             "
-        + "   <initializer>                                                                              "
-        + "     <class-name>com.gemstone.gemfire.cache.util.AutoBalancer</class-name>                    "
-        + "   </initializer>                                                                             "
-        + " </cache>";
-
-    cache = createBasicCache();
-    cache.loadCacheXml(new ByteArrayInputStream(configStr.getBytes()));
-  }
-
-  @Test
-  public void testAuditorInvocation() throws InterruptedException {
-    int count = 0;
-
-    final OOBAuditor mockAuditor = mockContext.mock(OOBAuditor.class);
-    final TimeProvider mockClock = mockContext.mock(TimeProvider.class);
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockAuditor).init(with(any(Properties.class)));
-        exactly(2).of(mockAuditor).execute();
-        allowing(mockClock).currentTimeMillis();
-        will(returnValue(950L));
-      }
-    });
-
-    Properties props = getBasicConfig();
-
-    assertEquals(0, count);
-    AutoBalancer autoR = new AutoBalancer();
-    autoR.setOOBAuditor(mockAuditor);
-    autoR.setTimeProvider(mockClock);
-
-    // the trigger should get invoked after 50 milliseconds
-    autoR.init(props);
-
-    TimeUnit.MILLISECONDS.sleep(120);
-  }
-
   @Test(expected = GemFireConfigException.class)
   public void testInvalidSchedule() {
     String someSchedule = "X Y * * * *";
@@ -658,59 +462,6 @@ public class AutoBalancerJUnitTest {
   }
 
   @Test
-  public void testFacadeCollectMemberDetails2Regions() {
-    cache = createBasicCache();
-
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class);
-
-    final PartitionedRegion mockR1 = mockContext.mock(PartitionedRegion.class, "r1");
-    final PartitionedRegion mockR2 = mockContext.mock(PartitionedRegion.class, "r2");
-    final HashSet<PartitionedRegion> regions = new HashSet<>();
-    regions.add(mockR1);
-    regions.add(mockR2);
-
-    final PRHARedundancyProvider mockRedundancyProviderR1 = mockContext.mock(PRHARedundancyProvider.class, "prhaR1");
-    final InternalPRInfo mockR1PRInfo = mockContext.mock(InternalPRInfo.class, "prInforR1");
-
-    final PRHARedundancyProvider mockRedundancyProviderR2 = mockContext.mock(PRHARedundancyProvider.class, "prhaR2");
-    final InternalPRInfo mockR2PRInfo = mockContext.mock(InternalPRInfo.class, "prInforR2");
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCache).getPartitionedRegions();
-        will(returnValue(regions));
-        exactly(2).of(mockCache).getResourceManager();
-        will(returnValue(cache.getResourceManager()));
-        allowing(mockR1).getFullPath();
-        oneOf(mockR1).getRedundancyProvider();
-        will(returnValue(mockRedundancyProviderR1));
-        allowing(mockR2).getFullPath();
-        oneOf(mockR2).getRedundancyProvider();
-        will(returnValue(mockRedundancyProviderR2));
-
-        oneOf(mockRedundancyProviderR1).buildPartitionedRegionInfo(with(true), with(any(LoadProbe.class)));
-        will(returnValue(mockR1PRInfo));
-
-        oneOf(mockRedundancyProviderR2).buildPartitionedRegionInfo(with(true), with(any(LoadProbe.class)));
-        will(returnValue(mockR2PRInfo));
-      }
-    });
-
-    GeodeCacheFacade facade = new GeodeCacheFacade() {
-      @Override
-      GemFireCacheImpl getCache() {
-        return mockCache;
-      }
-    };
-
-    Map<PartitionedRegion, InternalPRInfo> map = facade.getRegionMemberDetails();
-    assertNotNull(map);
-    assertEquals(2, map.size());
-    assertEquals(map.get(mockR1), mockR1PRInfo);
-    assertEquals(map.get(mockR2), mockR2PRInfo);
-  }
-
-  @Test
   public void testFacadeTotalBytes2Regions() {
     final PartitionedRegion mockR1 = mockContext.mock(PartitionedRegion.class, "r1");
     final PartitionedRegion mockR2 = mockContext.mock(PartitionedRegion.class, "r2");
@@ -763,7 +514,7 @@ public class AutoBalancerJUnitTest {
     assertEquals(123 + 74 + 3475, facade.getTotalDataSize(details));
   }
 
-  private Properties getBasicConfig() {
+  static Properties getBasicConfig() {
     Properties props = new Properties();
     // every second schedule
     props.put(AutoBalancer.SCHEDULE, "* * * * * ?");


[08/50] [abbrv] incubator-geode git commit: GEODE-376: fix intermittent fail of testNoDataSerializer

Posted by ds...@apache.org.
GEODE-376: fix intermittent fail of testNoDataSerializer


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/c2db9200
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/c2db9200
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/c2db9200

Branch: refs/heads/develop
Commit: c2db92008a5b05d64c5c76f2f595ac716377e2ee
Parents: f801d1c
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Tue Oct 20 16:54:53 2015 -0700
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Tue Oct 20 16:55:33 2015 -0700

----------------------------------------------------------------------
 .../java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java   | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c2db9200/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
index e9a8e29..14ec5f3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
@@ -5410,6 +5410,8 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
 
     final String name = this.getUniqueName();
 
+    disconnectAllFromDS(); // possible fix for GEODE-376
+
     SerializableRunnable create =
       new CacheSerializableRunnable("Create Region") {
           public void run2() throws CacheException {


[50/50] [abbrv] incubator-geode git commit: Merge remote-tracking branch 'origin/develop' into feature/GEODE-409

Posted by ds...@apache.org.
Merge remote-tracking branch 'origin/develop' into feature/GEODE-409

All the conflicts had to do with the HDFS files being removed
and those same files being modified on develop because of copyright work.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f0b81325
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f0b81325
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f0b81325

Branch: refs/heads/develop
Commit: f0b81325fc2595ec61f562a9f774fd9991a1cbfc
Parents: daa0725 a224504
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Mon Oct 26 11:04:54 2015 -0700
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Mon Oct 26 11:04:54 2015 -0700

----------------------------------------------------------------------
 README.md                                       |   9 +++
 build.gradle                                    |   2 +-
 dev-tools/docker/base/Dockerfile                |  54 ++++++++++++++++
 dev-tools/docker/base/build-base-docker.sh      |  30 +++++++++
 dev-tools/docker/compile/Dockerfile             |  21 +++++++
 .../docker/compile/start-compile-docker.sh      |  62 +++++++++++++++++++
 docker/Dockerfile                               |  56 +++++++----------
 docker/README.md                                |   6 +-
 docker/build-runtime-docker.sh                  |  32 ++++++++++
 .../internal/ra/GFConnectionFactoryImpl.java    |  21 +++++--
 .../gemfire/internal/ra/GFConnectionImpl.java   |  21 +++++--
 .../internal/ra/spi/JCALocalTransaction.java    |  21 +++++--
 .../internal/ra/spi/JCAManagedConnection.java   |  21 +++++--
 .../ra/spi/JCAManagedConnectionFactory.java     |  21 +++++--
 .../ra/spi/JCAManagedConnectionMetaData.java    |  21 +++++--
 gemfire-core/src/jca/ra.xml                     |   2 +-
 .../com/gemstone/gemfire/CancelCriterion.java   |  21 +++++--
 .../com/gemstone/gemfire/CancelException.java   |  21 +++++--
 .../gemstone/gemfire/CanonicalInstantiator.java |  21 +++++--
 .../com/gemstone/gemfire/CopyException.java     |  21 +++++--
 .../java/com/gemstone/gemfire/CopyHelper.java   |  21 +++++--
 .../com/gemstone/gemfire/DataSerializable.java  |  21 +++++--
 .../com/gemstone/gemfire/DataSerializer.java    |  21 +++++--
 .../main/java/com/gemstone/gemfire/Delta.java   |  21 +++++--
 .../gemfire/DeltaSerializationException.java    |  21 +++++--
 .../gemfire/ForcedDisconnectException.java      |  21 +++++--
 .../gemstone/gemfire/GemFireCacheException.java |  21 +++++--
 .../gemfire/GemFireCheckedException.java        |  21 +++++--
 .../gemfire/GemFireConfigException.java         |  21 +++++--
 .../com/gemstone/gemfire/GemFireException.java  |  21 +++++--
 .../gemstone/gemfire/GemFireIOException.java    |  21 +++++--
 .../gemstone/gemfire/GemFireRethrowable.java    |  21 +++++--
 .../gemfire/IncompatibleSystemException.java    |  21 +++++--
 .../java/com/gemstone/gemfire/Instantiator.java |  21 +++++--
 .../gemstone/gemfire/InternalGemFireError.java  |  21 +++++--
 .../gemfire/InternalGemFireException.java       |  21 +++++--
 .../gemstone/gemfire/InvalidDeltaException.java |  21 +++++--
 .../gemstone/gemfire/InvalidValueException.java |  21 +++++--
 .../gemfire/InvalidVersionException.java        |  16 +++++
 .../com/gemstone/gemfire/LicenseException.java  |  21 +++++--
 .../java/com/gemstone/gemfire/LogWriter.java    |  21 +++++--
 .../com/gemstone/gemfire/NoSystemException.java |  21 +++++--
 .../gemfire/OutOfOffHeapMemoryException.java    |  21 +++++--
 .../gemfire/SerializationException.java         |  21 +++++--
 .../gemstone/gemfire/StatisticDescriptor.java   |  21 +++++--
 .../java/com/gemstone/gemfire/Statistics.java   |  21 +++++--
 .../com/gemstone/gemfire/StatisticsFactory.java |  21 +++++--
 .../com/gemstone/gemfire/StatisticsType.java    |  21 +++++--
 .../gemstone/gemfire/StatisticsTypeFactory.java |  21 +++++--
 .../gemfire/SystemConnectException.java         |  21 +++++--
 .../com/gemstone/gemfire/SystemFailure.java     |  21 +++++--
 .../gemfire/SystemIsRunningException.java       |  21 +++++--
 .../gemfire/ThreadInterruptedException.java     |  18 +++++-
 .../com/gemstone/gemfire/ToDataException.java   |  21 +++++--
 .../gemfire/UncreatedSystemException.java       |  21 +++++--
 .../gemstone/gemfire/UnmodifiableException.java |  21 +++++--
 .../gemfire/UnstartedSystemException.java       |  21 +++++--
 .../com/gemstone/gemfire/admin/AdminConfig.java |  21 +++++--
 .../gemfire/admin/AdminDistributedSystem.java   |  21 +++++--
 .../admin/AdminDistributedSystemFactory.java    |  21 +++++--
 .../gemstone/gemfire/admin/AdminException.java  |  21 +++++--
 .../gemfire/admin/AdminXmlException.java        |  21 +++++--
 .../java/com/gemstone/gemfire/admin/Alert.java  |  21 +++++--
 .../com/gemstone/gemfire/admin/AlertLevel.java  |  21 +++++--
 .../gemstone/gemfire/admin/AlertListener.java   |  21 +++++--
 .../gemstone/gemfire/admin/BackupStatus.java    |  21 +++++--
 .../admin/CacheDoesNotExistException.java       |  21 +++++--
 .../gemfire/admin/CacheHealthConfig.java        |  21 +++++--
 .../com/gemstone/gemfire/admin/CacheServer.java |  21 +++++--
 .../gemfire/admin/CacheServerConfig.java        |  21 +++++--
 .../com/gemstone/gemfire/admin/CacheVm.java     |  21 +++++--
 .../gemstone/gemfire/admin/CacheVmConfig.java   |  21 +++++--
 .../gemfire/admin/ConfigurationParameter.java   |  21 +++++--
 .../gemfire/admin/DistributedSystemConfig.java  |  21 +++++--
 .../admin/DistributedSystemHealthConfig.java    |  21 +++++--
 .../gemfire/admin/DistributionLocator.java      |  21 +++++--
 .../admin/DistributionLocatorConfig.java        |  21 +++++--
 .../gemstone/gemfire/admin/GemFireHealth.java   |  21 +++++--
 .../gemfire/admin/GemFireHealthConfig.java      |  21 +++++--
 .../gemfire/admin/GemFireMemberStatus.java      |  21 +++++--
 .../gemstone/gemfire/admin/ManagedEntity.java   |  21 +++++--
 .../gemfire/admin/ManagedEntityConfig.java      |  21 +++++--
 .../gemfire/admin/MemberHealthConfig.java       |  21 +++++--
 .../admin/OperationCancelledException.java      |  21 +++++--
 .../gemfire/admin/RegionNotFoundException.java  |  21 +++++--
 .../gemfire/admin/RegionSubRegionSnapshot.java  |  21 +++++--
 .../gemfire/admin/RuntimeAdminException.java    |  21 +++++--
 .../com/gemstone/gemfire/admin/Statistic.java   |  21 +++++--
 .../gemfire/admin/StatisticResource.java        |  21 +++++--
 .../gemstone/gemfire/admin/SystemMember.java    |  21 +++++--
 .../gemfire/admin/SystemMemberBridgeServer.java |  21 +++++--
 .../gemfire/admin/SystemMemberCache.java        |  21 +++++--
 .../gemfire/admin/SystemMemberCacheEvent.java   |  16 +++++
 .../admin/SystemMemberCacheListener.java        |  21 +++++--
 .../gemfire/admin/SystemMemberCacheServer.java  |  21 +++++--
 .../gemfire/admin/SystemMemberRegion.java       |  21 +++++--
 .../gemfire/admin/SystemMemberRegionEvent.java  |  16 +++++
 .../gemfire/admin/SystemMemberType.java         |  21 +++++--
 .../gemfire/admin/SystemMembershipEvent.java    |  21 +++++--
 .../gemfire/admin/SystemMembershipListener.java |  21 +++++--
 .../UnmodifiableConfigurationException.java     |  21 +++++--
 .../admin/internal/AbstractHealthEvaluator.java |  21 +++++--
 .../internal/AdminDistributedSystemImpl.java    |  21 +++++--
 .../admin/internal/BackupStatusImpl.java        |  21 +++++--
 .../admin/internal/CacheHealthConfigImpl.java   |  21 +++++--
 .../admin/internal/CacheHealthEvaluator.java    |  21 +++++--
 .../admin/internal/CacheServerConfigImpl.java   |  21 +++++--
 .../gemfire/admin/internal/CacheServerImpl.java |  21 +++++--
 .../internal/ConfigurationParameterImpl.java    |  21 +++++--
 .../ConfigurationParameterListener.java         |  21 +++++--
 .../DisabledManagedEntityController.java        |  21 +++++--
 .../internal/DistributedSystemConfigImpl.java   |  21 +++++--
 .../DistributedSystemHealthConfigImpl.java      |  21 +++++--
 .../DistributedSystemHealthEvaluator.java       |  21 +++++--
 .../DistributedSystemHealthMonitor.java         |  21 +++++--
 .../internal/DistributionLocatorConfigImpl.java |  21 +++++--
 .../admin/internal/DistributionLocatorImpl.java |  21 +++++--
 .../EnabledManagedEntityController.java         |  21 +++++--
 .../admin/internal/FinishBackupRequest.java     |  21 +++++--
 .../admin/internal/FinishBackupResponse.java    |  21 +++++--
 .../admin/internal/FlushToDiskRequest.java      |  21 +++++--
 .../admin/internal/FlushToDiskResponse.java     |  21 +++++--
 .../admin/internal/GemFireHealthConfigImpl.java |  21 +++++--
 .../admin/internal/GemFireHealthEvaluator.java  |  21 +++++--
 .../admin/internal/GemFireHealthImpl.java       |  21 +++++--
 .../gemfire/admin/internal/InetAddressUtil.java |  21 +++++--
 .../admin/internal/InternalManagedEntity.java   |  21 +++++--
 .../gemfire/admin/internal/LogCollator.java     |  21 +++++--
 .../admin/internal/ManagedEntityConfigImpl.java |  21 +++++--
 .../admin/internal/ManagedEntityConfigXml.java  |  21 +++++--
 .../ManagedEntityConfigXmlGenerator.java        |  21 +++++--
 .../internal/ManagedEntityConfigXmlParser.java  |  21 +++++--
 .../admin/internal/ManagedEntityController.java |  21 +++++--
 .../ManagedEntityControllerFactory.java         |  21 +++++--
 .../admin/internal/ManagedSystemMemberImpl.java |  21 +++++--
 .../admin/internal/MemberHealthConfigImpl.java  |  21 +++++--
 .../admin/internal/MemberHealthEvaluator.java   |  21 +++++--
 .../admin/internal/PrepareBackupRequest.java    |  21 +++++--
 .../admin/internal/PrepareBackupResponse.java   |  21 +++++--
 .../gemfire/admin/internal/StatisticImpl.java   |  21 +++++--
 .../admin/internal/StatisticResourceImpl.java   |  20 ++++--
 .../internal/SystemMemberBridgeServerImpl.java  |  21 +++++--
 .../internal/SystemMemberCacheEventImpl.java    |  21 +++++--
 .../SystemMemberCacheEventProcessor.java        |  21 +++++--
 .../admin/internal/SystemMemberCacheImpl.java   |  21 +++++--
 .../admin/internal/SystemMemberImpl.java        |  21 +++++--
 .../internal/SystemMemberRegionEventImpl.java   |  21 +++++--
 .../admin/internal/SystemMemberRegionImpl.java  |  21 +++++--
 .../internal/SystemMembershipEventImpl.java     |  21 +++++--
 .../com/gemstone/gemfire/admin/jmx/Agent.java   |  21 +++++--
 .../gemstone/gemfire/admin/jmx/AgentConfig.java |  21 +++++--
 .../gemfire/admin/jmx/AgentFactory.java         |  21 +++++--
 .../internal/AdminDistributedSystemJmxImpl.java |  21 +++++--
 .../admin/jmx/internal/AgentConfigImpl.java     |  21 +++++--
 .../gemfire/admin/jmx/internal/AgentImpl.java   |  20 ++++--
 .../admin/jmx/internal/AgentLauncher.java       |  21 +++++--
 .../admin/jmx/internal/CacheServerJmxImpl.java  |  21 +++++--
 .../admin/jmx/internal/ConfigAttributeInfo.java |  20 ++++--
 .../internal/ConfigurationParameterJmxImpl.java |  21 +++++--
 .../DistributedSystemHealthConfigJmxImpl.java   |  21 +++++--
 .../internal/DistributionLocatorJmxImpl.java    |  22 ++++---
 .../admin/jmx/internal/DynamicManagedBean.java  |  20 ++++--
 .../internal/GemFireHealthConfigJmxImpl.java    |  21 +++++--
 .../jmx/internal/GemFireHealthJmxImpl.java      |  21 +++++--
 .../admin/jmx/internal/GenerateMBeanHTML.java   |  21 +++++--
 .../gemfire/admin/jmx/internal/MBeanUtil.java   |  20 ++++--
 .../admin/jmx/internal/MX4JModelMBean.java      |  21 +++++--
 .../jmx/internal/MX4JServerSocketFactory.java   |  22 ++++---
 .../gemfire/admin/jmx/internal/MailManager.java |  21 +++++--
 .../admin/jmx/internal/ManagedResource.java     |  21 +++++--
 .../admin/jmx/internal/ManagedResourceType.java |  21 +++++--
 .../jmx/internal/MemberInfoWithStatsMBean.java  |  21 +++++--
 .../admin/jmx/internal/RMIRegistryService.java  |  20 ++++--
 .../jmx/internal/RMIRegistryServiceMBean.java   |  20 ++++--
 .../jmx/internal/RefreshNotificationType.java   |  21 +++++--
 .../jmx/internal/StatAlertNotification.java     |  21 +++++--
 .../jmx/internal/StatAlertsAggregator.java      |  21 +++++--
 .../jmx/internal/StatisticAttributeInfo.java    |  20 ++++--
 .../jmx/internal/StatisticResourceJmxImpl.java  |  21 +++++--
 .../SystemMemberBridgeServerJmxImpl.java        |  21 +++++--
 .../jmx/internal/SystemMemberCacheJmxImpl.java  |  21 +++++--
 .../admin/jmx/internal/SystemMemberJmx.java     |  21 +++++--
 .../admin/jmx/internal/SystemMemberJmxImpl.java |  21 +++++--
 .../jmx/internal/SystemMemberRegionJmxImpl.java |  21 +++++--
 .../gemfire/cache/AttributesFactory.java        |  21 +++++--
 .../gemfire/cache/AttributesMutator.java        |  21 +++++--
 .../java/com/gemstone/gemfire/cache/Cache.java  |  21 +++++--
 .../gemstone/gemfire/cache/CacheCallback.java   |  21 +++++--
 .../gemfire/cache/CacheClosedException.java     |  21 +++++--
 .../com/gemstone/gemfire/cache/CacheEvent.java  |  21 +++++--
 .../gemstone/gemfire/cache/CacheException.java  |  21 +++++--
 .../gemfire/cache/CacheExistsException.java     |  21 +++++--
 .../gemstone/gemfire/cache/CacheFactory.java    |  21 +++++--
 .../gemstone/gemfire/cache/CacheListener.java   |  21 +++++--
 .../com/gemstone/gemfire/cache/CacheLoader.java |  21 +++++--
 .../gemfire/cache/CacheLoaderException.java     |  21 +++++--
 .../gemfire/cache/CacheRuntimeException.java    |  21 +++++--
 .../gemstone/gemfire/cache/CacheStatistics.java |  21 +++++--
 .../gemfire/cache/CacheTransactionManager.java  |  21 +++++--
 .../com/gemstone/gemfire/cache/CacheWriter.java |  21 +++++--
 .../gemfire/cache/CacheWriterException.java     |  21 +++++--
 .../gemfire/cache/CacheXmlException.java        |  21 +++++--
 .../gemstone/gemfire/cache/ClientSession.java   |  21 +++++--
 .../gemfire/cache/CommitConflictException.java  |  21 +++++--
 .../cache/CommitDistributionException.java      |  21 +++++--
 .../cache/CommitIncompleteException.java        |  16 +++++
 .../gemfire/cache/CustomEvictionAttributes.java |  22 ++++---
 .../gemstone/gemfire/cache/CustomExpiry.java    |  21 +++++--
 .../com/gemstone/gemfire/cache/DataPolicy.java  |  21 +++++--
 .../com/gemstone/gemfire/cache/Declarable.java  |  21 +++++--
 .../gemfire/cache/DiskAccessException.java      |  21 +++++--
 .../com/gemstone/gemfire/cache/DiskStore.java   |  21 +++++--
 .../gemfire/cache/DiskStoreFactory.java         |  21 +++++--
 .../gemfire/cache/DiskWriteAttributes.java      |  21 +++++--
 .../cache/DiskWriteAttributesFactory.java       |  21 +++++--
 .../DuplicatePrimaryPartitionException.java     |  21 +++++--
 .../gemfire/cache/DynamicRegionFactory.java     |  21 +++++--
 .../gemfire/cache/DynamicRegionListener.java    |  21 +++++--
 .../gemfire/cache/EntryDestroyedException.java  |  21 +++++--
 .../com/gemstone/gemfire/cache/EntryEvent.java  |  21 +++++--
 .../gemfire/cache/EntryExistsException.java     |  21 +++++--
 .../gemfire/cache/EntryNotFoundException.java   |  21 +++++--
 .../gemfire/cache/EntryNotFoundInRegion.java    |  21 +++++--
 .../gemstone/gemfire/cache/EntryOperation.java  |  21 +++++--
 .../gemstone/gemfire/cache/EvictionAction.java  |  23 ++++---
 .../gemfire/cache/EvictionAlgorithm.java        |  23 ++++---
 .../gemfire/cache/EvictionAttributes.java       |  20 ++++--
 .../cache/EvictionAttributesMutator.java        |  23 ++++---
 .../gemfire/cache/EvictionCriteria.java         |  22 ++++---
 .../gemfire/cache/ExpirationAction.java         |  21 +++++--
 .../gemfire/cache/ExpirationAttributes.java     |  21 +++++--
 .../cache/FailedSynchronizationException.java   |  21 +++++--
 .../gemfire/cache/FixedPartitionAttributes.java |  21 +++++--
 .../gemfire/cache/FixedPartitionResolver.java   |  20 ++++--
 .../cache/GatewayConfigurationException.java    |  21 +++++--
 .../gemfire/cache/GatewayException.java         |  21 +++++--
 .../gemstone/gemfire/cache/GemFireCache.java    |  21 +++++--
 .../cache/IncompatibleVersionException.java     |  21 +++++--
 .../gemstone/gemfire/cache/InterestPolicy.java  |  21 +++++--
 .../cache/InterestRegistrationEvent.java        |  21 +++++--
 .../cache/InterestRegistrationListener.java     |  21 +++++--
 .../gemfire/cache/InterestResultPolicy.java     |  21 +++++--
 .../gemstone/gemfire/cache/LoaderHelper.java    |  21 +++++--
 .../com/gemstone/gemfire/cache/LossAction.java  |  21 +++++--
 .../gemfire/cache/LowMemoryException.java       |  21 +++++--
 .../gemfire/cache/MembershipAttributes.java     |  21 +++++--
 .../com/gemstone/gemfire/cache/MirrorType.java  |  21 +++++--
 .../cache/NoQueueServersAvailableException.java |  21 +++++--
 ...NoSubscriptionServersAvailableException.java |  21 +++++--
 .../com/gemstone/gemfire/cache/Operation.java   |  21 +++++--
 .../cache/OperationAbortedException.java        |  21 +++++--
 .../gemfire/cache/PartitionAttributes.java      |  21 +++++--
 .../cache/PartitionAttributesFactory.java       |  20 ++++--
 .../gemfire/cache/PartitionResolver.java        |  20 ++++--
 .../PartitionedRegionDistributionException.java |  21 +++++--
 .../PartitionedRegionStorageException.java      |  21 +++++--
 .../java/com/gemstone/gemfire/cache/Region.java |  21 +++++--
 .../gemfire/cache/RegionAccessException.java    |  21 +++++--
 .../gemfire/cache/RegionAttributes.java         |  21 +++++--
 .../gemfire/cache/RegionDestroyedException.java |  21 +++++--
 .../cache/RegionDistributionException.java      |  21 +++++--
 .../com/gemstone/gemfire/cache/RegionEvent.java |  21 +++++--
 .../gemfire/cache/RegionExistsException.java    |  21 +++++--
 .../gemstone/gemfire/cache/RegionFactory.java   |  21 +++++--
 .../gemfire/cache/RegionMembershipListener.java |  21 +++++--
 .../cache/RegionReinitializedException.java     |  21 +++++--
 .../gemfire/cache/RegionRoleException.java      |  21 +++++--
 .../gemfire/cache/RegionRoleListener.java       |  21 +++++--
 .../gemstone/gemfire/cache/RegionService.java   |  23 ++++---
 .../gemstone/gemfire/cache/RegionShortcut.java  |  21 +++++--
 .../cache/RemoteTransactionException.java       |  21 +++++--
 .../gemstone/gemfire/cache/RequiredRoles.java   |  21 +++++--
 .../gemfire/cache/ResourceException.java        |  21 +++++--
 .../gemfire/cache/ResumptionAction.java         |  21 +++++--
 .../com/gemstone/gemfire/cache/RoleEvent.java   |  21 +++++--
 .../gemstone/gemfire/cache/RoleException.java   |  21 +++++--
 .../java/com/gemstone/gemfire/cache/Scope.java  |  21 +++++--
 .../gemfire/cache/SerializedCacheValue.java     |  21 +++++--
 .../cache/StatisticsDisabledException.java      |  21 +++++--
 .../gemfire/cache/SubscriptionAttributes.java   |  21 +++++--
 .../SynchronizationCommitConflictException.java |  21 +++++--
 .../gemfire/cache/TimeoutException.java         |  21 +++++--
 ...TransactionDataNodeHasDepartedException.java |  21 +++++--
 .../TransactionDataNotColocatedException.java   |  21 +++++--
 .../TransactionDataRebalancedException.java     |  21 +++++--
 .../gemfire/cache/TransactionEvent.java         |  21 +++++--
 .../gemfire/cache/TransactionException.java     |  21 +++++--
 .../gemstone/gemfire/cache/TransactionId.java   |  21 +++++--
 .../cache/TransactionInDoubtException.java      |  21 +++++--
 .../gemfire/cache/TransactionListener.java      |  21 +++++--
 .../gemfire/cache/TransactionWriter.java        |  21 +++++--
 .../cache/TransactionWriterException.java       |  21 +++++--
 ...upportedOperationInTransactionException.java |  21 +++++--
 .../cache/UnsupportedVersionException.java      |  21 +++++--
 .../gemfire/cache/VersionException.java         |  21 +++++--
 .../gemfire/cache/asyncqueue/AsyncEvent.java    |  21 +++++--
 .../cache/asyncqueue/AsyncEventListener.java    |  21 +++++--
 .../cache/asyncqueue/AsyncEventQueue.java       |  21 +++++--
 .../asyncqueue/AsyncEventQueueFactory.java      |  21 +++++--
 .../internal/AsyncEventQueueFactoryImpl.java    |  21 +++++--
 .../internal/AsyncEventQueueImpl.java           |  21 +++++--
 .../internal/AsyncEventQueueStats.java          |  21 +++++--
 .../internal/ParallelAsyncEventQueueImpl.java   |  16 +++++
 .../internal/SerialAsyncEventQueueImpl.java     |  16 +++++
 .../client/AllConnectionsInUseException.java    |  21 +++++--
 .../gemfire/cache/client/ClientCache.java       |  21 +++++--
 .../cache/client/ClientCacheFactory.java        |  21 +++++--
 .../cache/client/ClientNotReadyException.java   |  21 +++++--
 .../cache/client/ClientRegionFactory.java       |  21 +++++--
 .../cache/client/ClientRegionShortcut.java      |  23 ++++---
 .../client/NoAvailableLocatorsException.java    |  21 +++++--
 .../client/NoAvailableServersException.java     |  21 +++++--
 .../com/gemstone/gemfire/cache/client/Pool.java |  21 +++++--
 .../gemfire/cache/client/PoolFactory.java       |  21 +++++--
 .../gemfire/cache/client/PoolManager.java       |  21 +++++--
 .../client/ServerConnectivityException.java     |  21 +++++--
 .../cache/client/ServerOperationException.java  |  21 +++++--
 .../ServerRefusedConnectionException.java       |  21 +++++--
 .../client/SubscriptionNotEnabledException.java |  21 +++++--
 .../cache/client/internal/AbstractOp.java       |  21 +++++--
 .../cache/client/internal/AddPDXEnumOp.java     |  21 +++++--
 .../cache/client/internal/AddPDXTypeOp.java     |  21 +++++--
 .../client/internal/AuthenticateUserOp.java     |  21 +++++--
 .../internal/AutoConnectionSourceImpl.java      |  21 +++++--
 .../client/internal/CacheServerLoadMessage.java |  21 +++++--
 .../gemfire/cache/client/internal/ClearOp.java  |  21 +++++--
 .../client/internal/ClientMetadataService.java  |  20 ++++--
 .../client/internal/ClientPartitionAdvisor.java |  21 +++++--
 .../internal/ClientRegionFactoryImpl.java       |  21 +++++--
 .../cache/client/internal/ClientUpdater.java    |  21 +++++--
 .../client/internal/CloseConnectionOp.java      |  21 +++++--
 .../gemfire/cache/client/internal/CommitOp.java |  21 +++++--
 .../cache/client/internal/Connection.java       |  21 +++++--
 .../client/internal/ConnectionFactory.java      |  21 +++++--
 .../client/internal/ConnectionFactoryImpl.java  |  21 +++++--
 .../cache/client/internal/ConnectionImpl.java   |  21 +++++--
 .../cache/client/internal/ConnectionSource.java |  21 +++++--
 .../cache/client/internal/ConnectionStats.java  |  21 +++++--
 .../cache/client/internal/ContainsKeyOp.java    |  21 +++++--
 .../DataSerializerRecoveryListener.java         |  21 +++++--
 .../cache/client/internal/DestroyOp.java        |  21 +++++--
 .../cache/client/internal/DestroyRegionOp.java  |  21 +++++--
 .../gemfire/cache/client/internal/Endpoint.java |  21 +++++--
 .../cache/client/internal/EndpointManager.java  |  21 +++++--
 .../client/internal/EndpointManagerImpl.java    |  21 +++++--
 .../cache/client/internal/ExecutablePool.java   |  21 +++++--
 .../client/internal/ExecuteFunctionHelper.java  |  21 +++++--
 .../client/internal/ExecuteFunctionNoAckOp.java |  21 +++++--
 .../client/internal/ExecuteFunctionOp.java      |  21 +++++--
 .../internal/ExecuteRegionFunctionNoAckOp.java  |  21 +++++--
 .../internal/ExecuteRegionFunctionOp.java       |  21 +++++--
 .../ExecuteRegionFunctionSingleHopOp.java       |  21 +++++--
 .../internal/ExplicitConnectionSourceImpl.java  |  21 +++++--
 .../gemfire/cache/client/internal/GetAllOp.java |  21 +++++--
 .../client/internal/GetClientPRMetaDataOp.java  |  20 ++++--
 .../GetClientPartitionAttributesOp.java         |  20 ++++--
 .../cache/client/internal/GetEntryOp.java       |  21 +++++--
 .../cache/client/internal/GetEventValueOp.java  |  20 ++++--
 .../client/internal/GetFunctionAttributeOp.java |  21 +++++--
 .../gemfire/cache/client/internal/GetOp.java    |  21 +++++--
 .../cache/client/internal/GetPDXEnumByIdOp.java |  21 +++++--
 .../cache/client/internal/GetPDXEnumsOp.java    |  21 +++++--
 .../client/internal/GetPDXIdForEnumOp.java      |  21 +++++--
 .../client/internal/GetPDXIdForTypeOp.java      |  21 +++++--
 .../cache/client/internal/GetPDXTypeByIdOp.java |  21 +++++--
 .../cache/client/internal/GetPDXTypesOp.java    |  21 +++++--
 .../internal/InstantiatorRecoveryListener.java  |  21 +++++--
 .../cache/client/internal/InternalPool.java     |  21 +++++--
 .../cache/client/internal/InvalidateOp.java     |  21 +++++--
 .../gemfire/cache/client/internal/KeySetOp.java |  21 +++++--
 .../cache/client/internal/LiveServerPinger.java |  21 +++++--
 .../internal/LocatorDiscoveryCallback.java      |  21 +++++--
 .../LocatorDiscoveryCallbackAdapter.java        |  21 +++++--
 .../cache/client/internal/MakePrimaryOp.java    |  21 +++++--
 .../gemfire/cache/client/internal/Op.java       |  23 ++++---
 .../cache/client/internal/OpExecutorImpl.java   |  21 +++++--
 .../internal/PdxRegistryRecoveryListener.java   |  21 +++++--
 .../gemfire/cache/client/internal/PingOp.java   |  21 +++++--
 .../gemfire/cache/client/internal/PoolImpl.java |  21 +++++--
 .../cache/client/internal/PrimaryAckOp.java     |  21 +++++--
 .../cache/client/internal/ProxyCache.java       |  21 +++++--
 .../client/internal/ProxyCacheCloseOp.java      |  21 +++++--
 .../cache/client/internal/ProxyRegion.java      |  21 +++++--
 .../gemfire/cache/client/internal/PutAllOp.java |  21 +++++--
 .../gemfire/cache/client/internal/PutOp.java    |  21 +++++--
 .../gemfire/cache/client/internal/QueryOp.java  |  21 +++++--
 .../client/internal/QueueConnectionImpl.java    |  21 +++++--
 .../cache/client/internal/QueueManager.java     |  21 +++++--
 .../cache/client/internal/QueueManagerImpl.java |  21 +++++--
 .../cache/client/internal/QueueState.java       |  16 +++++
 .../cache/client/internal/QueueStateImpl.java   |  21 +++++--
 .../cache/client/internal/ReadyForEventsOp.java |  21 +++++--
 .../internal/RegisterDataSerializersOp.java     |  21 +++++--
 .../internal/RegisterInstantiatorsOp.java       |  21 +++++--
 .../client/internal/RegisterInterestListOp.java |  21 +++++--
 .../client/internal/RegisterInterestOp.java     |  21 +++++--
 .../internal/RegisterInterestTracker.java       |  21 +++++--
 .../cache/client/internal/RemoveAllOp.java      |  21 +++++--
 .../cache/client/internal/RollbackOp.java       |  21 +++++--
 .../cache/client/internal/ServerBlackList.java  |  21 +++++--
 .../cache/client/internal/ServerProxy.java      |  21 +++++--
 .../client/internal/ServerRegionDataAccess.java |  23 ++++---
 .../client/internal/ServerRegionProxy.java      |  21 +++++--
 .../internal/SingleHopClientExecutor.java       |  21 +++++--
 .../internal/SingleHopOperationCallable.java    |  21 +++++--
 .../gemfire/cache/client/internal/SizeOp.java   |  21 +++++--
 .../cache/client/internal/TXFailoverOp.java     |  21 +++++--
 .../client/internal/TXSynchronizationOp.java    |  21 +++++--
 .../internal/UnregisterInterestListOp.java      |  21 +++++--
 .../client/internal/UnregisterInterestOp.java   |  21 +++++--
 .../cache/client/internal/UserAttributes.java   |  21 +++++--
 .../locator/ClientConnectionRequest.java        |  21 +++++--
 .../locator/ClientConnectionResponse.java       |  21 +++++--
 .../locator/ClientReplacementRequest.java       |  21 +++++--
 .../internal/locator/GetAllServersRequest.java  |  20 ++++--
 .../internal/locator/GetAllServersResponse.java |  20 ++++--
 .../internal/locator/LocatorListRequest.java    |  21 +++++--
 .../internal/locator/LocatorListResponse.java   |  21 +++++--
 .../internal/locator/LocatorStatusRequest.java  |  20 ++++--
 .../internal/locator/LocatorStatusResponse.java |  20 ++++--
 .../locator/QueueConnectionRequest.java         |  21 +++++--
 .../locator/QueueConnectionResponse.java        |  21 +++++--
 .../internal/locator/SerializationHelper.java   |  21 +++++--
 .../internal/locator/ServerLocationRequest.java |  23 ++++---
 .../locator/ServerLocationResponse.java         |  23 ++++---
 .../locator/wan/LocatorMembershipListener.java  |  21 +++++--
 .../pooling/ConnectionDestroyedException.java   |  21 +++++--
 .../internal/pooling/ConnectionManager.java     |  21 +++++--
 .../internal/pooling/ConnectionManagerImpl.java |  21 +++++--
 .../internal/pooling/PooledConnection.java      |  21 +++++--
 .../gemfire/cache/control/RebalanceFactory.java |  21 +++++--
 .../cache/control/RebalanceOperation.java       |  21 +++++--
 .../gemfire/cache/control/RebalanceResults.java |  21 +++++--
 .../gemfire/cache/control/ResourceManager.java  |  21 +++++--
 .../execute/EmtpyRegionFunctionException.java   |  21 +++++--
 .../gemfire/cache/execute/Execution.java        |  20 ++++--
 .../gemfire/cache/execute/Function.java         |  20 ++++--
 .../gemfire/cache/execute/FunctionAdapter.java  |  20 ++++--
 .../gemfire/cache/execute/FunctionContext.java  |  20 ++++--
 .../cache/execute/FunctionException.java        |  20 ++++--
 .../FunctionInvocationTargetException.java      |  20 ++++--
 .../gemfire/cache/execute/FunctionService.java  |  20 ++++--
 .../cache/execute/RegionFunctionContext.java    |  21 +++++--
 .../gemfire/cache/execute/ResultCollector.java  |  20 ++++--
 .../gemfire/cache/execute/ResultSender.java     |  20 ++++--
 .../internal/FunctionServiceManager.java        |  24 ++++---
 .../gemfire/cache/hdfs/HDFSIOException.java     |  21 +++++--
 .../gemstone/gemfire/cache/hdfs/HDFSStore.java  |  21 +++++--
 .../gemfire/cache/hdfs/HDFSStoreFactory.java    |  21 +++++--
 .../gemfire/cache/hdfs/HDFSStoreMutator.java    |  21 +++++--
 .../cache/hdfs/StoreExistsException.java        |  21 +++++--
 .../cache/hdfs/internal/FailureTracker.java     |  21 +++++--
 .../cache/hdfs/internal/FlushObserver.java      |  16 +++++
 .../hdfs/internal/HDFSBucketRegionQueue.java    |  21 +++++--
 .../cache/hdfs/internal/HDFSEntriesSet.java     |  21 +++++--
 .../cache/hdfs/internal/HDFSEventListener.java  |  21 +++++--
 .../hdfs/internal/HDFSEventQueueFilter.java     |  21 +++++--
 .../hdfs/internal/HDFSGatewayEventImpl.java     |  22 ++++---
 .../hdfs/internal/HDFSIntegrationUtil.java      |  21 +++++--
 .../HDFSParallelGatewaySenderQueue.java         |  21 +++++--
 .../hdfs/internal/HDFSStoreConfigHolder.java    |  23 ++++---
 .../cache/hdfs/internal/HDFSStoreCreation.java  |  23 ++++---
 .../hdfs/internal/HDFSStoreFactoryImpl.java     |  21 +++++--
 .../cache/hdfs/internal/HDFSStoreImpl.java      |  21 +++++--
 .../hdfs/internal/HDFSStoreMutatorImpl.java     |  23 ++++---
 .../HDFSWriteOnlyStoreEventListener.java        |  21 +++++--
 .../hdfs/internal/HoplogListenerForRegion.java  |  21 +++++--
 .../cache/hdfs/internal/PersistedEventImpl.java |  21 +++++--
 .../hdfs/internal/QueuedPersistentEvent.java    |  16 +++++
 .../hdfs/internal/SignalledFlushObserver.java   |  16 +++++
 .../internal/SortedHDFSQueuePersistedEvent.java |  21 +++++--
 .../internal/SortedHoplogPersistedEvent.java    |  21 +++++--
 .../UnsortedHDFSQueuePersistedEvent.java        |  21 +++++--
 .../internal/UnsortedHoplogPersistedEvent.java  |  21 +++++--
 .../cache/hdfs/internal/cardinality/Bits.java   |  21 +++++--
 .../cardinality/CardinalityMergeException.java  |  21 +++++--
 .../hdfs/internal/cardinality/HyperLogLog.java  |  21 +++++--
 .../hdfs/internal/cardinality/IBuilder.java     |  21 +++++--
 .../hdfs/internal/cardinality/ICardinality.java |  21 +++++--
 .../hdfs/internal/cardinality/MurmurHash.java   |  21 +++++--
 .../hdfs/internal/cardinality/RegisterSet.java  |  21 +++++--
 .../hdfs/internal/hoplog/AbstractHoplog.java    |  21 +++++--
 .../hoplog/AbstractHoplogOrganizer.java         |  21 +++++--
 .../cache/hdfs/internal/hoplog/BloomFilter.java |  21 +++++--
 .../hoplog/CloseTmpHoplogsTimerTask.java        |  21 +++++--
 .../hdfs/internal/hoplog/CompactionStatus.java  |  21 +++++--
 .../cache/hdfs/internal/hoplog/FlushStatus.java |  16 +++++
 .../internal/hoplog/HDFSCompactionManager.java  |  21 +++++--
 .../internal/hoplog/HDFSFlushQueueArgs.java     |  16 +++++
 .../internal/hoplog/HDFSFlushQueueFunction.java |  16 +++++
 .../hoplog/HDFSForceCompactionArgs.java         |  21 +++++--
 .../hoplog/HDFSForceCompactionFunction.java     |  21 +++++--
 .../HDFSForceCompactionResultCollector.java     |  21 +++++--
 .../hoplog/HDFSLastCompactionTimeFunction.java  |  21 +++++--
 .../internal/hoplog/HDFSRegionDirector.java     |  21 +++++--
 .../hdfs/internal/hoplog/HDFSStoreDirector.java |  21 +++++--
 .../hoplog/HDFSUnsortedHoplogOrganizer.java     |  21 +++++--
 .../hdfs/internal/hoplog/HFileSortedOplog.java  |  21 +++++--
 .../hoplog/HdfsSortedOplogOrganizer.java        |  21 +++++--
 .../cache/hdfs/internal/hoplog/Hoplog.java      |  21 +++++--
 .../hdfs/internal/hoplog/HoplogConfig.java      |  21 +++++--
 .../hdfs/internal/hoplog/HoplogListener.java    |  21 +++++--
 .../hdfs/internal/hoplog/HoplogOrganizer.java   |  21 +++++--
 .../hdfs/internal/hoplog/HoplogSetIterator.java |  21 +++++--
 .../hdfs/internal/hoplog/HoplogSetReader.java   |  21 +++++--
 .../internal/hoplog/SequenceFileHoplog.java     |  21 +++++--
 .../hoplog/mapred/AbstractGFRecordReader.java   |  21 +++++--
 .../internal/hoplog/mapred/GFInputFormat.java   |  21 +++++--
 .../internal/hoplog/mapred/GFOutputFormat.java  |  21 +++++--
 .../mapreduce/AbstractGFRecordReader.java       |  21 +++++--
 .../hoplog/mapreduce/GFInputFormat.java         |  21 +++++--
 .../hdfs/internal/hoplog/mapreduce/GFKey.java   |  21 +++++--
 .../hoplog/mapreduce/GFOutputFormat.java        |  21 +++++--
 .../hoplog/mapreduce/HDFSSplitIterator.java     |  21 +++++--
 .../internal/hoplog/mapreduce/HoplogUtil.java   |  21 +++++--
 .../hoplog/mapreduce/RWSplitIterator.java       |  21 +++++--
 .../hoplog/mapreduce/StreamSplitIterator.java   |  21 +++++--
 .../org/apache/hadoop/io/SequenceFile.java      |  21 +++++--
 .../gemfire/cache/lucene/LuceneIndex.java       |  21 +++++--
 .../gemfire/cache/lucene/LuceneQuery.java       |  16 +++++
 .../cache/lucene/LuceneQueryFactory.java        |  16 +++++
 .../cache/lucene/LuceneQueryResults.java        |  16 +++++
 .../cache/lucene/LuceneResultStruct.java        |  16 +++++
 .../gemfire/cache/lucene/LuceneService.java     |  16 +++++
 .../cache/lucene/LuceneServiceFactory.java      |  16 +++++
 .../cache/lucene/LuceneServiceProvider.java     |  16 +++++
 .../cache/lucene/internal/LuceneIndexImpl.java  |  16 +++++
 .../lucene/internal/LuceneQueryFactoryImpl.java |  16 +++++
 .../cache/lucene/internal/LuceneQueryImpl.java  |  16 +++++
 .../lucene/internal/LuceneQueryResultsImpl.java |  16 +++++
 .../lucene/internal/LuceneResultStructImpl.java |  16 +++++
 .../internal/LuceneServiceFactoryImpl.java      |  16 +++++
 .../lucene/internal/LuceneServiceImpl.java      |  16 +++++
 .../operations/CloseCQOperationContext.java     |  21 +++++--
 .../operations/DestroyOperationContext.java     |  21 +++++--
 .../operations/ExecuteCQOperationContext.java   |  21 +++++--
 .../ExecuteFunctionOperationContext.java        |  21 +++++--
 .../GetDurableCQsOperationContext.java          |  21 +++++--
 .../cache/operations/GetOperationContext.java   |  21 +++++--
 .../operations/InterestOperationContext.java    |  21 +++++--
 .../gemfire/cache/operations/InterestType.java  |  21 +++++--
 .../operations/InvalidateOperationContext.java  |  21 +++++--
 .../cache/operations/KeyOperationContext.java   |  21 +++++--
 .../operations/KeySetOperationContext.java      |  21 +++++--
 .../operations/KeyValueOperationContext.java    |  21 +++++--
 .../cache/operations/OperationContext.java      |  21 +++++--
 .../operations/PutAllOperationContext.java      |  21 +++++--
 .../cache/operations/PutOperationContext.java   |  21 +++++--
 .../cache/operations/QueryOperationContext.java |  21 +++++--
 .../operations/RegionClearOperationContext.java |  21 +++++--
 .../RegionCreateOperationContext.java           |  21 +++++--
 .../RegionDestroyOperationContext.java          |  21 +++++--
 .../operations/RegionOperationContext.java      |  21 +++++--
 .../RegisterInterestOperationContext.java       |  21 +++++--
 .../operations/RemoveAllOperationContext.java   |  21 +++++--
 .../operations/StopCQOperationContext.java      |  21 +++++--
 .../UnregisterInterestOperationContext.java     |  21 +++++--
 .../internal/GetOperationContextImpl.java       |  16 +++++
 .../cache/partition/PartitionListener.java      |  20 ++++--
 .../partition/PartitionListenerAdapter.java     |  23 ++++---
 .../cache/partition/PartitionManager.java       |  20 ++++--
 .../cache/partition/PartitionMemberInfo.java    |  21 +++++--
 .../PartitionNotAvailableException.java         |  21 +++++--
 .../cache/partition/PartitionRebalanceInfo.java |  21 +++++--
 .../cache/partition/PartitionRegionHelper.java  |  20 ++++--
 .../cache/partition/PartitionRegionInfo.java    |  21 +++++--
 .../ConflictingPersistentDataException.java     |  21 +++++--
 .../persistence/PartitionOfflineException.java  |  21 +++++--
 .../gemfire/cache/persistence/PersistentID.java |  23 ++++---
 .../PersistentReplicatesOfflineException.java   |  21 +++++--
 .../persistence/RevokeFailedException.java      |  21 +++++--
 .../RevokedPersistentDataException.java         |  21 +++++--
 .../gemfire/cache/query/Aggregator.java         |  16 +++++
 .../cache/query/AmbiguousNameException.java     |  21 +++++--
 .../gemfire/cache/query/CqAttributes.java       |  21 +++++--
 .../cache/query/CqAttributesFactory.java        |  21 +++++--
 .../cache/query/CqAttributesMutator.java        |  21 +++++--
 .../gemfire/cache/query/CqClosedException.java  |  21 +++++--
 .../gemstone/gemfire/cache/query/CqEvent.java   |  21 +++++--
 .../gemfire/cache/query/CqException.java        |  21 +++++--
 .../gemfire/cache/query/CqExistsException.java  |  21 +++++--
 .../gemfire/cache/query/CqListener.java         |  21 +++++--
 .../gemstone/gemfire/cache/query/CqQuery.java   |  21 +++++--
 .../gemstone/gemfire/cache/query/CqResults.java |  21 +++++--
 .../cache/query/CqServiceStatistics.java        |  21 +++++--
 .../gemstone/gemfire/cache/query/CqState.java   |  21 +++++--
 .../gemfire/cache/query/CqStatistics.java       |  21 +++++--
 .../gemfire/cache/query/CqStatusListener.java   |  21 +++++--
 .../cache/query/FunctionDomainException.java    |  21 +++++--
 .../com/gemstone/gemfire/cache/query/Index.java |  21 +++++--
 .../cache/query/IndexCreationException.java     |  21 +++++--
 .../cache/query/IndexExistsException.java       |  21 +++++--
 .../cache/query/IndexInvalidException.java      |  21 +++++--
 .../cache/query/IndexMaintenanceException.java  |  21 +++++--
 .../cache/query/IndexNameConflictException.java |  21 +++++--
 .../gemfire/cache/query/IndexStatistics.java    |  21 +++++--
 .../gemstone/gemfire/cache/query/IndexType.java |  21 +++++--
 .../query/MultiIndexCreationException.java      |  16 +++++
 .../cache/query/NameNotFoundException.java      |  21 +++++--
 .../cache/query/NameResolutionException.java    |  21 +++++--
 .../query/ParameterCountInvalidException.java   |  21 +++++--
 .../com/gemstone/gemfire/cache/query/Query.java |  21 +++++--
 .../gemfire/cache/query/QueryException.java     |  21 +++++--
 .../query/QueryExecutionLowMemoryException.java |  21 +++++--
 .../query/QueryExecutionTimeoutException.java   |  21 +++++--
 .../cache/query/QueryInvalidException.java      |  21 +++++--
 .../query/QueryInvocationTargetException.java   |  21 +++++--
 .../gemfire/cache/query/QueryService.java       |  21 +++++--
 .../gemfire/cache/query/QueryStatistics.java    |  21 +++++--
 .../cache/query/RegionNotFoundException.java    |  21 +++++--
 .../gemfire/cache/query/SelectResults.java      |  21 +++++--
 .../gemstone/gemfire/cache/query/Struct.java    |  21 +++++--
 .../cache/query/TypeMismatchException.java      |  21 +++++--
 .../query/internal/AbstractCompiledValue.java   |  22 ++++---
 .../internal/AbstractGroupOrRangeJunction.java  |  21 +++++--
 .../cache/query/internal/AllGroupJunction.java  |  21 +++++--
 .../query/internal/AttributeDescriptor.java     |  22 ++++---
 .../gemfire/cache/query/internal/Bag.java       |  21 +++++--
 .../internal/CompiledAggregateFunction.java     |  16 +++++
 .../query/internal/CompiledBindArgument.java    |  22 ++++---
 .../query/internal/CompiledComparison.java      |  22 ++++---
 .../query/internal/CompiledConstruction.java    |  22 ++++---
 .../cache/query/internal/CompiledFunction.java  |  22 ++++---
 .../query/internal/CompiledGroupBySelect.java   |  16 +++++
 .../cache/query/internal/CompiledID.java        |  22 ++++---
 .../cache/query/internal/CompiledIn.java        |  22 ++++---
 .../query/internal/CompiledIndexOperation.java  |  22 ++++---
 .../query/internal/CompiledIteratorDef.java     |  21 +++++--
 .../cache/query/internal/CompiledJunction.java  |  22 ++++---
 .../cache/query/internal/CompiledLike.java      |  21 +++++--
 .../cache/query/internal/CompiledLiteral.java   |  22 ++++---
 .../cache/query/internal/CompiledNegation.java  |  22 ++++---
 .../cache/query/internal/CompiledOperation.java |  22 ++++---
 .../cache/query/internal/CompiledPath.java      |  22 ++++---
 .../cache/query/internal/CompiledRegion.java    |  22 ++++---
 .../cache/query/internal/CompiledSelect.java    |  22 ++++---
 .../query/internal/CompiledSortCriterion.java   |  21 +++++--
 .../query/internal/CompiledUnaryMinus.java      |  21 +++++--
 .../cache/query/internal/CompiledUndefined.java |  22 ++++---
 .../cache/query/internal/CompiledValue.java     |  22 ++++---
 .../query/internal/CompositeGroupJunction.java  |  21 +++++--
 .../gemfire/cache/query/internal/CqEntry.java   |  21 +++++--
 .../cache/query/internal/CqQueryVsdStats.java   |  21 +++++--
 .../cache/query/internal/CqStateImpl.java       |  22 ++++---
 .../internal/CumulativeNonDistinctResults.java  |  16 +++++
 .../cache/query/internal/DefaultQuery.java      |  22 ++++---
 .../query/internal/DefaultQueryService.java     |  22 ++++---
 .../cache/query/internal/ExecutionContext.java  |  22 ++++---
 .../gemfire/cache/query/internal/Filter.java    |  22 ++++---
 .../gemfire/cache/query/internal/Functions.java |  22 ++++---
 .../cache/query/internal/GroupJunction.java     |  21 +++++--
 .../cache/query/internal/HashingStrategy.java   |  21 +++++--
 .../gemfire/cache/query/internal/IndexInfo.java |  21 ++++---
 .../internal/IndexTrackingQueryObserver.java    |  28 +++++----
 .../cache/query/internal/IndexUpdater.java      |  21 +++++--
 .../gemfire/cache/query/internal/Indexable.java |  21 +++++--
 .../cache/query/internal/LinkedResultSet.java   |  21 +++++--
 .../cache/query/internal/LinkedStructSet.java   |  21 +++++--
 .../cache/query/internal/MapIndexable.java      |  16 +++++
 .../cache/query/internal/MethodDispatch.java    |  22 ++++---
 .../cache/query/internal/NWayMergeResults.java  |  16 +++++
 .../gemfire/cache/query/internal/Negatable.java |  22 ++++---
 .../gemfire/cache/query/internal/NullToken.java |  23 ++++---
 .../cache/query/internal/ObjectIntHashMap.java  |  21 +++++--
 .../cache/query/internal/OrderByComparator.java |  18 +++++-
 .../internal/OrderByComparatorUnmapped.java     |  16 +++++
 .../gemfire/cache/query/internal/Ordered.java   |  16 +++++
 .../cache/query/internal/OrganizedOperands.java |  21 +++++--
 .../cache/query/internal/PRQueryTraceInfo.java  |  22 ++++---
 .../gemfire/cache/query/internal/PathUtils.java |  22 ++++---
 .../gemfire/cache/query/internal/PlanInfo.java  |  22 ++++---
 .../cache/query/internal/ProxyQueryService.java |  21 +++++--
 .../gemfire/cache/query/internal/QCompiler.java |  22 ++++---
 .../gemfire/cache/query/internal/QRegion.java   |  21 +++++--
 .../gemfire/cache/query/internal/QScope.java    |  22 ++++---
 .../QueryExecutionCanceledException.java        |  21 +++++--
 .../query/internal/QueryExecutionContext.java   |  24 ++++---
 .../cache/query/internal/QueryExecutor.java     |  21 +++++--
 .../cache/query/internal/QueryMonitor.java      |  21 +++++--
 .../cache/query/internal/QueryObserver.java     |  22 ++++---
 .../query/internal/QueryObserverAdapter.java    |  22 ++++---
 .../query/internal/QueryObserverHolder.java     |  22 ++++---
 .../cache/query/internal/QueryUtils.java        |  21 +++++--
 .../cache/query/internal/RangeJunction.java     |  21 +++++--
 .../cache/query/internal/ResultsBag.java        |  16 +++++
 .../ResultsCollectionCopyOnReadWrapper.java     |  21 +++++--
 ...ResultsCollectionPdxDeserializerWrapper.java |  21 +++++--
 .../internal/ResultsCollectionWrapper.java      |  21 +++++--
 .../cache/query/internal/ResultsSet.java        |  21 +++++--
 .../cache/query/internal/RuntimeIterator.java   |  22 ++++---
 .../query/internal/SelectResultsComparator.java |  21 +++++--
 .../cache/query/internal/SortedResultSet.java   |  21 +++++--
 .../cache/query/internal/SortedResultsBag.java  |  16 +++++
 .../cache/query/internal/SortedStructBag.java   |  16 +++++
 .../cache/query/internal/SortedStructSet.java   |  21 +++++--
 .../gemfire/cache/query/internal/StructBag.java |  21 +++++--
 .../cache/query/internal/StructFields.java      |  16 +++++
 .../cache/query/internal/StructImpl.java        |  21 +++++--
 .../gemfire/cache/query/internal/StructSet.java |  21 +++++--
 .../gemfire/cache/query/internal/Support.java   |  22 ++++---
 .../gemfire/cache/query/internal/Undefined.java |  22 ++++---
 .../internal/aggregate/AbstractAggregator.java  |  16 +++++
 .../cache/query/internal/aggregate/Avg.java     |  16 +++++
 .../query/internal/aggregate/AvgBucketNode.java |  16 +++++
 .../query/internal/aggregate/AvgDistinct.java   |  16 +++++
 .../aggregate/AvgDistinctPRQueryNode.java       |  16 +++++
 .../internal/aggregate/AvgPRQueryNode.java      |  16 +++++
 .../cache/query/internal/aggregate/Count.java   |  16 +++++
 .../query/internal/aggregate/CountDistinct.java |  16 +++++
 .../aggregate/CountDistinctPRQueryNode.java     |  16 +++++
 .../internal/aggregate/CountPRQueryNode.java    |  16 +++++
 .../internal/aggregate/DistinctAggregator.java  |  16 +++++
 .../cache/query/internal/aggregate/MaxMin.java  |  16 +++++
 .../cache/query/internal/aggregate/Sum.java     |  16 +++++
 .../query/internal/aggregate/SumDistinct.java   |  16 +++++
 .../aggregate/SumDistinctPRQueryNode.java       |  16 +++++
 .../cache/query/internal/cq/ClientCQ.java       |  16 +++++
 .../cache/query/internal/cq/CqService.java      |  16 +++++
 .../query/internal/cq/CqServiceProvider.java    |  16 +++++
 .../query/internal/cq/InternalCqQuery.java      |  16 +++++
 .../query/internal/cq/MissingCqService.java     |  16 +++++
 .../internal/cq/MissingCqServiceStatistics.java |  16 +++++
 .../cache/query/internal/cq/ServerCQ.java       |  16 +++++
 .../query/internal/cq/spi/CqServiceFactory.java |  16 +++++
 .../query/internal/index/AbstractIndex.java     |  21 ++++---
 .../query/internal/index/AbstractMapIndex.java  |  21 +++++--
 .../internal/index/CompactMapRangeIndex.java    |  21 +++++--
 .../query/internal/index/CompactRangeIndex.java |  21 +++++--
 .../query/internal/index/DummyQRegion.java      |  21 +++++--
 .../index/FunctionalIndexCreationHelper.java    |  21 +++++--
 .../cache/query/internal/index/HashIndex.java   |  21 +++++--
 .../query/internal/index/HashIndexSet.java      |  21 +++++--
 .../query/internal/index/HashIndexStrategy.java |  21 +++++--
 .../query/internal/index/IMQException.java      |  21 +++++--
 .../internal/index/IndexConcurrentHashSet.java  |  21 +++++--
 .../query/internal/index/IndexCreationData.java |  21 +++++--
 .../internal/index/IndexCreationHelper.java     |  21 +++++--
 .../cache/query/internal/index/IndexData.java   |  21 +++++--
 .../query/internal/index/IndexElemArray.java    |  21 +++++--
 .../query/internal/index/IndexManager.java      |  21 +++++--
 .../query/internal/index/IndexProtocol.java     |  20 ++++--
 .../cache/query/internal/index/IndexStats.java  |  21 +++++--
 .../cache/query/internal/index/IndexStore.java  |  23 ++++---
 .../cache/query/internal/index/IndexUtils.java  |  21 +++++--
 .../index/IndexedExpressionEvaluator.java       |  21 +++++--
 .../query/internal/index/MapIndexStore.java     |  21 +++++--
 .../query/internal/index/MapRangeIndex.java     |  21 +++++--
 .../query/internal/index/MemoryIndexStore.java  |  21 +++++--
 .../query/internal/index/PartitionedIndex.java  |  21 +++++--
 .../query/internal/index/PrimaryKeyIndex.java   |  21 +++++--
 .../index/PrimaryKeyIndexCreationHelper.java    |  21 +++++--
 .../cache/query/internal/index/RangeIndex.java  |  21 +++++--
 .../query/internal/parse/ASTAggregateFunc.java  |  16 +++++
 .../cache/query/internal/parse/ASTAnd.java      |  21 +++++--
 .../query/internal/parse/ASTCombination.java    |  21 +++++--
 .../query/internal/parse/ASTCompareOp.java      |  21 +++++--
 .../query/internal/parse/ASTConstruction.java   |  21 +++++--
 .../query/internal/parse/ASTConversionExpr.java |  21 +++++--
 .../cache/query/internal/parse/ASTDummy.java    |  16 +++++
 .../cache/query/internal/parse/ASTGroupBy.java  |  21 +++++--
 .../cache/query/internal/parse/ASTHint.java     |  21 +++++--
 .../query/internal/parse/ASTHintIdentifier.java |  21 +++++--
 .../query/internal/parse/ASTIdentifier.java     |  21 +++++--
 .../cache/query/internal/parse/ASTImport.java   |  21 +++++--
 .../cache/query/internal/parse/ASTIn.java       |  21 +++++--
 .../query/internal/parse/ASTIteratorDef.java    |  21 +++++--
 .../cache/query/internal/parse/ASTLike.java     |  21 +++++--
 .../cache/query/internal/parse/ASTLimit.java    |  23 ++++---
 .../cache/query/internal/parse/ASTLiteral.java  |  21 +++++--
 .../internal/parse/ASTMethodInvocation.java     |  21 +++++--
 .../cache/query/internal/parse/ASTOr.java       |  21 +++++--
 .../cache/query/internal/parse/ASTOrderBy.java  |  21 +++++--
 .../query/internal/parse/ASTParameter.java      |  21 +++++--
 .../cache/query/internal/parse/ASTPostfix.java  |  21 +++++--
 .../query/internal/parse/ASTProjection.java     |  21 +++++--
 .../query/internal/parse/ASTRegionPath.java     |  21 +++++--
 .../cache/query/internal/parse/ASTSelect.java   |  21 +++++--
 .../query/internal/parse/ASTSortCriterion.java  |  21 +++++--
 .../cache/query/internal/parse/ASTTrace.java    |  21 +++++--
 .../cache/query/internal/parse/ASTType.java     |  21 +++++--
 .../cache/query/internal/parse/ASTTypeCast.java |  21 +++++--
 .../cache/query/internal/parse/ASTUnary.java    |  21 +++++--
 .../query/internal/parse/ASTUndefinedExpr.java  |  21 +++++--
 .../query/internal/parse/ASTUnsupported.java    |  21 +++++--
 .../cache/query/internal/parse/GemFireAST.java  |  21 +++++--
 .../cache/query/internal/parse/UtilParser.java  |  21 +++++--
 .../internal/types/CollectionTypeImpl.java      |  21 +++++--
 .../types/ExtendedNumericComparator.java        |  22 ++++---
 .../cache/query/internal/types/MapTypeImpl.java |  21 +++++--
 .../query/internal/types/NumericComparator.java |  22 ++++---
 .../query/internal/types/ObjectTypeImpl.java    |  21 +++++--
 .../query/internal/types/StructTypeImpl.java    |  21 +++++--
 .../internal/types/TemporalComparator.java      |  22 ++++---
 .../cache/query/internal/types/TypeUtils.java   |  22 ++++---
 .../query/internal/utils/LimitIterator.java     |  16 +++++
 .../cache/query/internal/utils/PDXUtils.java    |  16 +++++
 .../cache/query/types/CollectionType.java       |  21 +++++--
 .../gemfire/cache/query/types/MapType.java      |  21 +++++--
 .../gemfire/cache/query/types/ObjectType.java   |  21 +++++--
 .../gemfire/cache/query/types/StructType.java   |  21 +++++--
 .../gemfire/cache/server/CacheServer.java       |  21 +++++--
 .../cache/server/ClientSubscriptionConfig.java  |  21 +++++--
 .../gemfire/cache/server/ServerLoad.java        |  21 +++++--
 .../gemfire/cache/server/ServerLoadProbe.java   |  21 +++++--
 .../cache/server/ServerLoadProbeAdapter.java    |  21 +++++--
 .../gemfire/cache/server/ServerMetrics.java     |  21 +++++--
 .../server/internal/ConnectionCountProbe.java   |  21 +++++--
 .../cache/server/internal/LoadMonitor.java      |  21 +++++--
 .../server/internal/ServerMetricsImpl.java      |  21 +++++--
 .../cache/snapshot/CacheSnapshotService.java    |  21 +++++--
 .../cache/snapshot/RegionSnapshotService.java   |  21 +++++--
 .../gemfire/cache/snapshot/SnapshotFilter.java  |  21 +++++--
 .../cache/snapshot/SnapshotIterator.java        |  21 +++++--
 .../gemfire/cache/snapshot/SnapshotOptions.java |  21 +++++--
 .../gemfire/cache/snapshot/SnapshotReader.java  |  21 +++++--
 .../cache/util/BoundedLinkedHashMap.java        |  21 +++++--
 .../cache/util/CacheListenerAdapter.java        |  21 +++++--
 .../gemfire/cache/util/CacheWriterAdapter.java  |  21 +++++--
 .../gemfire/cache/util/CqListenerAdapter.java   |  21 +++++--
 .../gemstone/gemfire/cache/util/Gateway.java    |  16 +++++
 .../cache/util/GatewayConflictHelper.java       |  21 +++++--
 .../cache/util/GatewayConflictResolver.java     |  21 +++++--
 .../gemfire/cache/util/GatewayEvent.java        |  21 +++++--
 .../gemfire/cache/util/ObjectSizer.java         |  21 +++++--
 .../gemfire/cache/util/ObjectSizerImpl.java     |  16 +++++
 .../util/RegionMembershipListenerAdapter.java   |  21 +++++--
 .../cache/util/RegionRoleListenerAdapter.java   |  21 +++++--
 .../cache/util/TimestampedEntryEvent.java       |  21 +++++--
 .../cache/util/TransactionListenerAdapter.java  |  21 +++++--
 .../gemfire/cache/wan/EventSequenceID.java      |  21 +++++--
 .../gemfire/cache/wan/GatewayEventFilter.java   |  20 ++++--
 .../wan/GatewayEventSubstitutionFilter.java     |  21 +++++--
 .../gemfire/cache/wan/GatewayQueueEvent.java    |  21 +++++--
 .../gemfire/cache/wan/GatewayReceiver.java      |  20 ++++--
 .../cache/wan/GatewayReceiverFactory.java       |  20 ++++--
 .../gemfire/cache/wan/GatewaySender.java        |  20 ++++--
 .../gemfire/cache/wan/GatewaySenderFactory.java |  20 ++++--
 .../cache/wan/GatewayTransportFilter.java       |  20 ++++--
 .../compression/CompressionException.java       |  23 ++++---
 .../gemfire/compression/Compressor.java         |  23 ++++---
 .../gemfire/compression/SnappyCompressor.java   |  21 +++++--
 .../gemfire/distributed/AbstractLauncher.java   |  20 ++++--
 .../distributed/ClientSocketFactory.java        |  21 +++++--
 .../distributed/DistributedLockService.java     |  21 +++++--
 .../gemfire/distributed/DistributedMember.java  |  21 +++++--
 .../gemfire/distributed/DistributedSystem.java  |  21 +++++--
 .../DistributedSystemDisconnectedException.java |  23 ++++---
 .../distributed/DurableClientAttributes.java    |  21 +++++--
 .../distributed/FutureCancelledException.java   |  21 +++++--
 .../distributed/GatewayCancelledException.java  |  23 ++++---
 .../distributed/LeaseExpiredException.java      |  21 +++++--
 .../gemstone/gemfire/distributed/Locator.java   |  21 +++++--
 .../gemfire/distributed/LocatorLauncher.java    |  20 ++++--
 .../distributed/LockNotHeldException.java       |  21 +++++--
 .../LockServiceDestroyedException.java          |  21 +++++--
 .../distributed/OplogCancelledException.java    |  23 ++++---
 .../distributed/PoolCancelledException.java     |  23 ++++---
 .../com/gemstone/gemfire/distributed/Role.java  |  21 +++++--
 .../gemfire/distributed/ServerLauncher.java     |  20 ++++--
 .../TXManagerCancelledException.java            |  23 ++++---
 .../internal/AbstractDistributionConfig.java    |  21 +++++--
 .../distributed/internal/AdminMessageType.java  |  16 +++++
 .../internal/AtomicLongWithTerminalState.java   |  21 +++++--
 .../internal/CollectingReplyProcessor.java      |  21 +++++--
 .../distributed/internal/ConflationKey.java     |  21 +++++--
 .../gemfire/distributed/internal/DM.java        |  21 +++++--
 .../gemfire/distributed/internal/DMStats.java   |  21 +++++--
 .../gemfire/distributed/internal/DSClock.java   |  16 +++++
 .../internal/DirectReplyProcessor.java          |  21 +++++--
 .../distributed/internal/DistributedState.java  |  21 +++++--
 .../internal/DistributionAdvisee.java           |  21 +++++--
 .../internal/DistributionAdvisor.java           |  21 +++++--
 .../internal/DistributionChannel.java           |  21 +++++--
 .../internal/DistributionConfig.java            |  21 +++++--
 .../internal/DistributionConfigImpl.java        |  21 +++++--
 .../internal/DistributionConfigSnapshot.java    |  21 +++++--
 .../internal/DistributionException.java         |  21 +++++--
 .../internal/DistributionManager.java           |  21 +++++--
 .../internal/DistributionManagerConfig.java     |  21 +++++--
 .../internal/DistributionMessage.java           |  21 +++++--
 .../internal/DistributionMessageObserver.java   |  23 ++++---
 .../distributed/internal/DistributionStats.java |  21 +++++--
 .../distributed/internal/FlowControlParams.java |  21 +++++--
 .../internal/ForceDisconnectOperation.java      |  21 +++++--
 .../FunctionExecutionPooledExecutor.java        |  21 +++++--
 .../distributed/internal/HealthMonitor.java     |  21 +++++--
 .../distributed/internal/HealthMonitorImpl.java |  21 +++++--
 .../internal/HighPriorityAckedMessage.java      |  21 +++++--
 .../HighPriorityDistributionMessage.java        |  21 +++++--
 .../distributed/internal/IgnoredByManager.java  |  16 +++++
 .../internal/InternalDistributedSystem.java     |  19 ++++--
 .../distributed/internal/InternalLocator.java   |  21 +++++--
 .../internal/LocatorLoadSnapshot.java           |  20 ++++--
 .../distributed/internal/LocatorStats.java      |  21 +++++--
 .../internal/LonerDistributionManager.java      |  21 +++++--
 .../gemfire/distributed/internal/MQueue.java    |  16 +++++
 .../internal/MembershipListener.java            |  21 +++++--
 .../distributed/internal/MessageFactory.java    |  21 +++++--
 .../distributed/internal/MessageWithReply.java  |  21 +++++--
 .../internal/OverflowQueueWithDMStats.java      |  21 +++++--
 .../distributed/internal/PoolStatHelper.java    |  21 +++++--
 .../internal/PooledDistributionMessage.java     |  21 +++++--
 .../internal/PooledExecutorWithDMStats.java     |  21 +++++--
 .../distributed/internal/ProcessorKeeper21.java |  21 +++++--
 .../distributed/internal/ProductUseLog.java     |  21 +++++--
 .../distributed/internal/ProfileListener.java   |  21 +++++--
 .../distributed/internal/QueueStatHelper.java   |  21 +++++--
 .../internal/ReliableReplyException.java        |  21 +++++--
 .../internal/ReliableReplyProcessor21.java      |  21 +++++--
 .../distributed/internal/ReplyException.java    |  21 +++++--
 .../distributed/internal/ReplyMessage.java      |  21 +++++--
 .../distributed/internal/ReplyProcessor21.java  |  21 +++++--
 .../distributed/internal/ReplySender.java       |  21 +++++--
 .../distributed/internal/ResourceEvent.java     |  20 ++++--
 .../internal/ResourceEventsListener.java        |  20 ++++--
 .../internal/RuntimeDistributionConfigImpl.java |  21 +++++--
 .../internal/SerialAckedMessage.java            |  21 +++++--
 .../internal/SerialDistributionMessage.java     |  21 +++++--
 .../SerialQueuedExecutorWithDMStats.java        |  21 +++++--
 .../distributed/internal/ServerLocation.java    |  21 +++++--
 .../distributed/internal/ServerLocator.java     |  21 +++++--
 .../internal/SharedConfiguration.java           |  21 +++++--
 .../distributed/internal/ShutdownMessage.java   |  21 +++++--
 .../gemfire/distributed/internal/Sizeable.java  |  16 +++++
 .../distributed/internal/SizeableRunnable.java  |  21 +++++--
 .../distributed/internal/StartupMessage.java    |  21 +++++--
 .../internal/StartupMessageData.java            |  21 +++++--
 .../internal/StartupMessageReplyProcessor.java  |  21 +++++--
 .../distributed/internal/StartupOperation.java  |  21 +++++--
 .../internal/StartupResponseMessage.java        |  21 +++++--
 .../StartupResponseWithVersionMessage.java      |  23 ++++---
 .../internal/ThrottledMemQueueStatHelper.java   |  21 +++++--
 .../internal/ThrottledQueueStatHelper.java      |  21 +++++--
 .../ThrottlingMemLinkedQueueWithDMStats.java    |  21 +++++--
 .../internal/WaitForViewInstallation.java       |  21 +++++--
 .../internal/WanLocatorDiscoverer.java          |  16 +++++
 .../deadlock/DLockDependencyMonitor.java        |  21 +++++--
 .../internal/deadlock/DeadlockDetector.java     |  21 +++++--
 .../internal/deadlock/Dependency.java           |  21 +++++--
 .../internal/deadlock/DependencyGraph.java      |  21 +++++--
 .../internal/deadlock/DependencyMonitor.java    |  21 +++++--
 .../deadlock/DependencyMonitorManager.java      |  21 +++++--
 .../deadlock/GemFireDeadlockDetector.java       |  21 +++++--
 .../internal/deadlock/LocalLockInfo.java        |  23 ++++---
 .../internal/deadlock/LocalThread.java          |  23 ++++---
 .../deadlock/MessageDependencyMonitor.java      |  21 +++++--
 .../internal/deadlock/ThreadReference.java      |  21 +++++--
 .../internal/deadlock/UnsafeThreadLocal.java    |  21 +++++--
 .../internal/direct/DirectChannel.java          |  21 +++++--
 .../internal/direct/MissingStubException.java   |  21 +++++--
 .../internal/locks/Collaboration.java           |  21 +++++--
 .../distributed/internal/locks/DLockBatch.java  |  21 +++++--
 .../internal/locks/DLockBatchId.java            |  21 +++++--
 .../internal/locks/DLockGrantor.java            |  21 +++++--
 .../locks/DLockLessorDepartureHandler.java      |  21 +++++--
 .../internal/locks/DLockQueryProcessor.java     |  21 +++++--
 .../locks/DLockRecoverGrantorProcessor.java     |  21 +++++--
 .../internal/locks/DLockReleaseProcessor.java   |  21 +++++--
 .../internal/locks/DLockRemoteToken.java        |  21 +++++--
 .../internal/locks/DLockRequestProcessor.java   |  21 +++++--
 .../internal/locks/DLockService.java            |  21 +++++--
 .../distributed/internal/locks/DLockStats.java  |  21 +++++--
 .../distributed/internal/locks/DLockToken.java  |  21 +++++--
 .../internal/locks/DeposeGrantorProcessor.java  |  21 +++++--
 .../internal/locks/DistributedLockStats.java    |  21 +++++--
 .../internal/locks/DistributedMemberLock.java   |  21 +++++--
 .../internal/locks/DummyDLockStats.java         |  21 +++++--
 .../internal/locks/ElderInitProcessor.java      |  21 +++++--
 .../distributed/internal/locks/ElderState.java  |  21 +++++--
 .../distributed/internal/locks/GrantorInfo.java |  21 +++++--
 .../internal/locks/GrantorRequestProcessor.java |  21 +++++--
 .../locks/LockGrantorDestroyedException.java    |  21 +++++--
 .../internal/locks/LockGrantorId.java           |  21 +++++--
 .../locks/NonGrantorDestroyedProcessor.java     |  21 +++++--
 .../internal/locks/RemoteThread.java            |  21 +++++--
 .../DistributedMembershipListener.java          |  21 +++++--
 .../membership/InternalDistributedMember.java   |  21 +++++--
 .../internal/membership/InternalRole.java       |  21 +++++--
 .../internal/membership/MemberAttributes.java   |  21 +++++--
 .../internal/membership/MemberFactory.java      |  21 +++++--
 .../internal/membership/MemberServices.java     |  21 +++++--
 .../internal/membership/MembershipManager.java  |  21 +++++--
 .../internal/membership/MembershipTestHook.java |  21 +++++--
 .../internal/membership/NetMember.java          |  21 +++++--
 .../internal/membership/NetView.java            |  21 +++++--
 .../internal/membership/QuorumChecker.java      |  21 +++++--
 .../membership/jgroup/GFJGBasicAdapter.java     |  16 +++++
 .../membership/jgroup/GFJGPeerAdapter.java      |  16 +++++
 .../membership/jgroup/JGroupMember.java         |  21 +++++--
 .../membership/jgroup/JGroupMemberFactory.java  |  21 +++++--
 .../jgroup/JGroupMembershipManager.java         |  21 +++++--
 .../internal/membership/jgroup/LocatorImpl.java |  16 +++++
 .../membership/jgroup/QuorumCheckerImpl.java    |  21 +++++--
 .../internal/membership/jgroup/ViewMessage.java |  21 +++++--
 .../internal/streaming/StreamingOperation.java  |  21 +++++--
 .../internal/tcpserver/InfoRequest.java         |  20 ++++--
 .../internal/tcpserver/InfoResponse.java        |  20 ++++--
 .../internal/tcpserver/ShutdownRequest.java     |  20 ++++--
 .../internal/tcpserver/ShutdownResponse.java    |  20 ++++--
 .../internal/tcpserver/TcpClient.java           |  16 +++++
 .../internal/tcpserver/TcpHandler.java          |  16 +++++
 .../internal/tcpserver/TcpServer.java           |  16 +++++
 .../internal/tcpserver/VersionRequest.java      |  16 +++++
 .../internal/tcpserver/VersionResponse.java     |  16 +++++
 .../unsafe/RegisterSignalHandlerSupport.java    |  20 ++++--
 .../gemstone/gemfire/i18n/LogWriterI18n.java    |  21 +++++--
 .../com/gemstone/gemfire/i18n/StringIdImpl.java |  21 +++++--
 .../gemfire/internal/AbstractConfig.java        |  21 +++++--
 .../internal/AbstractStatisticsFactory.java     |  21 +++++--
 .../gemfire/internal/ArchiveSplitter.java       |  21 +++++--
 .../com/gemstone/gemfire/internal/Assert.java   |  21 +++++--
 .../gemfire/internal/AvailablePort.java         |  21 +++++--
 .../com/gemstone/gemfire/internal/Banner.java   |  49 ++++++++++-----
 .../gemfire/internal/ByteArrayDataInput.java    |  21 +++++--
 .../internal/ByteBufferOutputStream.java        |  21 +++++--
 .../gemfire/internal/ByteBufferWriter.java      |  16 +++++
 .../gemfire/internal/ClassLoadUtil.java         |  21 +++++--
 .../gemfire/internal/ClassPathLoader.java       |  21 +++++--
 .../com/gemstone/gemfire/internal/Config.java   |  21 +++++--
 .../gemstone/gemfire/internal/ConfigSource.java |  21 +++++--
 .../gemfire/internal/CopyOnWriteHashSet.java    |  21 +++++--
 .../com/gemstone/gemfire/internal/DSCODE.java   |  21 +++++--
 .../gemstone/gemfire/internal/DSFIDFactory.java |  21 +++++--
 .../internal/DSFIDNotFoundException.java        |  21 +++++--
 .../internal/DataSerializableFixedID.java       |  21 +++++--
 .../gemfire/internal/DistributionLocator.java   |  21 +++++--
 .../internal/DummyStatisticsFactory.java        |  21 +++++--
 .../gemfire/internal/DummyStatisticsImpl.java   |  21 +++++--
 .../gemfire/internal/ExternalizableDSFID.java   |  21 +++++--
 .../com/gemstone/gemfire/internal/FileUtil.java |  21 +++++--
 .../gemfire/internal/GemFireStatSampler.java    |  21 +++++--
 .../gemfire/internal/GemFireUtilLauncher.java   |  21 +++++--
 .../gemfire/internal/GemFireVersion.java        |  21 +++++--
 .../internal/GfeConsoleReaderFactory.java       |  20 ++++--
 .../gemfire/internal/HeapDataOutputStream.java  |  21 +++++--
 .../gemfire/internal/HistogramStats.java        |  21 +++++--
 .../gemfire/internal/HostStatHelper.java        |  21 +++++--
 .../gemfire/internal/HostStatSampler.java       |  21 +++++--
 .../InsufficientDiskSpaceException.java         |  21 +++++--
 .../internal/InternalDataSerializer.java        |  21 +++++--
 .../gemfire/internal/InternalEntity.java        |  16 +++++
 .../gemfire/internal/InternalInstantiator.java  |  21 +++++--
 .../InternalStatisticsDisabledException.java    |  21 +++++--
 .../gemfire/internal/JarClassLoader.java        |  20 ++++--
 .../gemstone/gemfire/internal/JarDeployer.java  |  20 ++++--
 .../gemfire/internal/LinuxProcFsStatistics.java |  21 +++++--
 .../gemfire/internal/LinuxProcessStats.java     |  21 +++++--
 .../gemfire/internal/LinuxSystemStats.java      |  21 +++++--
 .../gemfire/internal/LocalStatListener.java     |  21 +++++--
 .../internal/LocalStatisticsFactory.java        |  21 +++++--
 .../gemfire/internal/LocalStatisticsImpl.java   |  21 +++++--
 .../gemstone/gemfire/internal/ManagerInfo.java  |  20 ++++--
 .../gemfire/internal/MigrationClient.java       |  21 +++++--
 .../gemfire/internal/MigrationServer.java       |  21 +++++--
 .../gemstone/gemfire/internal/NanoTimer.java    |  21 +++++--
 .../gemfire/internal/NullDataOutputStream.java  |  21 +++++--
 .../gemstone/gemfire/internal/OSProcess.java    |  19 ++++--
 .../gemfire/internal/OSXProcessStats.java       |  21 +++++--
 .../gemfire/internal/OSXSystemStats.java        |  21 +++++--
 .../gemfire/internal/ObjIdConcurrentMap.java    |  21 +++++--
 .../com/gemstone/gemfire/internal/ObjIdMap.java |  21 +++++--
 .../internal/ObjToByteArraySerializer.java      |  21 +++++--
 .../gemfire/internal/OneTaskOnlyExecutor.java   |  21 +++++--
 .../gemfire/internal/OsStatisticsFactory.java   |  21 +++++--
 .../gemfire/internal/PdxSerializerObject.java   |  21 +++++--
 .../gemfire/internal/ProcessOutputReader.java   |  21 +++++--
 .../gemstone/gemfire/internal/ProcessStats.java |  21 +++++--
 .../gemstone/gemfire/internal/PureJavaMode.java |  21 +++++--
 ...cheduledThreadPoolExecutorWithKeepAlive.java |  21 +++++--
 .../com/gemstone/gemfire/internal/Sendable.java |  18 +++++-
 .../gemfire/internal/SerializationVersions.java |  21 +++++--
 .../com/gemstone/gemfire/internal/SetUtils.java |  20 ++++--
 .../gemfire/internal/SharedLibrary.java         |  21 +++++--
 .../gemfire/internal/SimpleStatSampler.java     |  21 +++++--
 .../com/gemstone/gemfire/internal/SmHelper.java |  21 +++++--
 .../gemstone/gemfire/internal/SocketCloser.java |  16 +++++
 .../gemfire/internal/SocketCreator.java         |  21 +++++--
 .../gemfire/internal/SolarisProcessStats.java   |  21 +++++--
 .../gemfire/internal/SolarisSystemStats.java    |  21 +++++--
 .../gemfire/internal/StatArchiveFormat.java     |  21 +++++--
 .../gemfire/internal/StatArchiveReader.java     |  21 +++++--
 .../gemfire/internal/StatArchiveWriter.java     |  21 +++++--
 .../gemfire/internal/StatSamplerStats.java      |  21 +++++--
 .../internal/StatisticDescriptorImpl.java       |  21 +++++--
 .../gemfire/internal/StatisticsImpl.java        |  21 +++++--
 .../gemfire/internal/StatisticsManager.java     |  21 +++++--
 .../internal/StatisticsTypeFactoryImpl.java     |  21 +++++--
 .../gemfire/internal/StatisticsTypeImpl.java    |  21 +++++--
 .../gemfire/internal/StatisticsTypeXml.java     |  21 +++++--
 .../gemstone/gemfire/internal/SystemAdmin.java  |  21 +++++--
 .../gemfire/internal/SystemFailureTestHook.java |  21 +++++--
 .../gemstone/gemfire/internal/SystemTimer.java  |  21 +++++--
 .../gemfire/internal/UniqueIdGenerator.java     |  21 +++++--
 .../com/gemstone/gemfire/internal/VMStats.java  |  21 +++++--
 .../gemfire/internal/VMStatsContract.java       |  21 +++++--
 .../internal/VMStatsContractFactory.java        |  21 +++++--
 .../com/gemstone/gemfire/internal/Version.java  |  21 +++++--
 .../internal/VersionedDataInputStream.java      |  21 +++++--
 .../internal/VersionedDataOutputStream.java     |  21 +++++--
 .../internal/VersionedDataSerializable.java     |  16 +++++
 .../gemfire/internal/VersionedDataStream.java   |  21 +++++--
 .../gemfire/internal/VersionedObjectInput.java  |  21 +++++--
 .../gemfire/internal/VersionedObjectOutput.java |  21 +++++--
 .../gemfire/internal/WindowsProcessStats.java   |  21 +++++--
 .../gemfire/internal/WindowsSystemStats.java    |  21 +++++--
 .../internal/admin/AdminBridgeServer.java       |  16 +++++
 .../gemstone/gemfire/internal/admin/Alert.java  |  21 +++++--
 .../gemfire/internal/admin/AlertListener.java   |  21 +++++--
 .../gemfire/internal/admin/ApplicationVM.java   |  21 +++++--
 .../gemfire/internal/admin/CacheCollector.java  |  21 +++++--
 .../gemfire/internal/admin/CacheInfo.java       |  21 +++++--
 .../gemfire/internal/admin/CacheSnapshot.java   |  21 +++++--
 .../admin/ClientHealthMonitoringRegion.java     |  21 +++++--
 .../internal/admin/ClientMembershipMessage.java |  21 +++++--
 .../internal/admin/ClientStatsManager.java      |  21 +++++--
 .../internal/admin/CompoundEntrySnapshot.java   |  21 +++++--
 .../internal/admin/CompoundRegionSnapshot.java  |  21 +++++--
 .../gemfire/internal/admin/DLockInfo.java       |  21 +++++--
 .../gemfire/internal/admin/EntrySnapshot.java   |  21 +++++--
 .../gemfire/internal/admin/EntryValueNode.java  |  23 ++++---
 .../gemfire/internal/admin/GemFireVM.java       |  21 +++++--
 .../gemfire/internal/admin/GfManagerAgent.java  |  21 +++++--
 .../internal/admin/GfManagerAgentConfig.java    |  21 +++++--
 .../internal/admin/GfManagerAgentFactory.java   |  21 +++++--
 .../gemfire/internal/admin/GfObject.java        |  21 +++++--
 .../gemfire/internal/admin/HealthListener.java  |  21 +++++--
 .../internal/admin/JoinLeaveListener.java       |  21 +++++--
 .../gemfire/internal/admin/ListenerIdMap.java   |  21 +++++--
 .../gemfire/internal/admin/RegionSnapshot.java  |  21 +++++--
 .../gemfire/internal/admin/SSLConfig.java       |  21 +++++--
 .../gemfire/internal/admin/SnapshotClient.java  |  21 +++++--
 .../gemstone/gemfire/internal/admin/Stat.java   |  21 +++++--
 .../gemfire/internal/admin/StatAlert.java       |  21 +++++--
 .../internal/admin/StatAlertDefinition.java     |  21 +++++--
 .../internal/admin/StatAlertsManager.java       |  21 +++++--
 .../gemfire/internal/admin/StatListener.java    |  21 +++++--
 .../gemfire/internal/admin/StatResource.java    |  21 +++++--
 .../gemfire/internal/admin/TransportConfig.java |  21 +++++--
 .../admin/remote/AddHealthListenerRequest.java  |  21 +++++--
 .../admin/remote/AddHealthListenerResponse.java |  21 +++++--
 .../admin/remote/AddStatListenerRequest.java    |  21 +++++--
 .../admin/remote/AddStatListenerResponse.java   |  21 +++++--
 .../remote/AdminConsoleDisconnectMessage.java   |  21 +++++--
 .../admin/remote/AdminConsoleMessage.java       |  21 +++++--
 .../admin/remote/AdminFailureResponse.java      |  21 +++++--
 .../remote/AdminMultipleReplyProcessor.java     |  21 +++++--
 .../internal/admin/remote/AdminRegion.java      |  21 +++++--
 .../admin/remote/AdminReplyProcessor.java       |  21 +++++--
 .../internal/admin/remote/AdminRequest.java     |  21 +++++--
 .../internal/admin/remote/AdminResponse.java    |  21 +++++--
 .../internal/admin/remote/AdminWaiters.java     |  21 +++++--
 .../admin/remote/AlertLevelChangeMessage.java   |  21 +++++--
 .../admin/remote/AlertListenerMessage.java      |  21 +++++--
 .../admin/remote/AlertsNotificationMessage.java |  21 +++++--
 .../admin/remote/AppCacheSnapshotMessage.java   |  21 +++++--
 .../admin/remote/BridgeServerRequest.java       |  21 +++++--
 .../admin/remote/BridgeServerResponse.java      |  21 +++++--
 .../admin/remote/CacheConfigRequest.java        |  21 +++++--
 .../admin/remote/CacheConfigResponse.java       |  21 +++++--
 .../internal/admin/remote/CacheDisplay.java     |  21 +++++--
 .../internal/admin/remote/CacheInfoRequest.java |  21 +++++--
 .../admin/remote/CacheInfoResponse.java         |  21 +++++--
 .../admin/remote/CancelStatListenerRequest.java |  21 +++++--
 .../remote/CancelStatListenerResponse.java      |  21 +++++--
 .../internal/admin/remote/Cancellable.java      |  21 +++++--
 .../admin/remote/CancellationMessage.java       |  23 ++++---
 .../admin/remote/CancellationRegistry.java      |  23 ++++---
 .../remote/ChangeRefreshIntervalMessage.java    |  21 +++++--
 .../internal/admin/remote/CliLegacyMessage.java |  16 +++++
 .../admin/remote/ClientHealthStats.java         |  21 +++++--
 .../internal/admin/remote/CompactRequest.java   |  21 +++++--
 .../internal/admin/remote/CompactResponse.java  |  21 +++++--
 .../admin/remote/DestroyEntryMessage.java       |  23 ++++---
 .../admin/remote/DestroyRegionMessage.java      |  23 ++++---
 .../admin/remote/DistributionLocatorId.java     |  21 +++++--
 .../internal/admin/remote/DummyEntry.java       |  21 +++++--
 .../admin/remote/DurableClientInfoRequest.java  |  20 ++++--
 .../admin/remote/DurableClientInfoResponse.java |  20 ++++--
 .../admin/remote/EntryValueNodeImpl.java        |  23 ++++---
 .../admin/remote/FetchDistLockInfoRequest.java  |  21 +++++--
 .../admin/remote/FetchDistLockInfoResponse.java |  21 +++++--
 .../remote/FetchHealthDiagnosisRequest.java     |  21 +++++--
 .../remote/FetchHealthDiagnosisResponse.java    |  21 +++++--
 .../internal/admin/remote/FetchHostRequest.java |  21 +++++--
 .../admin/remote/FetchHostResponse.java         |  21 +++++--
 .../remote/FetchResourceAttributesRequest.java  |  21 +++++--
 .../remote/FetchResourceAttributesResponse.java |  21 +++++--
 .../admin/remote/FetchStatsRequest.java         |  21 +++++--
 .../admin/remote/FetchStatsResponse.java        |  20 ++++--
 .../admin/remote/FetchSysCfgRequest.java        |  21 +++++--
 .../admin/remote/FetchSysCfgResponse.java       |  21 +++++--
 .../remote/FlushAppCacheSnapshotMessage.java    |  21 +++++--
 .../admin/remote/HealthListenerMessage.java     |  21 +++++--
 .../remote/InspectionClasspathManager.java      |  21 +++++--
 .../admin/remote/LicenseInfoRequest.java        |  21 +++++--
 .../admin/remote/LicenseInfoResponse.java       |  21 +++++--
 .../remote/MissingPersistentIDsRequest.java     |  21 +++++--
 .../remote/MissingPersistentIDsResponse.java    |  21 +++++--
 .../admin/remote/ObjectDetailsRequest.java      |  21 +++++--
 .../admin/remote/ObjectDetailsResponse.java     |  21 +++++--
 .../admin/remote/ObjectNamesRequest.java        |  21 +++++--
 .../admin/remote/ObjectNamesResponse.java       |  21 +++++--
 .../PrepareRevokePersistentIDRequest.java       |  21 +++++--
 .../remote/RefreshMemberSnapshotRequest.java    |  21 +++++--
 .../remote/RefreshMemberSnapshotResponse.java   |  21 +++++--
 .../admin/remote/RegionAdminMessage.java        |  21 +++++--
 .../admin/remote/RegionAdminRequest.java        |  21 +++++--
 .../admin/remote/RegionAttributesRequest.java   |  21 +++++--
 .../admin/remote/RegionAttributesResponse.java  |  21 +++++--
 .../internal/admin/remote/RegionRequest.java    |  21 +++++--
 .../internal/admin/remote/RegionResponse.java   |  21 +++++--
 .../admin/remote/RegionSizeRequest.java         |  21 +++++--
 .../admin/remote/RegionSizeResponse.java        |  21 +++++--
 .../admin/remote/RegionStatisticsRequest.java   |  21 +++++--
 .../admin/remote/RegionStatisticsResponse.java  |  21 +++++--
 .../remote/RegionSubRegionSizeRequest.java      |  20 ++++--
 .../remote/RegionSubRegionsSizeResponse.java    |  20 ++++--
 .../internal/admin/remote/RemoteAlert.java      |  21 +++++--
 .../admin/remote/RemoteApplicationVM.java       |  21 +++++--
 .../admin/remote/RemoteBridgeServer.java        |  21 +++++--
 .../internal/admin/remote/RemoteCacheInfo.java  |  21 +++++--
 .../admin/remote/RemoteCacheStatistics.java     |  21 +++++--
 .../internal/admin/remote/RemoteDLockInfo.java  |  21 +++++--
 .../admin/remote/RemoteEntrySnapshot.java       |  21 +++++--
 .../internal/admin/remote/RemoteGemFireVM.java  |  21 +++++--
 .../admin/remote/RemoteGfManagerAgent.java      |  21 +++++--
 .../internal/admin/remote/RemoteObjectName.java |  21 +++++--
 .../admin/remote/RemoteRegionAttributes.java    |  21 +++++--
 .../admin/remote/RemoteRegionSnapshot.java      |  21 +++++--
 .../internal/admin/remote/RemoteStat.java       |  21 +++++--
 .../admin/remote/RemoteStatResource.java        |  21 +++++--
 .../admin/remote/RemoteTransportConfig.java     |  21 +++++--
 .../remote/RemoveHealthListenerRequest.java     |  21 +++++--
 .../remote/RemoveHealthListenerResponse.java    |  21 +++++--
 .../admin/remote/ResetHealthStatusRequest.java  |  21 +++++--
 .../admin/remote/ResetHealthStatusResponse.java |  21 +++++--
 .../admin/remote/RevokePersistentIDRequest.java |  21 +++++--
 .../remote/RevokePersistentIDResponse.java      |  21 +++++--
 .../admin/remote/RootRegionRequest.java         |  21 +++++--
 .../admin/remote/RootRegionResponse.java        |  21 +++++--
 .../remote/ShutdownAllGatewayHubsRequest.java   |  16 +++++
 .../admin/remote/ShutdownAllRequest.java        |  21 +++++--
 .../admin/remote/ShutdownAllResponse.java       |  21 +++++--
 .../admin/remote/SnapshotResultMessage.java     |  21 +++++--
 .../remote/StatAlertsManagerAssignMessage.java  |  21 +++++--
 .../admin/remote/StatListenerMessage.java       |  21 +++++--
 .../admin/remote/StoreSysCfgRequest.java        |  21 +++++--
 .../admin/remote/StoreSysCfgResponse.java       |  21 +++++--
 .../internal/admin/remote/SubRegionRequest.java |  21 +++++--
 .../admin/remote/SubRegionResponse.java         |  21 +++++--
 .../internal/admin/remote/TailLogRequest.java   |  21 +++++--
 .../internal/admin/remote/TailLogResponse.java  |  21 +++++--
 .../remote/UpdateAlertDefinitionMessage.java    |  21 +++++--
 .../admin/remote/VersionInfoRequest.java        |  21 +++++--
 .../admin/remote/VersionInfoResponse.java       |  21 +++++--
 .../admin/remote/VersionMismatchAlert.java      |  21 +++++--
 .../admin/statalerts/BaseDecoratorImpl.java     |  21 +++++--
 .../statalerts/DummyStatisticInfoImpl.java      |  21 +++++--
 .../admin/statalerts/FunctionDecoratorImpl.java |  21 +++++--
 .../admin/statalerts/FunctionHelper.java        |  20 ++++--
 .../statalerts/GaugeThresholdDecoratorImpl.java |  21 +++++--
 .../statalerts/MultiAttrDefinitionImpl.java     |  21 +++++--
 .../NumberThresholdDecoratorImpl.java           |  21 +++++--
 .../statalerts/SingleAttrDefinitionImpl.java    |  21 +++++--
 .../admin/statalerts/StatisticInfo.java         |  21 +++++--
 .../admin/statalerts/StatisticInfoImpl.java     |  21 +++++--
 .../cache/AbstractBucketRegionQueue.java        |  21 +++++--
 .../internal/cache/AbstractCacheServer.java     |  21 +++++--
 .../cache/AbstractDiskLRURegionEntry.java       |  21 +++++--
 .../internal/cache/AbstractDiskRegion.java      |  21 +++++--
 .../internal/cache/AbstractDiskRegionEntry.java |  21 +++++--
 .../internal/cache/AbstractLRURegionEntry.java  |  21 +++++--
 .../internal/cache/AbstractLRURegionMap.java    |  21 +++++--
 .../cache/AbstractOplogDiskRegionEntry.java     |  21 +++++--
 .../gemfire/internal/cache/AbstractRegion.java  |  21 +++++--
 .../internal/cache/AbstractRegionEntry.java     |  21 +++++--
 .../internal/cache/AbstractRegionMap.java       |  21 +++++--
 .../internal/cache/AbstractUpdateOperation.java |  21 +++++--
 .../gemfire/internal/cache/AcceptHelper.java    |  21 +++++--
 .../cache/AddCacheServerProfileMessage.java     |  21 +++++--
 .../gemfire/internal/cache/BackupLock.java      |  21 +++++--
 .../gemfire/internal/cache/BucketAdvisor.java   |  21 +++++--
 .../gemfire/internal/cache/BucketDump.java      |  21 +++++--
 .../internal/cache/BucketNotFoundException.java |  21 +++++--
 .../cache/BucketPersistenceAdvisor.java         |  21 +++++--
 .../gemfire/internal/cache/BucketRegion.java    |  21 +++++--
 .../internal/cache/BucketRegionEvictior.java    |  21 +++++--
 .../internal/cache/BucketRegionQueue.java       |  21 +++++--
 .../internal/cache/BucketServerLocation.java    |  20 ++++--
 .../internal/cache/BucketServerLocation66.java  |  20 ++++--
 .../cache/BytesAndBitsForCompactor.java         |  21 +++++--
 .../internal/cache/CacheClientStatus.java       |  21 +++++--
 .../gemfire/internal/cache/CacheConfig.java     |  21 +++++--
 .../cache/CacheDistributionAdvisee.java         |  21 +++++--
 .../cache/CacheDistributionAdvisor.java         |  21 +++++--
 .../internal/cache/CacheLifecycleListener.java  |  21 +++++--
 .../gemfire/internal/cache/CacheObserver.java   |  21 +++++--
 .../internal/cache/CacheObserverAdapter.java    |  21 +++++--
 .../internal/cache/CacheObserverHolder.java     |  21 +++++--
 .../gemfire/internal/cache/CachePerfStats.java  |  21 +++++--
 .../internal/cache/CacheServerAdvisor.java      |  21 +++++--
 .../gemfire/internal/cache/CacheServerImpl.java |  21 +++++--
 .../internal/cache/CacheServerLauncher.java     |  21 +++++--
 .../internal/cache/CacheStatisticsImpl.java     |  21 +++++--
 .../internal/cache/CachedDeserializable.java    |  21 +++++--
 .../cache/CachedDeserializableFactory.java      |  21 +++++--
 .../internal/cache/ClientRegionEventImpl.java   |  21 +++++--
 .../internal/cache/ClientServerObserver.java    |  21 +++++--
 .../cache/ClientServerObserverAdapter.java      |  21 +++++--
 .../cache/ClientServerObserverHolder.java       |  21 +++++--
 .../cache/ClientSubscriptionConfigImpl.java     |  21 +++++--
 .../internal/cache/CloseCacheMessage.java       |  21 +++++--
 .../cache/ClusterConfigurationLoader.java       |  16 +++++
 .../internal/cache/ColocationHelper.java        |  20 ++++--
 .../internal/cache/CommitReplyException.java    |  21 +++++--
 .../internal/cache/CompactableOplog.java        |  21 +++++--
 .../gemfire/internal/cache/Conflatable.java     |  21 +++++--
 .../internal/cache/ControllerAdvisor.java       |  21 +++++--
 .../internal/cache/CountingDataInputStream.java |  21 +++++--
 .../internal/cache/CreateRegionProcessor.java   |  21 +++++--
 .../internal/cache/CustomEntryExpiryTask.java   |  16 +++++
 .../cache/CustomEvictionAttributesImpl.java     |  21 +++++--
 .../internal/cache/DataLocationException.java   |  21 +++++--
 .../internal/cache/DestroyOperation.java        |  21 +++++--
 .../cache/DestroyPartitionedRegionMessage.java  |  21 +++++--
 .../internal/cache/DestroyRegionOperation.java  |  21 +++++--
 .../gemfire/internal/cache/DestroyedEntry.java  |  21 +++++--
 .../internal/cache/DirectReplyMessage.java      |  21 +++++--
 .../gemfire/internal/cache/DirectoryHolder.java |  21 +++++--
 .../internal/cache/DiskDirectoryStats.java      |  21 +++++--
 .../gemfire/internal/cache/DiskEntry.java       |  21 +++++--
 .../gemstone/gemfire/internal/cache/DiskId.java |  21 +++++--
 .../gemfire/internal/cache/DiskInitFile.java    |  21 +++++--
 .../gemfire/internal/cache/DiskRegion.java      |  21 +++++--
 .../gemfire/internal/cache/DiskRegionStats.java |  21 +++++--
 .../internal/cache/DiskStoreAttributes.java     |  21 +++++--
 .../gemfire/internal/cache/DiskStoreBackup.java |  21 +++++--
 .../internal/cache/DiskStoreFactoryImpl.java    |  21 +++++--
 .../gemfire/internal/cache/DiskStoreImpl.java   |  21 +++++--
 .../internal/cache/DiskStoreMonitor.java        |  21 +++++--
 .../internal/cache/DiskStoreObserver.java       |  21 +++++--
 .../gemfire/internal/cache/DiskStoreStats.java  |  21 +++++--
 .../gemfire/internal/cache/DiskStoreTask.java   |  21 +++++--
 .../internal/cache/DiskWriteAttributesImpl.java |  21 +++++--
 .../internal/cache/DistPeerTXStateStub.java     |  16 +++++
 .../cache/DistTXAdjunctCommitMessage.java       |  16 +++++
 .../internal/cache/DistTXCommitMessage.java     |  17 ++++-
 .../cache/DistTXCoordinatorInterface.java       |  21 +++++--
 .../internal/cache/DistTXPrecommitMessage.java  |  17 ++++-
 .../internal/cache/DistTXRollbackMessage.java   |  17 ++++-
 .../gemfire/internal/cache/DistTXState.java     |  16 +++++
 .../cache/DistTXStateOnCoordinator.java         |  16 +++++
 .../internal/cache/DistTXStateProxyImpl.java    |  16 +++++
 .../DistTXStateProxyImplOnCoordinator.java      |  18 +++++-
 .../cache/DistTXStateProxyImplOnDatanode.java   |  16 +++++
 .../cache/DistributedCacheOperation.java        |  20 ++++--
 .../cache/DistributedClearOperation.java        |  21 +++++--
 .../cache/DistributedPutAllOperation.java       |  21 +++++--
 .../internal/cache/DistributedRegion.java       |  21 +++++--
 ...stributedRegionFunctionStreamingMessage.java |  21 +++++--
 .../cache/DistributedRemoveAllOperation.java    |  21 +++++--
 .../cache/DistributedTombstoneOperation.java    |  21 +++++--
 .../internal/cache/DummyCachePerfStats.java     |  21 +++++--
 .../internal/cache/DynamicRegionAttributes.java |  21 +++++--
 .../cache/DynamicRegionFactoryImpl.java         |  21 +++++--
 .../gemfire/internal/cache/EntriesMap.java      |  21 +++++--
 .../gemfire/internal/cache/EntriesSet.java      |  21 +++++--
 .../gemfire/internal/cache/EntryBits.java       |  21 +++++--
 .../gemfire/internal/cache/EntryEventImpl.java  |  21 +++++--
 .../gemfire/internal/cache/EntryExpiryTask.java |  21 +++++--
 .../internal/cache/EntryOperationImpl.java      |  21 +++++--
 .../gemfire/internal/cache/EntrySnapshot.java   |  23 ++++---
 .../internal/cache/EnumListenerEvent.java       |  20 ++++--
 .../gemfire/internal/cache/EventID.java         |  21 +++++--
 .../internal/cache/EventStateHelper.java        |  21 +++++--
 .../gemfire/internal/cache/EventTracker.java    |  21 +++++--
 .../internal/cache/EvictionAttributesImpl.java  |  23 ++++---
 .../gemfire/internal/cache/EvictorService.java  |  21 +++++--
 .../internal/cache/ExpirationScheduler.java     |  21 +++++--
 .../gemfire/internal/cache/ExpiryTask.java      |  21 +++++--
 .../internal/cache/ExportDiskRegion.java        |  16 +++++
 .../gemfire/internal/cache/FilterProfile.java   |  21 +++++--
 .../internal/cache/FilterRoutingInfo.java       |  21 +++++--
 .../cache/FindDurableQueueProcessor.java        |  21 +++++--
 .../internal/cache/FindRemoteTXMessage.java     |  21 +++++--
 .../internal/cache/FindVersionTagOperation.java |  21 +++++--
 .../cache/FixedPartitionAttributesImpl.java     |  21 +++++--
 .../internal/cache/ForceReattemptException.java |  21 +++++--
 .../cache/ForceableLinkedBlockingQueue.java     |  21 +++++--
 .../FunctionStreamingOrderedReplyMessage.java   |  21 +++++--
 .../cache/FunctionStreamingReplyMessage.java    |  21 +++++--
 .../internal/cache/GatewayEventFilter.java      |  16 +++++
 .../internal/cache/GemFireCacheImpl.java        |  21 +++++--
 .../internal/cache/GemfireCacheHelper.java      |  23 ++++---
 .../gemfire/internal/cache/GridAdvisor.java     |  21 +++++--
 .../gemfire/internal/cache/HARegion.java        |  21 +++++--
 .../internal/cache/HDFSLRURegionMap.java        |  21 +++++--
 .../gemfire/internal/cache/HDFSRegionMap.java   |  16 +++++
 .../internal/cache/HDFSRegionMapDelegate.java   |  21 +++++--
 .../internal/cache/HDFSRegionMapImpl.java       |  21 +++++--
 .../internal/cache/HasCachePerfStats.java       |  16 +++++
 .../gemfire/internal/cache/ImageState.java      |  21 +++++--
 .../cache/InMemoryPersistentMemberView.java     |  21 +++++--
 .../internal/cache/IncomingGatewayStatus.java   |  21 +++++--
 .../internal/cache/InitialImageFlowControl.java |  21 +++++--
 .../internal/cache/InitialImageOperation.java   |  21 +++++--
 .../gemfire/internal/cache/InlineKeyHelper.java |  21 +++++--
 .../gemfire/internal/cache/InterestEvent.java   |  21 +++++--
 .../gemfire/internal/cache/InterestFilter.java  |  21 +++++--
 .../cache/InterestRegistrationEventImpl.java    |  21 +++++--
 .../gemfire/internal/cache/InternalCache.java   |  18 ++++--
 .../internal/cache/InternalCacheEvent.java      |  21 +++++--
 .../internal/cache/InternalDataView.java        |  21 +++++--
 .../internal/cache/InternalRegionArguments.java |  21 +++++--
 .../internal/cache/InvalidateOperation.java     |  21 +++++--
 .../InvalidatePartitionedRegionMessage.java     |  21 +++++--
 .../cache/InvalidateRegionOperation.java        |  21 +++++--
 .../cache/JtaAfterCompletionMessage.java        |  21 +++++--
 .../cache/JtaBeforeCompletionMessage.java       |  21 +++++--
 .../gemfire/internal/cache/KeyInfo.java         |  21 +++++--
 .../internal/cache/KeyWithRegionContext.java    |  21 +++++--
 .../gemfire/internal/cache/ListOfDeltas.java    |  21 +++++--
 .../internal/cache/LoaderHelperFactory.java     |  21 +++++--
 .../internal/cache/LoaderHelperImpl.java        |  21 +++++--
 .../gemfire/internal/cache/LocalDataSet.java    |  21 +++++--
 .../gemfire/internal/cache/LocalRegion.java     |  21 +++++--
 .../internal/cache/LocalRegionDataView.java     |  21 +++++--
 .../cache/MemberFunctionStreamingMessage.java   |  21 +++++--
 .../cache/MinimumSystemRequirements.java        |  21 +++++--
 .../cache/NetSearchExpirationCalculator.java    |  21 +++++--
 .../gemstone/gemfire/internal/cache/Node.java   |  20 ++++--
 .../internal/cache/NonLocalRegionEntry.java     |  21 +++++--
 .../cache/NonLocalRegionEntryWithStats.java     |  23 ++++---
 .../internal/cache/OffHeapRegionEntry.java      |  16 +++++
 .../cache/OfflineCompactionDiskRegion.java      |  21 +++++--
 .../gemstone/gemfire/internal/cache/OpType.java |  21 +++++--
 .../gemstone/gemfire/internal/cache/Oplog.java  |  21 +++++--
 .../gemfire/internal/cache/OplogSet.java        |  16 +++++
 .../internal/cache/OrderedTombstoneMap.java     |  21 +++++--
 .../gemfire/internal/cache/OverflowOplog.java   |  21 +++++--
 .../internal/cache/OverflowOplogSet.java        |  21 +++++--
 .../internal/cache/PRContainsValueFunction.java |  21 +++++--
 .../internal/cache/PRHARedundancyProvider.java  |  20 ++++--
 .../internal/cache/PRQueryProcessor.java        |  20 ++++--
 .../internal/cache/PRSystemPropertyGetter.java  |  21 +++++--
 .../internal/cache/PartitionAttributesImpl.java |  21 +++++--
 .../internal/cache/PartitionRegionConfig.java   |  21 +++++--
 .../cache/PartitionRegionConfigValidator.java   |  21 +++++--
 .../internal/cache/PartitionedRegion.java       |  20 ++++--
 .../PartitionedRegionBucketMgmtHelper.java      |  20 ++++--
 .../cache/PartitionedRegionDataStore.java       |  20 ++++--
 .../cache/PartitionedRegionDataView.java        |  21 +++++--
 .../cache/PartitionedRegionException.java       |  21 +++++--
 .../internal/cache/PartitionedRegionHelper.java |  20 ++++--
 .../cache/PartitionedRegionQueryEvaluator.java  |  20 ++++--
 .../internal/cache/PartitionedRegionStats.java  |  21 +++++--
 .../internal/cache/PartitionedRegionStatus.java |  21 +++++--
 .../gemfire/internal/cache/PeerTXStateStub.java |  21 +++++--
 .../internal/cache/PersistentOplogSet.java      |  21 +++++--
 .../internal/cache/PlaceHolderDiskRegion.java   |  23 ++++---
 .../gemfire/internal/cache/PoolFactoryImpl.java |  21 +++++--
 .../gemfire/internal/cache/PoolManagerImpl.java |  21 +++++--
 .../gemfire/internal/cache/PoolStats.java       |  21 +++++--
 .../cache/PreferBytesCachedDeserializable.java  |  21 +++++--
 .../internal/cache/PrimaryBucketException.java  |  21 +++++--
 .../cache/ProfileExchangeProcessor.java         |  21 +++++--
 .../internal/cache/ProxyBucketRegion.java       |  21 +++++--
 .../gemfire/internal/cache/ProxyRegionMap.java  |  21 +++++--
 .../cache/PutAllPartialResultException.java     |  21 +++++--
 .../gemfire/internal/cache/QueuedOperation.java |  21 +++++--
 .../internal/cache/RegionClearedException.java  |  21 +++++--
 .../gemfire/internal/cache/RegionEntry.java     |  21 +++++--
 .../internal/cache/RegionEntryContext.java      |  21 +++++--
 .../internal/cache/RegionEntryFactory.java      |  21 +++++--
 .../gemfire/internal/cache/RegionEventImpl.java |  21 +++++--
 .../internal/cache/RegionEvictorTask.java       |  21 +++++--
 .../internal/cache/RegionExpiryTask.java        |  21 +++++--
 .../internal/cache/RegionFactoryImpl.java       |  21 +++++--
 .../internal/cache/RegionIdleExpiryTask.java    |  21 +++++--
 .../gemfire/internal/cache/RegionMap.java       |  21 +++++--
 .../internal/cache/RegionMapFactory.java        |  21 +++++--
 .../gemfire/internal/cache/RegionQueue.java     |  21 +++++--
 .../internal/cache/RegionQueueException.java    |  21 +++++--
 .../gemfire/internal/cache/RegionStatus.java    |  21 +++++--
 .../internal/cache/RegionTTLExpiryTask.java     |  21 +++++--
 .../internal/cache/ReleaseClearLockMessage.java |  23 ++++---
 .../cache/ReliableDistributionData.java         |  21 +++++--
 .../internal/cache/ReliableMessageQueue.java    |  21 +++++--
 .../cache/ReliableMessageQueueFactory.java      |  21 +++++--
 .../cache/ReliableMessageQueueFactoryImpl.java  |  21 +++++--
 .../cache/RemoteContainsKeyValueMessage.java    |  20 ++++--
 .../internal/cache/RemoteDestroyMessage.java    |  21 +++++--
 .../internal/cache/RemoteFetchEntryMessage.java |  20 ++++--
 .../cache/RemoteFetchVersionMessage.java        |  20 ++++--
 .../internal/cache/RemoteGetMessage.java        |  21 +++++--
 .../internal/cache/RemoteInvalidateMessage.java |  20 ++++--
 .../cache/RemoteOperationException.java         |  21 +++++--
 .../internal/cache/RemoteOperationMessage.java  |  21 +++++--
 .../RemoteOperationMessageWithDirectReply.java  |  21 +++++--
 .../internal/cache/RemotePutAllMessage.java     |  21 +++++--
 .../internal/cache/RemotePutMessage.java        |  21 +++++--
 .../internal/cache/RemoteRegionOperation.java   |  20 ++++--
 .../internal/cache/RemoteRemoveAllMessage.java  |  21 +++++--
 .../gemfire/internal/cache/RoleEventImpl.java   |  21 +++++--
 .../cache/SearchLoadAndWriteProcessor.java      |  21 +++++--
 .../internal/cache/SendQueueOperation.java      |  21 +++++--
 .../internal/cache/SerializationHelper.java     |  16 +++++
 .../internal/cache/ServerPingMessage.java       |  16 +++++
 .../internal/cache/StateFlushOperation.java     |  21 +++++--
 .../cache/StoreAllCachedDeserializable.java     |  21 +++++--
 .../internal/cache/TXBucketRegionState.java     |  21 +++++--
 .../gemfire/internal/cache/TXCommitMessage.java |  21 +++++--
 .../gemfire/internal/cache/TXEntry.java         |  21 +++++--
 .../gemfire/internal/cache/TXEntryState.java    |  21 +++++--
 .../internal/cache/TXEntryStateFactory.java     |  21 +++++--
 .../internal/cache/TXEntryUserAttrState.java    |  21 +++++--
 .../gemfire/internal/cache/TXEvent.java         |  21 +++++--
 .../internal/cache/TXFarSideCMTracker.java      |  21 +++++--
 .../gemstone/gemfire/internal/cache/TXId.java   |  22 ++++---
 .../gemfire/internal/cache/TXLockRequest.java   |  21 +++++--
 .../gemfire/internal/cache/TXManagerImpl.java   |  21 +++++--
 .../gemfire/internal/cache/TXMessage.java       |  21 +++++--
 .../internal/cache/TXRegionLockRequestImpl.java |  21 +++++--
 .../gemfire/internal/cache/TXRegionState.java   |  21 +++++--
 .../internal/cache/TXRemoteCommitMessage.java   |  21 +++++--
 .../internal/cache/TXRemoteRollbackMessage.java |  21 +++++--
 .../internal/cache/TXReservationMgr.java        |  21 +++++--
 .../gemfire/internal/cache/TXRmtEvent.java      |  21 +++++--
 .../gemfire/internal/cache/TXState.java         |  21 +++++--
 .../internal/cache/TXStateInterface.java        |  21 +++++--
 .../gemfire/internal/cache/TXStateProxy.java    |  21 +++++--
 .../internal/cache/TXStateProxyImpl.java        |  21 +++++--
 .../gemfire/internal/cache/TXStateStub.java     |  21 +++++--
 .../cache/TXSynchronizationRunnable.java        |  21 +++++--
 .../cache/TestHeapThresholdObserver.java        |  21 +++++--
 .../cache/TimestampedEntryEventImpl.java        |  21 +++++--
 .../gemstone/gemfire/internal/cache/Token.java  |  21 +++++--
 .../internal/cache/TombstoneService.java        |  24 ++++---
 .../internal/cache/TransactionMessage.java      |  21 +++++--
 .../gemfire/internal/cache/TxEntryFactory.java  |  16 +++++
 .../internal/cache/UnsharedImageState.java      |  21 +++++--
 .../cache/UpdateAttributesProcessor.java        |  21 +++++--
 .../cache/UpdateEntryVersionOperation.java      |  22 ++++---
 .../gemfire/internal/cache/UpdateOperation.java |  20 ++++--
 .../cache/UserSpecifiedDiskStoreAttributes.java |  21 +++++--
 .../cache/UserSpecifiedRegionAttributes.java    |  21 +++++--
 .../internal/cache/VMCachedDeserializable.java  |  21 +++++--
 .../gemfire/internal/cache/VMLRURegionMap.java  |  21 +++++--
 .../gemfire/internal/cache/VMRegionMap.java     |  21 +++++--
 .../cache/VMStatsDiskLRURegionEntry.java        |  21 +++++--
 .../cache/VMStatsDiskLRURegionEntryHeap.java    |  21 +++++--
 .../VMStatsDiskLRURegionEntryHeapIntKey.java    |  21 +++++--
 .../VMStatsDiskLRURegionEntryHeapLongKey.java   |  21 +++++--
 .../VMStatsDiskLRURegionEntryHeapObjectKey.java |  21 +++++--
 ...VMStatsDiskLRURegionEntryHeapStringKey1.java |  21 +++++--
 ...VMStatsDiskLRURegionEntryHeapStringKey2.java |  21 +++++--
 .../VMStatsDiskLRURegionEntryHeapUUIDKey.java   |  21 +++++--
 .../cache/VMStatsDiskLRURegionEntryOffHeap.java |  21 +++++--
 .../VMStatsDiskLRURegionEntryOffHeapIntKey.java |  21 +++++--
 ...VMStatsDiskLRURegionEntryOffHeapLongKey.java |  21 +++++--
 ...StatsDiskLRURegionEntryOffHeapObjectKey.java |  21 +++++--
 ...tatsDiskLRURegionEntryOffHeapStringKey1.java |  21 +++++--
 ...tatsDiskLRURegionEntryOffHeapStringKey2.java |  21 +++++--
 ...VMStatsDiskLRURegionEntryOffHeapUUIDKey.java |  21 +++++--
 .../internal/cache/VMStatsDiskRegionEntry.java  |  21 +++++--
 .../cache/VMStatsDiskRegionEntryHeap.java       |  21 +++++--
 .../cache/VMStatsDiskRegionEntryHeapIntKey.java |  21 +++++--
 .../VMStatsDiskRegionEntryHeapLongKey.java      |  21 +++++--
 .../VMStatsDiskRegionEntryHeapObjectKey.java    |  21 +++++--
 .../VMStatsDiskRegionEntryHeapStringKey1.java   |  21 +++++--
 .../VMStatsDiskRegionEntryHeapStringKey2.java   |  21 +++++--
 .../VMStatsDiskRegionEntryHeapUUIDKey.java      |  21 +++++--
 .../cache/VMStatsDiskRegionEntryOffHeap.java    |  21 +++++--
 .../VMStatsDiskRegionEntryOffHeapIntKey.java    |  21 +++++--
 .../VMStatsDiskRegionEntryOffHeapLongKey.java   |  21 +++++--
 .../VMStatsDiskRegionEntryOffHeapObjectKey.java |  21 +++++--
 ...VMStatsDiskRegionEntryOffHeapStringKey1.java |  21 +++++--
 ...VMStatsDiskRegionEntryOffHeapStringKey2.java |  21 +++++--
 .../VMStatsDiskRegionEntryOffHeapUUIDKey.java   |  21 +++++--
 .../internal/cache/VMStatsLRURegionEntry.java   |  21 +++++--
 .../cache/VMStatsLRURegionEntryHeap.java        |  21 +++++--
 .../cache/VMStatsLRURegionEntryHeapIntKey.java  |  21 +++++--
 .../cache/VMStatsLRURegionEntryHeapLongKey.java |  21 +++++--
 .../VMStatsLRURegionEntryHeapObjectKey.java     |  21 +++++--
 .../VMStatsLRURegionEntryHeapStringKey1.java    |  21 +++++--
 .../VMStatsLRURegionEntryHeapStringKey2.java    |  21 +++++--
 .../cache/VMStatsLRURegionEntryHeapUUIDKey.java |  21 +++++--
 .../cache/VMStatsLRURegionEntryOffHeap.java     |  21 +++++--
 .../VMStatsLRURegionEntryOffHeapIntKey.java     |  21 +++++--
 .../VMStatsLRURegionEntryOffHeapLongKey.java    |  21 +++++--
 .../VMStatsLRURegionEntryOffHeapObjectKey.java  |  21 +++++--
 .../VMStatsLRURegionEntryOffHeapStringKey1.java |  21 +++++--
 .../VMStatsLRURegionEntryOffHeapStringKey2.java |  21 +++++--
 .../VMStatsLRURegionEntryOffHeapUUIDKey.java    |  21 +++++--
 .../internal/cache/VMStatsRegionEntry.java      |  21 +++++--
 .../internal/cache/VMStatsRegionEntryHeap.java  |  21 +++++--
 .../cache/VMStatsRegionEntryHeapIntKey.java     |  21 +++++--
 .../cache/VMStatsRegionEntryHeapLongKey.java    |  21 +++++--
 .../cache/VMStatsRegionEntryHeapObjectKey.java  |  21 +++++--
 .../cache/VMStatsRegionEntryHeapStringKey1.java |  21 +++++--
 .../cache/VMStatsRegionEntryHeapStringKey2.java |  21 +++++--
 .../cache/VMStatsRegionEntryHeapUUIDKey.java    |  21 +++++--
 .../cache/VMStatsRegionEntryOffHeap.java        |  21 +++++--
 .../cache/VMStatsRegionEntryOffHeapIntKey.java  |  21 +++++--
 .../cache/VMStatsRegionEntryOffHeapLongKey.java |  21 +++++--
 .../VMStatsRegionEntryOffHeapObjectKey.java     |  21 +++++--
 .../VMStatsRegionEntryOffHeapStringKey1.java    |  21 +++++--
 .../VMStatsRegionEntryOffHeapStringKey2.java    |  21 +++++--
 .../cache/VMStatsRegionEntryOffHeapUUIDKey.java |  21 +++++--
 .../cache/VMThinDiskLRURegionEntry.java         |  21 +++++--
 .../cache/VMThinDiskLRURegionEntryHeap.java     |  21 +++++--
 .../VMThinDiskLRURegionEntryHeapIntKey.java     |  21 +++++--
 .../VMThinDiskLRURegionEntryHeapLongKey.java    |  21 +++++--
 .../VMThinDiskLRURegionEntryHeapObjectKey.java  |  21 +++++--
 .../VMThinDiskLRURegionEntryHeapStringKey1.java |  21 +++++--
 .../VMThinDiskLRURegionEntryHeapStringKey2.java |  21 +++++--
 .../VMThinDiskLRURegionEntryHeapUUIDKey.java    |  21 +++++--
 .../cache/VMThinDiskLRURegionEntryOffHeap.java  |  21 +++++--
 .../VMThinDiskLRURegionEntryOffHeapIntKey.java  |  21 +++++--
 .../VMThinDiskLRURegionEntryOffHeapLongKey.java |  21 +++++--
 ...MThinDiskLRURegionEntryOffHeapObjectKey.java |  21 +++++--
 ...ThinDiskLRURegionEntryOffHeapStringKey1.java |  21 +++++--
 ...ThinDiskLRURegionEntryOffHeapStringKey2.java |  21 +++++--
 .../VMThinDiskLRURegionEntryOffHeapUUIDKey.java |  21 +++++--
 .../internal/cache/VMThinDiskRegionEntry.java   |  21 +++++--
 .../cache/VMThinDiskRegionEntryHeap.java        |  21 +++++--
 .../cache/VMThinDiskRegionEntryHeapIntKey.java  |  21 +++++--
 .../cache/VMThinDiskRegionEntryHeapLongKey.java |  21 +++++--
 .../VMThinDiskRegionEntryHeapObjectKey.java     |  21 +++++--
 .../VMThinDiskRegionEntryHeapStringKey1.java    |  21 +++++--
 .../VMThinDiskRegionEntryHeapStringKey2.java    |  21 +++++--
 .../cache/VMThinDiskRegionEntryHeapUUIDKey.java |  21 +++++--
 .../cache/VMThinDiskRegionEntryOffHeap.java     |  21 +++++--
 .../VMThinDiskRegionEntryOffHeapIntKey.java     |  21 +++++--
 .../VMThinDiskRegionEntryOffHeapLongKey.java    |  21 +++++--
 .../VMThinDiskRegionEntryOffHeapObjectKey.java  |  21 +++++--
 .../VMThinDiskRegionEntryOffHeapStringKey1.java |  21 +++++--
 .../VMThinDiskRegionEntryOffHeapStringKey2.java |  21 +++++--
 .../VMThinDiskRegionEntryOffHeapUUIDKey.java    |  21 +++++--
 .../internal/cache/VMThinLRURegionEntry.java    |  21 +++++--
 .../cache/VMThinLRURegionEntryHeap.java         |  21 +++++--
 .../cache/VMThinLRURegionEntryHeapIntKey.java   |  21 +++++--
 .../cache/VMThinLRURegionEntryHeapLongKey.java  |  21 +++++--
 .../VMThinLRURegionEntryHeapObjectKey.java      |  21 +++++--
 .../VMThinLRURegionEntryHeapStringKey1.java     |  21 +++++--
 .../VMThinLRURegionEntryHeapStringKey2.java     |  21 +++++--
 .../cache/VMThinLRURegionEntryHeapUUIDKey.java  |  21 +++++--
 .../cache/VMThinLRURegionEntryOffHeap.java      |  21 +++++--
 .../VMThinLRURegionEntryOffHeapIntKey.java      |  21 +++++--
 .../VMThinLRURegionEntryOffHeapLongKey.java     |  21 +++++--
 .../VMThinLRURegionEntryOffHeapObjectKey.java   |  21 +++++--
 .../VMThinLRURegionEntryOffHeapStringKey1.java  |  21 +++++--
 .../VMThinLRURegionEntryOffHeapStringKey2.java  |  21 +++++--
 .../VMThinLRURegionEntryOffHeapUUIDKey.java     |  21 +++++--
 .../internal/cache/VMThinRegionEntry.java       |  21 +++++--
 .../internal/cache/VMThinRegionEntryHeap.java   |  21 +++++--
 .../cache/VMThinRegionEntryHeapIntKey.java      |  21 +++++--
 .../cache/VMThinRegionEntryHeapLongKey.java     |  21 +++++--
 .../cache/VMThinRegionEntryHeapObjectKey.java   |  21 +++++--
 .../cache/VMThinRegionEntryHeapStringKey1.java  |  21 +++++--
 .../cache/VMThinRegionEntryHeapStringKey2.java  |  21 +++++--
 .../cache/VMThinRegionEntryHeapUUIDKey.java     |  21 +++++--
 .../cache/VMThinRegionEntryOffHeap.java         |  16 +++++
 .../cache/VMThinRegionEntryOffHeapIntKey.java   |  21 +++++--
 .../cache/VMThinRegionEntryOffHeapLongKey.java  |  21 +++++--
 .../VMThinRegionEntryOffHeapObjectKey.java      |  21 +++++--
 .../VMThinRegionEntryOffHeapStringKey1.java     |  21 +++++--
 .../VMThinRegionEntryOffHeapStringKey2.java     |  21 +++++--
 .../cache/VMThinRegionEntryOffHeapUUIDKey.java  |  21 +++++--
 .../internal/cache/ValidatingDiskRegion.java    |  21 +++++--
 .../internal/cache/ValueByteWrapper.java        |  21 +++++--
 .../internal/cache/VersionTimestamp.java        |  21 +++++--
 .../cache/VersionedStatsDiskLRURegionEntry.java |  21 +++++--
 .../VersionedStatsDiskLRURegionEntryHeap.java   |  21 +++++--
 ...sionedStatsDiskLRURegionEntryHeapIntKey.java |  21 +++++--
 ...ionedStatsDiskLRURegionEntryHeapLongKey.java |  21 +++++--
 ...nedStatsDiskLRURegionEntryHeapObjectKey.java |  21 +++++--
 ...edStatsDiskLRURegionEntryHeapStringKey1.java |  21 +++++--
 ...edStatsDiskLRURegionEntryHeapStringKey2.java |  21 +++++--
 ...ionedStatsDiskLRURegionEntryHeapUUIDKey.java |  21 +++++--
 ...VersionedStatsDiskLRURegionEntryOffHeap.java |  21 +++++--
 ...nedStatsDiskLRURegionEntryOffHeapIntKey.java |  21 +++++--
 ...edStatsDiskLRURegionEntryOffHeapLongKey.java |  21 +++++--
 ...StatsDiskLRURegionEntryOffHeapObjectKey.java |  21 +++++--
 ...tatsDiskLRURegionEntryOffHeapStringKey1.java |  21 +++++--
 ...tatsDiskLRURegionEntryOffHeapStringKey2.java |  21 +++++--
 ...edStatsDiskLRURegionEntryOffHeapUUIDKey.java |  21 +++++--
 .../cache/VersionedStatsDiskRegionEntry.java    |  21 +++++--
 .../VersionedStatsDiskRegionEntryHeap.java      |  21 +++++--
 ...VersionedStatsDiskRegionEntryHeapIntKey.java |  21 +++++--
 ...ersionedStatsDiskRegionEntryHeapLongKey.java |  21 +++++--
 ...sionedStatsDiskRegionEntryHeapObjectKey.java |  21 +++++--
 ...ionedStatsDiskRegionEntryHeapStringKey1.java |  21 +++++--
 ...ionedStatsDiskRegionEntryHeapStringKey2.java |  21 +++++--
 ...ersionedStatsDiskRegionEntryHeapUUIDKey.java |  21 +++++--
 .../VersionedStatsDiskRegionEntryOffHeap.java   |  21 +++++--
 ...sionedStatsDiskRegionEntryOffHeapIntKey.java |  21 +++++--
 ...ionedStatsDiskRegionEntryOffHeapLongKey.java |  21 +++++--
 ...nedStatsDiskRegionEntryOffHeapObjectKey.java |  21 +++++--
 ...edStatsDiskRegionEntryOffHeapStringKey1.java |  21 +++++--
 ...edStatsDiskRegionEntryOffHeapStringKey2.java |  21 +++++--
 ...ionedStatsDiskRegionEntryOffHeapUUIDKey.java |  21 +++++--
 .../cache/VersionedStatsLRURegionEntry.java     |  21 +++++--
 .../cache/VersionedStatsLRURegionEntryHeap.java |  21 +++++--
 .../VersionedStatsLRURegionEntryHeapIntKey.java |  21 +++++--
 ...VersionedStatsLRURegionEntryHeapLongKey.java |  21 +++++--
 ...rsionedStatsLRURegionEntryHeapObjectKey.java |  21 +++++--
 ...sionedStatsLRURegionEntryHeapStringKey1.java |  21 +++++--
 ...sionedStatsLRURegionEntryHeapStringKey2.java |  21 +++++--
 ...VersionedStatsLRURegionEntryHeapUUIDKey.java |  21 +++++--
 .../VersionedStatsLRURegionEntryOffHeap.java    |  21 +++++--
 ...rsionedStatsLRURegionEntryOffHeapIntKey.java |  21 +++++--
 ...sionedStatsLRURegionEntryOffHeapLongKey.java |  21 +++++--
 ...onedStatsLRURegionEntryOffHeapObjectKey.java |  21 +++++--
 ...nedStatsLRURegionEntryOffHeapStringKey1.java |  21 +++++--
 ...nedStatsLRURegionEntryOffHeapStringKey2.java |  21 +++++--
 ...sionedStatsLRURegionEntryOffHeapUUIDKey.java |  21 +++++--
 .../cache/VersionedStatsRegionEntry.java        |  21 +++++--
 .../cache/VersionedStatsRegionEntryHeap.java    |  21 +++++--
 .../VersionedStatsRegionEntryHeapIntKey.java    |  21 +++++--
 .../VersionedStatsRegionEntryHeapLongKey.java   |  21 +++++--
 .../VersionedStatsRegionEntryHeapObjectKey.java |  21 +++++--
 ...VersionedStatsRegionEntryHeapStringKey1.java |  21 +++++--
 ...VersionedStatsRegionEntryHeapStringKey2.java |  21 +++++--
 .../VersionedStatsRegionEntryHeapUUIDKey.java   |  21 +++++--
 .../cache/VersionedStatsRegionEntryOffHeap.java |  21 +++++--
 .../VersionedStatsRegionEntryOffHeapIntKey.java |  21 +++++--
 ...VersionedStatsRegionEntryOffHeapLongKey.java |  21 +++++--
 ...rsionedStatsRegionEntryOffHeapObjectKey.java |  21 +++++--
 ...sionedStatsRegionEntryOffHeapStringKey1.java |  21 +++++--
 ...sionedStatsRegionEntryOffHeapStringKey2.java |  21 +++++--
 ...VersionedStatsRegionEntryOffHeapUUIDKey.java |  21 +++++--
 .../cache/VersionedThinDiskLRURegionEntry.java  |  21 +++++--
 .../VersionedThinDiskLRURegionEntryHeap.java    |  21 +++++--
 ...rsionedThinDiskLRURegionEntryHeapIntKey.java |  21 +++++--
 ...sionedThinDiskLRURegionEntryHeapLongKey.java |  21 +++++--
 ...onedThinDiskLRURegionEntryHeapObjectKey.java |  21 +++++--
 ...nedThinDiskLRURegionEntryHeapStringKey1.java |  21 +++++--
 ...nedThinDiskLRURegionEntryHeapStringKey2.java |  21 +++++--
 ...sionedThinDiskLRURegionEntryHeapUUIDKey.java |  21 +++++--
 .../VersionedThinDiskLRURegionEntryOffHeap.java |  21 +++++--
 ...onedThinDiskLRURegionEntryOffHeapIntKey.java |  21 +++++--
 ...nedThinDiskLRURegionEntryOffHeapLongKey.java |  21 +++++--
 ...dThinDiskLRURegionEntryOffHeapObjectKey.java |  21 +++++--
 ...ThinDiskLRURegionEntryOffHeapStringKey1.java |  21 +++++--
 ...ThinDiskLRURegionEntryOffHeapStringKey2.java |  21 +++++--
 ...nedThinDiskLRURegionEntryOffHeapUUIDKey.java |  21 +++++--
 .../cache/VersionedThinDiskRegionEntry.java     |  21 +++++--
 .../cache/VersionedThinDiskRegionEntryHeap.java |  21 +++++--
 .../VersionedThinDiskRegionEntryHeapIntKey.java |  21 +++++--
 ...VersionedThinDiskRegionEntryHeapLongKey.java |  21 +++++--
 ...rsionedThinDiskRegionEntryHeapObjectKey.java |  21 +++++--
 ...sionedThinDiskRegionEntryHeapStringKey1.java |  21 +++++--
 ...sionedThinDiskRegionEntryHeapStringKey2.java |  21 +++++--
 ...VersionedThinDiskRegionEntryHeapUUIDKey.java |  21 +++++--
 .../VersionedThinDiskRegionEntryOffHeap.java    |  21 +++++--
 ...rsionedThinDiskRegionEntryOffHeapIntKey.java |  21 +++++--
 ...sionedThinDiskRegionEntryOffHeapLongKey.java |  21 +++++--
 ...onedThinDiskRegionEntryOffHeapObjectKey.java |  21 +++++--
 ...nedThinDiskRegionEntryOffHeapStringKey1.java |  21 +++++--
 ...nedThinDiskRegionEntryOffHeapStringKey2.java |  21 +++++--
 ...sionedThinDiskRegionEntryOffHeapUUIDKey.java |  21 +++++--
 .../cache/VersionedThinLRURegionEntry.java      |  21 +++++--
 .../cache/VersionedThinLRURegionEntryHeap.java  |  21 +++++--
 .../VersionedThinLRURegionEntryHeapIntKey.java  |  21 +++++--
 .../VersionedThinLRURegionEntryHeapLongKey.java |  21 +++++--
 ...ersionedThinLRURegionEntryHeapObjectKey.java |  21 +++++--
 ...rsionedThinLRURegionEntryHeapStringKey1.java |  21 +++++--
 ...rsionedThinLRURegionEntryHeapStringKey2.java |  21 +++++--
 .../VersionedThinLRURegionEntryHeapUUIDKey.java |  21 +++++--
 .../VersionedThinLRURegionEntryOffHeap.java     |  21 +++++--
 ...ersionedThinLRURegionEntryOffHeapIntKey.java |  21 +++++--
 ...rsionedThinLRURegionEntryOffHeapLongKey.java |  21 +++++--
 ...ionedThinLRURegionEntryOffHeapObjectKey.java |  21 +++++--
 ...onedThinLRURegionEntryOffHeapStringKey1.java |  21 +++++--
 ...onedThinLRURegionEntryOffHeapStringKey2.java |  21 +++++--
 ...rsionedThinLRURegionEntryOffHeapUUIDKey.java |  21 +++++--
 .../cache/VersionedThinRegionEntry.java         |  21 +++++--
 .../cache/VersionedThinRegionEntryHeap.java     |  21 +++++--
 .../VersionedThinRegionEntryHeapIntKey.java     |  21 +++++--
 .../VersionedThinRegionEntryHeapLongKey.java    |  21 +++++--
 .../VersionedThinRegionEntryHeapObjectKey.java  |  21 +++++--
 .../VersionedThinRegionEntryHeapStringKey1.java |  21 +++++--
 .../VersionedThinRegionEntryHeapStringKey2.java |  21 +++++--
 .../VersionedThinRegionEntryHeapUUIDKey.java    |  21 +++++--
 .../cache/VersionedThinRegionEntryOffHeap.java  |  21 +++++--
 .../VersionedThinRegionEntryOffHeapIntKey.java  |  21 +++++--
 .../VersionedThinRegionEntryOffHeapLongKey.java |  21 +++++--
 ...ersionedThinRegionEntryOffHeapObjectKey.java |  21 +++++--
 ...rsionedThinRegionEntryOffHeapStringKey1.java |  21 +++++--
 ...rsionedThinRegionEntryOffHeapStringKey2.java |  21 +++++--
 .../VersionedThinRegionEntryOffHeapUUIDKey.java |  21 +++++--
 .../internal/cache/WrappedCallbackArgument.java |  21 +++++--
 .../cache/WrappedRegionMembershipListener.java  |  21 +++++--
 .../CompressedCachedDeserializable.java         |  23 ++++---
 .../SnappyCompressedCachedDeserializable.java   |  23 ++++---
 .../internal/cache/control/FilterByPath.java    |  21 +++++--
 .../cache/control/HeapMemoryMonitor.java        |  21 +++++--
 .../cache/control/InternalResourceManager.java  |  21 +++++--
 .../internal/cache/control/MemoryEvent.java     |  20 ++++--
 .../cache/control/MemoryThresholds.java         |  16 +++++
 .../cache/control/OffHeapMemoryMonitor.java     |  21 +++++--
 .../control/PartitionRebalanceDetailsImpl.java  |  21 +++++--
 .../cache/control/RebalanceOperationImpl.java   |  21 +++++--
 .../cache/control/RebalanceResultsImpl.java     |  21 +++++--
 .../internal/cache/control/RegionFilter.java    |  21 +++++--
 .../internal/cache/control/ResourceAdvisor.java |  21 +++++--
 .../internal/cache/control/ResourceEvent.java   |  20 ++++--
 .../cache/control/ResourceListener.java         |  21 +++++--
 .../cache/control/ResourceManagerStats.java     |  21 +++++--
 .../internal/cache/control/ResourceMonitor.java |  16 +++++
 .../gemfire/internal/cache/delta/Delta.java     |  21 +++++--
 .../cache/execute/AbstractExecution.java        |  21 +++++--
 .../cache/execute/BucketMovedException.java     |  21 +++++--
 .../cache/execute/DefaultResultCollector.java   |  20 ++++--
 .../DistributedRegionFunctionExecutor.java      |  20 ++++--
 .../DistributedRegionFunctionResultSender.java  |  21 +++++--
 .../DistributedRegionFunctionResultWaiter.java  |  21 +++++--
 .../cache/execute/FunctionContextImpl.java      |  21 +++++--
 .../execute/FunctionExecutionNodePruner.java    |  21 +++++--
 .../cache/execute/FunctionRemoteContext.java    |  21 +++++--
 .../cache/execute/FunctionServiceStats.java     |  21 +++++--
 .../internal/cache/execute/FunctionStats.java   |  21 +++++--
 .../FunctionStreamingResultCollector.java       |  21 +++++--
 .../cache/execute/InternalExecution.java        |  21 +++++--
 .../execute/InternalFunctionException.java      |  21 +++++--
 ...ternalFunctionInvocationTargetException.java |  21 +++++--
 .../cache/execute/InternalFunctionService.java  |  20 ++++--
 .../execute/InternalRegionFunctionContext.java  |  21 +++++--
 .../cache/execute/InternalResultSender.java     |  21 +++++--
 .../cache/execute/LocalResultCollector.java     |  20 ++++--
 .../cache/execute/LocalResultCollectorImpl.java |  21 +++++--
 .../cache/execute/MemberFunctionExecutor.java   |  21 +++++--
 .../execute/MemberFunctionResultSender.java     |  20 ++++--
 .../execute/MemberFunctionResultWaiter.java     |  21 +++++--
 .../cache/execute/MemberMappedArgument.java     |  20 ++++--
 .../execute/MultiRegionFunctionContext.java     |  21 +++++--
 .../execute/MultiRegionFunctionContextImpl.java |  21 +++++--
 .../execute/MultiRegionFunctionExecutor.java    |  21 +++++--
 .../MultiRegionFunctionResultWaiter.java        |  21 +++++--
 .../internal/cache/execute/NoResult.java        |  20 ++++--
 .../PartitionedRegionFunctionExecutor.java      |  21 +++++--
 .../PartitionedRegionFunctionResultSender.java  |  20 ++++--
 .../PartitionedRegionFunctionResultWaiter.java  |  21 +++++--
 .../execute/RegionFunctionContextImpl.java      |  21 +++++--
 .../cache/execute/ServerFunctionExecutor.java   |  21 +++++--
 .../execute/ServerRegionFunctionExecutor.java   |  23 ++++---
 .../ServerToClientFunctionResultSender.java     |  20 ++++--
 .../ServerToClientFunctionResultSender65.java   |  20 ++++--
 .../execute/StreamingFunctionOperation.java     |  21 +++++--
 .../execute/TransactionFunctionService.java     |  21 +++++--
 .../cache/execute/util/CommitFunction.java      |  21 +++++--
 .../util/FindRestEnabledServersFunction.java    |  23 ++++---
 .../execute/util/NestedTransactionFunction.java |  21 +++++--
 .../cache/execute/util/RollbackFunction.java    |  21 +++++--
 .../internal/cache/extension/Extensible.java    |  21 +++++--
 .../internal/cache/extension/Extension.java     |  21 +++++--
 .../cache/extension/ExtensionPoint.java         |  21 +++++--
 .../cache/extension/SimpleExtensionPoint.java   |  21 +++++--
 .../internal/cache/ha/HAContainerMap.java       |  21 +++++--
 .../internal/cache/ha/HAContainerRegion.java    |  21 +++++--
 .../internal/cache/ha/HAContainerWrapper.java   |  21 +++++--
 .../internal/cache/ha/HARegionQueue.java        |  20 ++++--
 .../cache/ha/HARegionQueueAttributes.java       |  21 +++++--
 .../internal/cache/ha/HARegionQueueStats.java   |  21 +++++--
 .../internal/cache/ha/QueueRemovalMessage.java  |  21 +++++--
 .../internal/cache/ha/ThreadIdentifier.java     |  21 +++++--
 .../locks/GFEAbstractQueuedSynchronizer.java    |  21 +++++--
 .../locks/ReentrantReadWriteWriteShareLock.java |  21 +++++--
 .../cache/locks/TXLessorDepartureHandler.java   |  21 +++++--
 .../internal/cache/locks/TXLockBatch.java       |  21 +++++--
 .../gemfire/internal/cache/locks/TXLockId.java  |  21 +++++--
 .../internal/cache/locks/TXLockIdImpl.java      |  21 +++++--
 .../internal/cache/locks/TXLockService.java     |  21 +++++--
 .../internal/cache/locks/TXLockServiceImpl.java |  21 +++++--
 .../internal/cache/locks/TXLockToken.java       |  21 +++++--
 .../locks/TXLockUpdateParticipantsMessage.java  |  21 +++++--
 .../locks/TXOriginatorRecoveryProcessor.java    |  21 +++++--
 .../locks/TXRecoverGrantorMessageProcessor.java |  21 +++++--
 .../cache/locks/TXRegionLockRequest.java        |  21 +++++--
 .../gemfire/internal/cache/lru/EnableLRU.java   |  21 +++++--
 .../gemfire/internal/cache/lru/HeapEvictor.java |  21 +++++--
 .../cache/lru/HeapLRUCapacityController.java    |  21 +++++--
 .../internal/cache/lru/HeapLRUStatistics.java   |  21 +++++--
 .../internal/cache/lru/LRUAlgorithm.java        |  21 +++++--
 .../cache/lru/LRUCapacityController.java        |  21 +++++--
 .../internal/cache/lru/LRUClockNode.java        |  21 +++++--
 .../gemfire/internal/cache/lru/LRUEntry.java    |  21 +++++--
 .../internal/cache/lru/LRUMapCallbacks.java     |  21 +++++--
 .../internal/cache/lru/LRUStatistics.java       |  21 +++++--
 .../cache/lru/MemLRUCapacityController.java     |  21 +++++--
 .../internal/cache/lru/NewLIFOClockHand.java    |  21 +++++--
 .../internal/cache/lru/NewLRUClockHand.java     |  21 +++++--
 .../internal/cache/lru/OffHeapEvictor.java      |  21 +++++--
 .../gemfire/internal/cache/lru/Sizeable.java    |  21 +++++--
 .../operations/ContainsKeyOperationContext.java |  21 +++++--
 .../AllBucketProfilesUpdateMessage.java         |  21 +++++--
 .../partitioned/BecomePrimaryBucketMessage.java |  21 +++++--
 .../internal/cache/partitioned/Bucket.java      |  21 +++++--
 .../cache/partitioned/BucketBackupMessage.java  |  20 ++++--
 .../partitioned/BucketProfileUpdateMessage.java |  21 +++++--
 .../cache/partitioned/BucketSizeMessage.java    |  21 +++++--
 .../partitioned/ContainsKeyValueMessage.java    |  20 ++++--
 .../cache/partitioned/CreateBucketMessage.java  |  21 +++++--
 .../partitioned/CreateMissingBucketsTask.java   |  23 ++++---
 .../partitioned/DeposePrimaryBucketMessage.java |  21 +++++--
 .../cache/partitioned/DestroyMessage.java       |  21 +++++--
 .../DestroyRegionOnDataStoreMessage.java        |  20 ++++--
 .../partitioned/DumpAllPRConfigMessage.java     |  20 ++++--
 .../cache/partitioned/DumpB2NRegion.java        |  20 ++++--
 .../cache/partitioned/DumpBucketsMessage.java   |  21 +++++--
 .../partitioned/EndBucketCreationMessage.java   |  21 +++++--
 .../partitioned/FetchBulkEntriesMessage.java    |  21 +++++--
 .../cache/partitioned/FetchEntriesMessage.java  |  21 +++++--
 .../cache/partitioned/FetchEntryMessage.java    |  20 ++++--
 .../cache/partitioned/FetchKeysMessage.java     |  21 +++++--
 .../FetchPartitionDetailsMessage.java           |  21 +++++--
 .../cache/partitioned/FlushMessage.java         |  21 +++++--
 .../internal/cache/partitioned/GetMessage.java  |  21 +++++--
 .../partitioned/IdentityRequestMessage.java     |  20 ++++--
 .../partitioned/IdentityUpdateMessage.java      |  21 +++++--
 .../cache/partitioned/IndexCreationMsg.java     |  21 +++++--
 .../cache/partitioned/InterestEventMessage.java |  21 +++++--
 .../cache/partitioned/InternalPRInfo.java       |  21 +++++--
 .../partitioned/InternalPartitionDetails.java   |  21 +++++--
 .../cache/partitioned/InvalidateMessage.java    |  20 ++++--
 .../internal/cache/partitioned/LoadProbe.java   |  21 +++++--
 .../internal/cache/partitioned/LockObject.java  |  21 +++++--
 .../partitioned/ManageBackupBucketMessage.java  |  21 +++++--
 .../cache/partitioned/ManageBucketMessage.java  |  21 +++++--
 .../cache/partitioned/MoveBucketMessage.java    |  21 +++++--
 .../cache/partitioned/OfflineMemberDetails.java |  23 ++++---
 .../partitioned/OfflineMemberDetailsImpl.java   |  21 +++++--
 .../cache/partitioned/PREntriesIterator.java    |  21 +++++--
 .../PRFunctionStreamingResultCollector.java     |  21 +++++--
 .../internal/cache/partitioned/PRLoad.java      |  21 +++++--
 .../PRLocallyDestroyedException.java            |  21 +++++--
 .../cache/partitioned/PRSanityCheckMessage.java |  21 +++++--
 .../cache/partitioned/PRTombstoneMessage.java   |  21 +++++--
 .../PRUpdateEntryVersionMessage.java            |  22 ++++---
 .../partitioned/PartitionMemberInfoImpl.java    |  21 +++++--
 .../cache/partitioned/PartitionMessage.java     |  21 +++++--
 .../PartitionMessageWithDirectReply.java        |  21 +++++--
 .../partitioned/PartitionRegionInfoImpl.java    |  21 +++++--
 ...rtitionedRegionFunctionStreamingMessage.java |  21 +++++--
 .../partitioned/PartitionedRegionObserver.java  |  21 +++++--
 .../PartitionedRegionObserverAdapter.java       |  21 +++++--
 .../PartitionedRegionObserverHolder.java        |  21 +++++--
 .../PartitionedRegionRebalanceOp.java           |  21 +++++--
 .../partitioned/PrimaryRequestMessage.java      |  21 +++++--
 .../cache/partitioned/PutAllPRMessage.java      |  21 +++++--
 .../internal/cache/partitioned/PutMessage.java  |  21 +++++--
 .../cache/partitioned/QueryMessage.java         |  21 +++++--
 .../cache/partitioned/RecoveryRunnable.java     |  23 ++++---
 .../RedundancyAlreadyMetException.java          |  21 +++++--
 .../cache/partitioned/RedundancyLogger.java     |  21 +++++--
 .../cache/partitioned/RegionAdvisor.java        |  20 ++++--
 .../partitioned/RemoteFetchKeysMessage.java     |  21 +++++--
 .../cache/partitioned/RemoteSizeMessage.java    |  21 +++++--
 .../cache/partitioned/RemoveAllPRMessage.java   |  21 +++++--
 .../cache/partitioned/RemoveBucketMessage.java  |  21 +++++--
 .../cache/partitioned/RemoveIndexesMessage.java |  21 +++++--
 .../internal/cache/partitioned/SizeMessage.java |  21 +++++--
 .../cache/partitioned/SizedBasedLoadProbe.java  |  21 +++++--
 .../StreamingPartitionOperation.java            |  24 ++++---
 .../partitioned/rebalance/BucketOperator.java   |  21 +++++--
 .../rebalance/CompositeDirector.java            |  21 +++++--
 .../rebalance/ExplicitMoveDirector.java         |  21 +++++--
 .../partitioned/rebalance/FPRDirector.java      |  21 +++++--
 .../partitioned/rebalance/MoveBuckets.java      |  21 +++++--
 .../partitioned/rebalance/MovePrimaries.java    |  21 +++++--
 .../partitioned/rebalance/MovePrimariesFPR.java |  21 +++++--
 .../rebalance/ParallelBucketOperator.java       |  16 +++++
 .../rebalance/PartitionedRegionLoadModel.java   |  21 +++++--
 .../rebalance/PercentageMoveDirector.java       |  21 +++++--
 .../rebalance/RebalanceDirector.java            |  21 +++++--
 .../rebalance/RebalanceDirectorAdapter.java     |  21 +++++--
 .../rebalance/RemoveOverRedundancy.java         |  21 +++++--
 .../rebalance/SatisfyRedundancy.java            |  21 +++++--
 .../rebalance/SatisfyRedundancyFPR.java         |  21 +++++--
 .../rebalance/SimulatedBucketOperator.java      |  21 +++++--
 .../cache/persistence/BackupInspector.java      |  21 +++++--
 .../cache/persistence/BackupManager.java        |  21 +++++--
 .../cache/persistence/BytesAndBits.java         |  21 +++++--
 .../cache/persistence/CanonicalIdHolder.java    |  21 +++++--
 .../CreatePersistentRegionProcessor.java        |  21 +++++--
 .../cache/persistence/DiskExceptionHandler.java |  21 +++++--
 .../persistence/DiskInitFileInterpreter.java    |  21 +++++--
 .../cache/persistence/DiskInitFileParser.java   |  21 +++++--
 .../cache/persistence/DiskRecoveryStore.java    |  23 ++++---
 .../cache/persistence/DiskRegionView.java       |  21 +++++--
 .../cache/persistence/DiskStoreFilter.java      |  23 ++++---
 .../internal/cache/persistence/DiskStoreID.java |  21 +++++--
 .../persistence/MembershipFlushRequest.java     |  21 +++++--
 .../persistence/MembershipViewRequest.java      |  21 +++++--
 .../internal/cache/persistence/OplogType.java   |  18 +++++-
 .../cache/persistence/PRPersistentConfig.java   |  21 +++++--
 .../cache/persistence/PersistenceAdvisor.java   |  21 +++++--
 .../persistence/PersistenceAdvisorImpl.java     |  21 +++++--
 .../persistence/PersistenceObserverHolder.java  |  21 +++++--
 .../cache/persistence/PersistentMemberID.java   |  21 +++++--
 .../persistence/PersistentMemberManager.java    |  21 +++++--
 .../persistence/PersistentMemberPattern.java    |  23 ++++---
 .../persistence/PersistentMemberState.java      |  21 +++++--
 .../cache/persistence/PersistentMemberView.java |  21 +++++--
 .../persistence/PersistentMembershipView.java   |  23 ++++---
 .../persistence/PersistentStateListener.java    |  23 ++++---
 .../PersistentStateQueryMessage.java            |  21 +++++--
 .../PersistentStateQueryResults.java            |  21 +++++--
 .../PrepareNewPersistentMemberMessage.java      |  21 +++++--
 .../RemovePersistentMemberMessage.java          |  21 +++++--
 .../cache/persistence/RestoreScript.java        |  21 +++++--
 .../persistence/UninterruptibleFileChannel.java |  18 +++++-
 .../UninterruptibleRandomAccessFile.java        |  16 +++++
 .../persistence/query/CloseableIterator.java    |  18 +++++-
 .../persistence/query/IdentityExtractor.java    |  16 +++++
 .../cache/persistence/query/IndexMap.java       |  21 +++++--
 .../cache/persistence/query/ResultBag.java      |  21 +++++--
 .../cache/persistence/query/ResultList.java     |  21 +++++--
 .../cache/persistence/query/ResultMap.java      |  21 +++++--
 .../cache/persistence/query/ResultSet.java      |  21 +++++--
 .../persistence/query/SortKeyExtractor.java     |  16 +++++
 .../query/TemporaryResultSetFactory.java        |  21 +++++--
 .../persistence/query/mock/ByteComparator.java  |  21 +++++--
 .../mock/CachedDeserializableComparator.java    |  23 ++++---
 .../persistence/query/mock/IndexMapImpl.java    |  21 +++++--
 .../persistence/query/mock/ItrAdapter.java      |  23 ++++---
 .../query/mock/NaturalComparator.java           |  16 +++++
 .../cache/persistence/query/mock/Pair.java      |  21 +++++--
 .../persistence/query/mock/PairComparator.java  |  21 +++++--
 .../persistence/query/mock/ResultListImpl.java  |  21 +++++--
 .../query/mock/ReverseComparator.java           |  21 +++++--
 .../query/mock/SortedResultBagImpl.java         |  21 +++++--
 .../query/mock/SortedResultMapImpl.java         |  21 +++++--
 .../query/mock/SortedResultSetImpl.java         |  21 +++++--
 .../persistence/soplog/AbstractCompactor.java   |  21 +++++--
 .../soplog/AbstractKeyValueIterator.java        |  21 +++++--
 .../soplog/AbstractSortedReader.java            |  21 +++++--
 .../soplog/ArraySerializedComparator.java       |  21 +++++--
 .../persistence/soplog/ByteComparator.java      |  21 +++++--
 .../cache/persistence/soplog/Compactor.java     |  21 +++++--
 .../soplog/CompositeSerializedComparator.java   |  21 +++++--
 .../persistence/soplog/CursorIterator.java      |  21 +++++--
 .../soplog/DelegatingSerializedComparator.java  |  21 +++++--
 .../soplog/HFileStoreStatistics.java            |  21 +++++--
 .../soplog/IndexSerializedComparator.java       |  21 +++++--
 .../persistence/soplog/KeyValueIterator.java    |  21 +++++--
 .../cache/persistence/soplog/LevelTracker.java  |  21 +++++--
 .../soplog/LexicographicalComparator.java       |  21 +++++--
 .../cache/persistence/soplog/NonCompactor.java  |  21 +++++--
 .../soplog/ReversingSerializedComparator.java   |  21 +++++--
 .../persistence/soplog/SizeTieredCompactor.java |  21 +++++--
 .../cache/persistence/soplog/SoplogToken.java   |  21 +++++--
 .../cache/persistence/soplog/SortedBuffer.java  |  21 +++++--
 .../cache/persistence/soplog/SortedOplog.java   |  21 +++++--
 .../persistence/soplog/SortedOplogFactory.java  |  21 +++++--
 .../persistence/soplog/SortedOplogSet.java      |  21 +++++--
 .../persistence/soplog/SortedOplogSetImpl.java  |  21 +++++--
 .../soplog/SortedOplogStatistics.java           |  21 +++++--
 .../cache/persistence/soplog/SortedReader.java  |  21 +++++--
 .../persistence/soplog/TrackedReference.java    |  21 +++++--
 .../soplog/hfile/BlockCacheHolder.java          |  21 +++++--
 .../soplog/hfile/HFileSortedOplog.java          |  21 +++++--
 .../soplog/hfile/HFileSortedOplogFactory.java   |  21 +++++--
 .../soplog/nofile/NoFileSortedOplog.java        |  21 +++++--
 .../soplog/nofile/NoFileSortedOplogFactory.java |  21 +++++--
 .../snapshot/CacheSnapshotServiceImpl.java      |  21 +++++--
 .../internal/cache/snapshot/ClientExporter.java |  21 +++++--
 .../cache/snapshot/ExportedRegistry.java        |  21 +++++--
 .../internal/cache/snapshot/FlowController.java |  21 +++++--
 .../internal/cache/snapshot/GFSnapshot.java     |  21 +++++--
 .../internal/cache/snapshot/LocalExporter.java  |  21 +++++--
 .../snapshot/RegionSnapshotServiceImpl.java     |  21 +++++--
 .../cache/snapshot/SnapshotFileMapper.java      |  21 +++++--
 .../cache/snapshot/SnapshotOptionsImpl.java     |  21 +++++--
 .../internal/cache/snapshot/SnapshotPacket.java |  21 +++++--
 .../cache/snapshot/WindowedExporter.java        |  21 +++++--
 .../gemfire/internal/cache/tier/Acceptor.java   |  21 +++++--
 .../internal/cache/tier/BatchException.java     |  21 +++++--
 .../internal/cache/tier/CachedRegionHelper.java |  21 +++++--
 .../internal/cache/tier/ClientHandShake.java    |  21 +++++--
 .../gemfire/internal/cache/tier/Command.java    |  21 +++++--
 .../internal/cache/tier/ConnectionProxy.java    |  21 +++++--
 .../internal/cache/tier/InterestType.java       |  21 +++++--
 .../cache/tier/InternalClientMembership.java    |  21 +++++--
 .../internal/cache/tier/MessageType.java        |  21 +++++--
 .../cache/tier/sockets/AcceptorImpl.java        |  21 +++++--
 .../cache/tier/sockets/BaseCommand.java         |  21 +++++--
 .../cache/tier/sockets/BaseCommandQuery.java    |  16 +++++
 .../cache/tier/sockets/CacheClientNotifier.java |  21 +++++--
 .../tier/sockets/CacheClientNotifierStats.java  |  21 +++++--
 .../cache/tier/sockets/CacheClientProxy.java    |  20 ++++--
 .../tier/sockets/CacheClientProxyStats.java     |  21 +++++--
 .../cache/tier/sockets/CacheClientUpdater.java  |  21 +++++--
 .../cache/tier/sockets/CacheServerHelper.java   |  21 +++++--
 .../cache/tier/sockets/CacheServerStats.java    |  21 +++++--
 .../cache/tier/sockets/ChunkedMessage.java      |  21 +++++--
 .../tier/sockets/ClientBlacklistProcessor.java  |  21 +++++--
 .../sockets/ClientDataSerializerMessage.java    |  21 +++++--
 .../cache/tier/sockets/ClientHealthMonitor.java |  21 +++++--
 .../tier/sockets/ClientInstantiatorMessage.java |  20 ++++--
 .../tier/sockets/ClientInterestMessageImpl.java |  21 +++++--
 .../tier/sockets/ClientMarkerMessageImpl.java   |  21 +++++--
 .../cache/tier/sockets/ClientMessage.java       |  21 +++++--
 .../tier/sockets/ClientPingMessageImpl.java     |  21 +++++--
 .../tier/sockets/ClientProxyMembershipID.java   |  21 +++++--
 .../tier/sockets/ClientTombstoneMessage.java    |  21 +++++--
 .../cache/tier/sockets/ClientUpdateMessage.java |  21 +++++--
 .../tier/sockets/ClientUpdateMessageImpl.java   |  21 +++++--
 .../cache/tier/sockets/ClientUserAuths.java     |  21 +++++--
 .../cache/tier/sockets/CommandInitializer.java  |  21 +++++--
 .../cache/tier/sockets/ConnectionListener.java  |  21 +++++--
 .../tier/sockets/ConnectionListenerAdapter.java |  21 +++++--
 .../cache/tier/sockets/HAEventWrapper.java      |  21 +++++--
 .../internal/cache/tier/sockets/HandShake.java  |  21 +++++--
 .../tier/sockets/InterestResultPolicyImpl.java  |  21 +++++--
 .../internal/cache/tier/sockets/Message.java    |  21 +++++--
 .../cache/tier/sockets/MessageStats.java        |  21 +++++--
 .../cache/tier/sockets/ObjectPartList.java      |  21 +++++--
 .../cache/tier/sockets/ObjectPartList651.java   |  21 +++++--
 .../internal/cache/tier/sockets/Part.java       |  21 +++++--
 .../RemoveClientFromBlacklistMessage.java       |  23 ++++---
 .../tier/sockets/SerializedObjectPartList.java  |  21 +++++--
 .../cache/tier/sockets/ServerConnection.java    |  21 +++++--
 .../tier/sockets/ServerHandShakeProcessor.java  |  21 +++++--
 .../cache/tier/sockets/ServerQueueStatus.java   |  21 +++++--
 .../tier/sockets/ServerResponseMatrix.java      |  20 ++++--
 .../tier/sockets/UnregisterAllInterest.java     |  21 +++++--
 .../cache/tier/sockets/UserAuthAttributes.java  |  21 +++++--
 .../cache/tier/sockets/VersionedObjectList.java |  21 +++++--
 .../cache/tier/sockets/command/AddPdxEnum.java  |  21 +++++--
 .../cache/tier/sockets/command/AddPdxType.java  |  21 +++++--
 .../cache/tier/sockets/command/ClearRegion.java |  21 +++++--
 .../cache/tier/sockets/command/ClientReady.java |  21 +++++--
 .../tier/sockets/command/CloseConnection.java   |  21 +++++--
 .../tier/sockets/command/CommitCommand.java     |  21 +++++--
 .../cache/tier/sockets/command/ContainsKey.java |  21 +++++--
 .../tier/sockets/command/ContainsKey66.java     |  21 +++++--
 .../tier/sockets/command/CreateRegion.java      |  21 +++++--
 .../cache/tier/sockets/command/Default.java     |  21 +++++--
 .../cache/tier/sockets/command/Destroy.java     |  21 +++++--
 .../cache/tier/sockets/command/Destroy65.java   |  21 +++++--
 .../cache/tier/sockets/command/Destroy70.java   |  23 ++++---
 .../tier/sockets/command/DestroyRegion.java     |  21 +++++--
 .../tier/sockets/command/ExecuteFunction.java   |  21 +++++--
 .../tier/sockets/command/ExecuteFunction65.java |  20 ++++--
 .../tier/sockets/command/ExecuteFunction66.java |  20 ++++--
 .../tier/sockets/command/ExecuteFunction70.java |  21 +++++--
 .../sockets/command/ExecuteRegionFunction.java  |  20 ++++--
 .../command/ExecuteRegionFunction61.java        |  20 ++++--
 .../command/ExecuteRegionFunction65.java        |  20 ++++--
 .../command/ExecuteRegionFunction66.java        |  20 ++++--
 .../command/ExecuteRegionFunctionSingleHop.java |  21 ++++---
 .../sockets/command/GatewayReceiverCommand.java |  21 +++++--
 .../cache/tier/sockets/command/Get70.java       |  21 +++++--
 .../cache/tier/sockets/command/GetAll.java      |  21 +++++--
 .../cache/tier/sockets/command/GetAll651.java   |  21 +++++--
 .../cache/tier/sockets/command/GetAll70.java    |  23 ++++---
 .../cache/tier/sockets/command/GetAllForRI.java |  21 +++++--
 .../sockets/command/GetAllWithCallback.java     |  21 +++++--
 .../command/GetClientPRMetadataCommand.java     |  20 ++++--
 .../command/GetClientPRMetadataCommand66.java   |  20 ++++--
 .../GetClientPartitionAttributesCommand.java    |  20 ++++--
 .../GetClientPartitionAttributesCommand66.java  |  20 ++++--
 .../cache/tier/sockets/command/GetEntry70.java  |  23 ++++---
 .../tier/sockets/command/GetEntryCommand.java   |  21 +++++--
 .../sockets/command/GetFunctionAttribute.java   |  21 +++++--
 .../tier/sockets/command/GetPDXEnumById.java    |  21 +++++--
 .../tier/sockets/command/GetPDXIdForEnum.java   |  21 +++++--
 .../tier/sockets/command/GetPDXIdForType.java   |  21 +++++--
 .../tier/sockets/command/GetPDXTypeById.java    |  21 +++++--
 .../tier/sockets/command/GetPdxEnums70.java     |  21 +++++--
 .../tier/sockets/command/GetPdxTypes70.java     |  21 +++++--
 .../cache/tier/sockets/command/Invalid.java     |  21 +++++--
 .../cache/tier/sockets/command/Invalidate.java  |  21 +++++--
 .../tier/sockets/command/Invalidate70.java      |  23 ++++---
 .../cache/tier/sockets/command/KeySet.java      |  21 +++++--
 .../cache/tier/sockets/command/MakePrimary.java |  21 +++++--
 .../tier/sockets/command/ManagementCommand.java |  21 +++++--
 .../cache/tier/sockets/command/PeriodicAck.java |  21 +++++--
 .../cache/tier/sockets/command/Ping.java        |  21 +++++--
 .../cache/tier/sockets/command/Put.java         |  21 +++++--
 .../cache/tier/sockets/command/Put61.java       |  21 +++++--
 .../cache/tier/sockets/command/Put65.java       |  21 +++++--
 .../cache/tier/sockets/command/Put70.java       |  21 +++++--
 .../cache/tier/sockets/command/PutAll.java      |  21 +++++--
 .../cache/tier/sockets/command/PutAll70.java    |  21 +++++--
 .../cache/tier/sockets/command/PutAll80.java    |  21 +++++--
 .../sockets/command/PutAllWithCallback.java     |  21 +++++--
 .../sockets/command/PutUserCredentials.java     |  21 +++++--
 .../cache/tier/sockets/command/Query.java       |  21 +++++--
 .../cache/tier/sockets/command/Query651.java    |  21 +++++--
 .../command/RegisterDataSerializers.java        |  21 +++++--
 .../sockets/command/RegisterInstantiators.java  |  21 +++++--
 .../tier/sockets/command/RegisterInterest.java  |  21 +++++--
 .../sockets/command/RegisterInterest61.java     |  21 +++++--
 .../sockets/command/RegisterInterestList.java   |  21 +++++--
 .../sockets/command/RegisterInterestList61.java |  21 +++++--
 .../sockets/command/RegisterInterestList66.java |  21 +++++--
 .../cache/tier/sockets/command/RemoveAll.java   |  21 +++++--
 .../tier/sockets/command/RemoveUserAuth.java    |  21 +++++--
 .../cache/tier/sockets/command/Request.java     |  21 +++++--
 .../tier/sockets/command/RequestEventValue.java |  20 ++++--
 .../tier/sockets/command/RollbackCommand.java   |  21 +++++--
 .../cache/tier/sockets/command/Size.java        |  21 +++++--
 .../tier/sockets/command/TXFailoverCommand.java |  21 +++++--
 .../command/TXSynchronizationCommand.java       |  21 +++++--
 .../sockets/command/UnregisterInterest.java     |  21 +++++--
 .../sockets/command/UnregisterInterestList.java |  21 +++++--
 .../command/UpdateClientNotification.java       |  21 +++++--
 .../cache/tx/AbstractPeerTXRegionStub.java      |  21 +++++--
 .../internal/cache/tx/ClientTXRegionStub.java   |  21 +++++--
 .../internal/cache/tx/ClientTXStateStub.java    |  21 +++++--
 .../cache/tx/DistClientTXStateStub.java         |  19 +++++-
 .../internal/cache/tx/DistTxEntryEvent.java     |  16 +++++
 .../internal/cache/tx/DistTxKeyInfo.java        |  18 +++++-
 .../cache/tx/DistributedTXRegionStub.java       |  21 +++++--
 .../cache/tx/PartitionedTXRegionStub.java       |  21 +++++--
 .../gemfire/internal/cache/tx/TXRegionStub.java |  21 +++++--
 .../cache/tx/TransactionalOperation.java        |  24 ++++---
 .../cache/versions/CompactVersionHolder.java    |  21 +++++--
 .../ConcurrentCacheModificationException.java   |  23 ++++---
 .../cache/versions/DiskRegionVersionVector.java |  21 +++++--
 .../internal/cache/versions/DiskVersionTag.java |  21 +++++--
 .../internal/cache/versions/RVVException.java   |  21 +++++--
 .../internal/cache/versions/RVVExceptionB.java  |  23 ++++---
 .../internal/cache/versions/RVVExceptionT.java  |  23 ++++---
 .../cache/versions/RegionVersionHolder.java     |  21 +++++--
 .../cache/versions/RegionVersionVector.java     |  21 +++++--
 .../cache/versions/VMRegionVersionVector.java   |  21 +++++--
 .../internal/cache/versions/VMVersionTag.java   |  21 +++++--
 .../internal/cache/versions/VersionHolder.java  |  21 +++++--
 .../internal/cache/versions/VersionSource.java  |  21 +++++--
 .../internal/cache/versions/VersionStamp.java   |  21 +++++--
 .../internal/cache/versions/VersionTag.java     |  21 +++++--
 .../internal/cache/vmotion/VMotionObserver.java |  20 ++++--
 .../cache/vmotion/VMotionObserverAdapter.java   |  20 ++++--
 .../cache/vmotion/VMotionObserverHolder.java    |  20 ++++--
 .../cache/wan/AbstractGatewaySender.java        |  20 ++++--
 .../AbstractGatewaySenderEventProcessor.java    |  21 +++++--
 .../AsyncEventQueueConfigurationException.java  |  21 +++++--
 .../internal/cache/wan/BatchException70.java    |  21 +++++--
 .../cache/wan/DistributedSystemListener.java    |  16 +++++
 .../cache/wan/GatewayEventFilterImpl.java       |  21 +++++--
 .../cache/wan/GatewayReceiverException.java     |  21 +++++--
 .../cache/wan/GatewayReceiverStats.java         |  21 +++++--
 .../cache/wan/GatewaySenderAdvisor.java         |  21 +++++--
 .../cache/wan/GatewaySenderAttributes.java      |  21 +++++--
 .../GatewaySenderConfigurationException.java    |  21 +++++--
 .../wan/GatewaySenderEventCallbackArgument.java |  20 ++++--
 .../GatewaySenderEventCallbackDispatcher.java   |  20 ++++--
 .../cache/wan/GatewaySenderEventDispatcher.java |  21 +++++--
 .../cache/wan/GatewaySenderEventImpl.java       |  20 ++++--
 .../cache/wan/GatewaySenderException.java       |  21 +++++--
 .../internal/cache/wan/GatewaySenderStats.java  |  21 +++++--
 .../cache/wan/InternalGatewaySenderFactory.java |  16 +++++
 .../cache/wan/TransportFilterServerSocket.java  |  21 +++++--
 .../cache/wan/TransportFilterSocket.java        |  21 +++++--
 .../cache/wan/TransportFilterSocketFactory.java |  21 +++++--
 .../internal/cache/wan/WANServiceProvider.java  |  16 +++++
 .../BucketRegionQueueUnavailableException.java  |  16 +++++
 ...rentParallelGatewaySenderEventProcessor.java |  21 +++++--
 .../ConcurrentParallelGatewaySenderQueue.java   |  21 +++++--
 .../ParallelGatewaySenderEventProcessor.java    |  21 +++++--
 .../parallel/ParallelGatewaySenderQueue.java    |  21 +++++--
 .../ParallelQueueBatchRemovalMessage.java       |  21 +++++--
 .../parallel/ParallelQueueRemovalMessage.java   |  21 +++++--
 .../cache/wan/parallel/RREventIDResolver.java   |  21 +++++--
 .../cache/wan/serial/BatchDestroyOperation.java |  21 +++++--
 ...urrentSerialGatewaySenderEventProcessor.java |  21 +++++--
 .../SerialGatewaySenderEventProcessor.java      |  20 ++++--
 .../wan/serial/SerialGatewaySenderQueue.java    |  21 +++++--
 .../serial/SerialSecondaryGatewayListener.java  |  21 +++++--
 .../internal/cache/wan/spi/WANFactory.java      |  16 +++++
 .../cache/xmlcache/AbstractXmlParser.java       |  21 +++++--
 .../cache/xmlcache/AsyncEventQueueCreation.java |  21 +++++--
 .../cache/xmlcache/BindingCreation.java         |  21 +++++--
 .../internal/cache/xmlcache/CacheCreation.java  |  21 +++++--
 .../cache/xmlcache/CacheServerCreation.java     |  21 +++++--
 .../CacheTransactionManagerCreation.java        |  21 +++++--
 .../internal/cache/xmlcache/CacheXml.java       |  21 +++++--
 .../cache/xmlcache/CacheXmlGenerator.java       |  21 +++++--
 .../internal/cache/xmlcache/CacheXmlParser.java |  21 +++++--
 .../xmlcache/CacheXmlPropertyResolver.java      |  21 +++++--
 .../CacheXmlPropertyResolverHelper.java         |  21 +++++--
 .../cache/xmlcache/CacheXmlVersion.java         |  17 ++++-
 .../cache/xmlcache/ClientCacheCreation.java     |  21 +++++--
 .../cache/xmlcache/ClientHaQueueCreation.java   |  21 +++++--
 .../internal/cache/xmlcache/Declarable2.java    |  21 +++++--
 .../cache/xmlcache/DefaultEntityResolver2.java  |  21 +++++--
 .../xmlcache/DiskStoreAttributesCreation.java   |  21 +++++--
 .../cache/xmlcache/FunctionServiceCreation.java |  21 +++++--
 .../cache/xmlcache/GatewayReceiverCreation.java |  21 +++++--
 .../cache/xmlcache/IndexCreationData.java       |  21 +++++--
 .../ParallelAsyncEventQueueCreation.java        |  21 +++++--
 .../xmlcache/ParallelGatewaySenderCreation.java |  21 +++++--
 .../cache/xmlcache/PivotalEntityResolver.java   |  21 +++++--
 .../cache/xmlcache/PropertyResolver.java        |  21 +++++--
 .../xmlcache/RegionAttributesCreation.java      |  21 +++++--
 .../internal/cache/xmlcache/RegionCreation.java |  21 +++++--
 .../cache/xmlcache/ResourceManagerCreation.java |  21 +++++--
 .../xmlcache/SerialAsyncEventQueueCreation.java |  21 +++++--
 .../xmlcache/SerialGatewaySenderCreation.java   |  21 +++++--
 .../cache/xmlcache/SerializerCreation.java      |  21 +++++--
 .../internal/cache/xmlcache/XmlGenerator.java   |  21 +++++--
 .../cache/xmlcache/XmlGeneratorUtils.java       |  21 +++++--
 .../internal/cache/xmlcache/XmlParser.java      |  21 +++++--
 .../gemfire/internal/concurrent/AL.java         |  21 +++++--
 .../internal/concurrent/AtomicLong5.java        |  21 +++++--
 .../gemfire/internal/concurrent/Atomics.java    |  21 +++++--
 .../concurrent/CompactConcurrentHashSet2.java   |  16 +++++
 .../internal/concurrent/ConcurrentHashSet.java  |  21 +++++--
 .../gemfire/internal/concurrent/LI.java         |  21 +++++--
 .../internal/concurrent/MapCallback.java        |  21 +++++--
 .../internal/concurrent/MapCallbackAdapter.java |  21 +++++--
 .../gemfire/internal/concurrent/MapResult.java  |  21 +++++--
 .../internal/datasource/AbstractDataSource.java |  21 +++++--
 .../internal/datasource/AbstractPoolCache.java  |  21 +++++--
 .../ClientConnectionFactoryWrapper.java         |  21 +++++--
 .../internal/datasource/ConfigProperty.java     |  21 +++++--
 .../ConfiguredDataSourceProperties.java         |  21 +++++--
 .../ConnectionEventListenerAdaptor.java         |  21 +++++--
 .../datasource/ConnectionPoolCache.java         |  21 +++++--
 .../datasource/ConnectionPoolCacheImpl.java     |  21 +++++--
 .../internal/datasource/ConnectionProvider.java |  21 +++++--
 .../datasource/ConnectionProviderException.java |  21 +++++--
 .../datasource/DataSourceCreateException.java   |  21 +++++--
 .../internal/datasource/DataSourceFactory.java  |  21 +++++--
 .../datasource/DataSourceResources.java         |  21 +++++--
 .../FacetsJCAConnectionManagerImpl.java         |  21 +++++--
 .../datasource/GemFireBasicDataSource.java      |  21 +++++--
 .../datasource/GemFireConnPooledDataSource.java |  21 +++++--
 .../GemFireConnectionPoolManager.java           |  21 +++++--
 .../GemFireTransactionDataSource.java           |  21 +++++--
 .../datasource/JCAConnectionManagerImpl.java    |  21 +++++--
 .../datasource/ManagedPoolCacheImpl.java        |  21 +++++--
 .../internal/datasource/PoolException.java      |  21 +++++--
 .../internal/datasource/TranxPoolCacheImpl.java |  21 +++++--
 .../i18n/AbstractStringIdResourceBundle.java    |  21 +++++--
 .../gemfire/internal/i18n/LocalizedStrings.java |  23 ++++---
 .../internal/i18n/ParentLocalizedStrings.java   |  21 +++++--
 .../internal/io/CompositeOutputStream.java      |  21 +++++--
 .../internal/io/CompositePrintStream.java       |  21 +++++--
 .../gemfire/internal/io/TeeOutputStream.java    |  21 +++++--
 .../gemfire/internal/io/TeePrintStream.java     |  21 +++++--
 .../gemfire/internal/jndi/ContextImpl.java      |  21 +++++--
 .../jndi/InitialContextFactoryImpl.java         |  21 +++++--
 .../gemfire/internal/jndi/JNDIInvoker.java      |  21 +++++--
 .../gemfire/internal/jndi/NameParserImpl.java   |  21 +++++--
 .../gemfire/internal/jta/GlobalTransaction.java |  21 +++++--
 .../gemfire/internal/jta/TransactionImpl.java   |  21 +++++--
 .../internal/jta/TransactionManagerImpl.java    |  21 +++++--
 .../gemfire/internal/jta/TransactionUtils.java  |  21 +++++--
 .../internal/jta/UserTransactionImpl.java       |  21 +++++--
 .../gemstone/gemfire/internal/jta/XidImpl.java  |  21 +++++--
 .../gemfire/internal/lang/ClassUtils.java       |  20 ++++--
 .../gemstone/gemfire/internal/lang/Filter.java  |  18 ++++--
 .../gemfire/internal/lang/InOutParameter.java   |  20 ++++--
 .../gemfire/internal/lang/Initable.java         |  20 ++++--
 .../gemfire/internal/lang/Initializer.java      |  21 +++++--
 .../internal/lang/MutableIdentifiable.java      |  24 ++++---
 .../gemfire/internal/lang/ObjectUtils.java      |  20 ++++--
 .../gemfire/internal/lang/Orderable.java        |  20 ++++--
 .../gemstone/gemfire/internal/lang/Ordered.java |  20 ++++--
 .../gemfire/internal/lang/StringUtils.java      |  20 ++++--
 .../gemfire/internal/lang/SystemUtils.java      |  20 ++++--
 .../gemfire/internal/lang/ThreadUtils.java      |  20 ++++--
 .../gemfire/internal/logging/DateFormatter.java |  16 +++++
 .../internal/logging/DebugLogWriter.java        |  21 +++++--
 .../internal/logging/GemFireFormatter.java      |  21 +++++--
 .../internal/logging/GemFireHandler.java        |  21 +++++--
 .../gemfire/internal/logging/GemFireLevel.java  |  21 +++++--
 .../internal/logging/InternalLogWriter.java     |  16 +++++
 .../internal/logging/LocalLogWriter.java        |  21 +++++--
 .../gemfire/internal/logging/LogConfig.java     |  16 +++++
 .../gemfire/internal/logging/LogFileParser.java |  21 +++++--
 .../gemfire/internal/logging/LogService.java    |  21 +++++--
 .../internal/logging/LogWriterFactory.java      |  16 +++++
 .../gemfire/internal/logging/LogWriterImpl.java |  21 +++++--
 .../internal/logging/LoggingThreadGroup.java    |  16 +++++
 .../internal/logging/ManagerLogWriter.java      |  21 +++++--
 .../gemfire/internal/logging/MergeLogFiles.java |  21 +++++--
 .../gemfire/internal/logging/PureLogWriter.java |  21 +++++--
 .../logging/SecurityLocalLogWriter.java         |  21 +++++--
 .../internal/logging/SecurityLogConfig.java     |  16 +++++
 .../internal/logging/SecurityLogWriter.java     |  21 +++++--
 .../logging/SecurityManagerLogWriter.java       |  21 +++++--
 .../gemfire/internal/logging/SortLogFile.java   |  21 +++++--
 .../internal/logging/StandardErrorPrinter.java  |  16 +++++
 .../internal/logging/StandardOutputPrinter.java |  16 +++++
 .../internal/logging/log4j/AlertAppender.java   |  16 +++++
 .../internal/logging/log4j/AppenderContext.java |  16 +++++
 .../internal/logging/log4j/ConfigLocator.java   |  16 +++++
 .../internal/logging/log4j/Configurator.java    |  16 +++++
 .../internal/logging/log4j/FastLogger.java      |  16 +++++
 .../internal/logging/log4j/GemFireLogger.java   |  16 +++++
 .../logging/log4j/LocalizedMessage.java         |  16 +++++
 .../internal/logging/log4j/LogMarker.java       |  16 +++++
 .../logging/log4j/LogWriterAppender.java        |  18 +++++-
 .../logging/log4j/LogWriterAppenders.java       |  16 +++++
 .../internal/logging/log4j/LogWriterLogger.java |  16 +++++
 .../logging/log4j/ThreadIdPatternConverter.java |  16 +++++
 .../gemfire/internal/memcached/Command.java     |  21 +++++--
 .../internal/memcached/CommandProcessor.java    |  21 +++++--
 .../internal/memcached/ConnectionHandler.java   |  21 +++++--
 .../gemfire/internal/memcached/KeyWrapper.java  |  21 +++++--
 .../gemfire/internal/memcached/Reply.java       |  21 +++++--
 .../internal/memcached/RequestReader.java       |  21 +++++--
 .../internal/memcached/ResponseStatus.java      |  21 +++++--
 .../internal/memcached/ValueWrapper.java        |  21 +++++--
 .../memcached/commands/AbstractCommand.java     |  21 +++++--
 .../internal/memcached/commands/AddCommand.java |  21 +++++--
 .../memcached/commands/AddQCommand.java         |  16 +++++
 .../memcached/commands/AppendCommand.java       |  21 +++++--
 .../memcached/commands/AppendQCommand.java      |  16 +++++
 .../internal/memcached/commands/CASCommand.java |  21 +++++--
 .../memcached/commands/ClientError.java         |  21 +++++--
 .../memcached/commands/DecrementCommand.java    |  21 +++++--
 .../memcached/commands/DecrementQCommand.java   |  16 +++++
 .../memcached/commands/DeleteCommand.java       |  21 +++++--
 .../memcached/commands/DeleteQCommand.java      |  16 +++++
 .../memcached/commands/FlushAllCommand.java     |  21 +++++--
 .../memcached/commands/FlushAllQCommand.java    |  16 +++++
 .../internal/memcached/commands/GATCommand.java |  16 +++++
 .../memcached/commands/GATQCommand.java         |  16 +++++
 .../internal/memcached/commands/GetCommand.java |  21 +++++--
 .../memcached/commands/GetKCommand.java         |  16 +++++
 .../memcached/commands/GetKQCommand.java        |  16 +++++
 .../memcached/commands/GetQCommand.java         |  21 +++++--
 .../memcached/commands/IncrementCommand.java    |  21 +++++--
 .../memcached/commands/IncrementQCommand.java   |  16 +++++
 .../memcached/commands/NoOpCommand.java         |  21 +++++--
 .../memcached/commands/NotSupportedCommand.java |  21 +++++--
 .../memcached/commands/PrependCommand.java      |  21 +++++--
 .../memcached/commands/PrependQCommand.java     |  16 +++++
 .../memcached/commands/QuitCommand.java         |  21 +++++--
 .../memcached/commands/QuitQCommand.java        |  16 +++++
 .../memcached/commands/ReplaceCommand.java      |  21 +++++--
 .../memcached/commands/ReplaceQCommand.java     |  16 +++++
 .../internal/memcached/commands/SetCommand.java |  21 +++++--
 .../memcached/commands/SetQCommand.java         |  16 +++++
 .../memcached/commands/StatsCommand.java        |  21 +++++--
 .../memcached/commands/StorageCommand.java      |  21 +++++--
 .../memcached/commands/TouchCommand.java        |  21 +++++--
 .../memcached/commands/VerbosityCommand.java    |  21 +++++--
 .../memcached/commands/VersionCommand.java      |  21 +++++--
 .../modules/util/RegionConfiguration.java       |  21 +++++--
 .../gemfire/internal/net/SocketUtils.java       |  20 ++++--
 .../internal/offheap/ByteArrayMemoryChunk.java  |  16 +++++
 .../internal/offheap/ByteBufferMemoryChunk.java |  16 +++++
 .../gemfire/internal/offheap/DataType.java      |  16 +++++
 .../internal/offheap/MemoryAllocator.java       |  16 +++++
 .../gemfire/internal/offheap/MemoryBlock.java   |  16 +++++
 .../gemfire/internal/offheap/MemoryChunk.java   |  16 +++++
 .../offheap/MemoryChunkWithRefCount.java        |  16 +++++
 .../internal/offheap/MemoryInspector.java       |  16 +++++
 .../internal/offheap/MemoryUsageListener.java   |  16 +++++
 .../offheap/OffHeapCachedDeserializable.java    |  16 +++++
 .../gemfire/internal/offheap/OffHeapHelper.java |  16 +++++
 .../internal/offheap/OffHeapMemoryStats.java    |  16 +++++
 .../internal/offheap/OffHeapReference.java      |  18 +++++-
 .../offheap/OffHeapRegionEntryHelper.java       |  16 +++++
 .../internal/offheap/OffHeapStorage.java        |  16 +++++
 .../offheap/OutOfOffHeapMemoryListener.java     |  16 +++++
 .../gemfire/internal/offheap/Releasable.java    |  16 +++++
 .../offheap/SimpleMemoryAllocatorImpl.java      |  16 +++++
 .../gemfire/internal/offheap/StoredObject.java  |  16 +++++
 .../internal/offheap/UnsafeMemoryChunk.java     |  16 +++++
 .../offheap/annotations/OffHeapIdentifier.java  |  16 +++++
 .../internal/offheap/annotations/Released.java  |  16 +++++
 .../internal/offheap/annotations/Retained.java  |  16 +++++
 .../offheap/annotations/Unretained.java         |  16 +++++
 .../internal/process/AttachProcessUtils.java    |  16 +++++
 .../process/BlockingProcessStreamReader.java    |  16 +++++
 ...usterConfigurationNotAvailableException.java |  16 +++++
 .../process/ConnectionFailedException.java      |  21 +++++--
 .../internal/process/ControlFileWatchdog.java   |  16 +++++
 .../process/ControlNotificationHandler.java     |  16 +++++
 .../internal/process/ControllableProcess.java   |  16 +++++
 .../process/FileAlreadyExistsException.java     |  21 +++++--
 .../process/FileControllerParameters.java       |  16 +++++
 .../internal/process/FileProcessController.java |  16 +++++
 .../process/LocalProcessController.java         |  21 +++++--
 .../internal/process/LocalProcessLauncher.java  |  21 +++++--
 .../process/MBeanControllerParameters.java      |  16 +++++
 .../process/MBeanInvocationFailedException.java |  21 +++++--
 .../process/MBeanProcessController.java         |  16 +++++
 .../internal/process/NativeProcessUtils.java    |  16 +++++
 .../process/NonBlockingProcessStreamReader.java |  16 +++++
 .../gemfire/internal/process/PidFile.java       |  16 +++++
 .../process/PidUnavailableException.java        |  21 +++++--
 .../internal/process/ProcessController.java     |  16 +++++
 .../process/ProcessControllerFactory.java       |  16 +++++
 .../process/ProcessControllerParameters.java    |  16 +++++
 .../process/ProcessLauncherContext.java         |  21 +++++--
 .../internal/process/ProcessStreamReader.java   |  21 +++++--
 .../ProcessTerminatedAbnormallyException.java   |  20 ++++--
 .../gemfire/internal/process/ProcessType.java   |  16 +++++
 .../gemfire/internal/process/ProcessUtils.java  |  21 +++++--
 .../gemfire/internal/process/StartupStatus.java |  16 +++++
 .../internal/process/StartupStatusListener.java |  16 +++++
 .../UnableToControlProcessException.java        |  16 +++++
 .../AbstractSignalNotificationHandler.java      |  20 ++++--
 .../gemfire/internal/process/signal/Signal.java |  20 ++++--
 .../internal/process/signal/SignalEvent.java    |  20 ++++--
 .../internal/process/signal/SignalListener.java |  20 ++++--
 .../internal/process/signal/SignalType.java     |  20 ++++--
 .../internal/redis/ByteArrayWrapper.java        |  16 +++++
 .../internal/redis/ByteToCommandDecoder.java    |  16 +++++
 .../gemstone/gemfire/internal/redis/Coder.java  |  16 +++++
 .../gemfire/internal/redis/Command.java         |  16 +++++
 .../gemfire/internal/redis/DoubleWrapper.java   |  16 +++++
 .../internal/redis/ExecutionHandlerContext.java |  16 +++++
 .../gemfire/internal/redis/Executor.java        |  16 +++++
 .../gemfire/internal/redis/Extendable.java      |  16 +++++
 .../redis/RedisCommandParserException.java      |  16 +++++
 .../internal/redis/RedisCommandType.java        |  16 +++++
 .../gemfire/internal/redis/RedisConstants.java  |  16 +++++
 .../gemfire/internal/redis/RedisDataType.java   |  18 +++++-
 .../redis/RedisDataTypeMismatchException.java   |  16 +++++
 .../internal/redis/RegionCreationException.java |  16 +++++
 .../gemfire/internal/redis/RegionProvider.java  |  16 +++++
 .../redis/executor/AbstractExecutor.java        |  16 +++++
 .../redis/executor/AbstractScanExecutor.java    |  16 +++++
 .../internal/redis/executor/AuthExecutor.java   |  16 +++++
 .../internal/redis/executor/DBSizeExecutor.java |  16 +++++
 .../internal/redis/executor/DelExecutor.java    |  16 +++++
 .../internal/redis/executor/EchoExecutor.java   |  16 +++++
 .../internal/redis/executor/ExistsExecutor.java |  16 +++++
 .../redis/executor/ExpirationExecutor.java      |  16 +++++
 .../redis/executor/ExpireAtExecutor.java        |  16 +++++
 .../internal/redis/executor/ExpireExecutor.java |  16 +++++
 .../redis/executor/FlushAllExecutor.java        |  16 +++++
 .../internal/redis/executor/KeysExecutor.java   |  16 +++++
 .../internal/redis/executor/ListQuery.java      |  16 +++++
 .../redis/executor/PExpireAtExecutor.java       |  16 +++++
 .../redis/executor/PExpireExecutor.java         |  16 +++++
 .../internal/redis/executor/PTTLExecutor.java   |  16 +++++
 .../redis/executor/PersistExecutor.java         |  16 +++++
 .../internal/redis/executor/PingExecutor.java   |  16 +++++
 .../internal/redis/executor/QuitExecutor.java   |  16 +++++
 .../internal/redis/executor/ScanExecutor.java   |  16 +++++
 .../redis/executor/ShutDownExecutor.java        |  16 +++++
 .../internal/redis/executor/SortedSetQuery.java |  16 +++++
 .../internal/redis/executor/TTLExecutor.java    |  16 +++++
 .../internal/redis/executor/TimeExecutor.java   |  16 +++++
 .../internal/redis/executor/TypeExecutor.java   |  16 +++++
 .../internal/redis/executor/UnkownExecutor.java |  16 +++++
 .../redis/executor/hash/HDelExecutor.java       |  16 +++++
 .../redis/executor/hash/HExistsExecutor.java    |  16 +++++
 .../redis/executor/hash/HGetAllExecutor.java    |  16 +++++
 .../redis/executor/hash/HGetExecutor.java       |  16 +++++
 .../redis/executor/hash/HIncrByExecutor.java    |  16 +++++
 .../executor/hash/HIncrByFloatExecutor.java     |  16 +++++
 .../redis/executor/hash/HKeysExecutor.java      |  16 +++++
 .../redis/executor/hash/HLenExecutor.java       |  16 +++++
 .../redis/executor/hash/HMGetExecutor.java      |  16 +++++
 .../redis/executor/hash/HMSetExecutor.java      |  16 +++++
 .../redis/executor/hash/HScanExecutor.java      |  16 +++++
 .../redis/executor/hash/HSetExecutor.java       |  16 +++++
 .../redis/executor/hash/HSetNXExecutor.java     |  16 +++++
 .../redis/executor/hash/HValsExecutor.java      |  16 +++++
 .../redis/executor/hash/HashExecutor.java       |  18 +++++-
 .../internal/redis/executor/hll/Bits.java       |  16 +++++
 .../executor/hll/CardinalityMergeException.java |  18 +++++-
 .../redis/executor/hll/HllExecutor.java         |  16 +++++
 .../redis/executor/hll/HyperLogLog.java         |  16 +++++
 .../redis/executor/hll/HyperLogLogPlus.java     |  18 +++++-
 .../internal/redis/executor/hll/IBuilder.java   |  18 +++++-
 .../redis/executor/hll/ICardinality.java        |  16 +++++
 .../internal/redis/executor/hll/MurmurHash.java |  18 +++++-
 .../redis/executor/hll/PFAddExecutor.java       |  16 +++++
 .../redis/executor/hll/PFCountExecutor.java     |  16 +++++
 .../redis/executor/hll/PFMergeExecutor.java     |  16 +++++
 .../redis/executor/hll/RegisterSet.java         |  18 +++++-
 .../internal/redis/executor/hll/Varint.java     |  18 +++++-
 .../redis/executor/list/LIndexExecutor.java     |  16 +++++
 .../redis/executor/list/LInsertExecutor.java    |  16 +++++
 .../redis/executor/list/LLenExecutor.java       |  16 +++++
 .../redis/executor/list/LPopExecutor.java       |  16 +++++
 .../redis/executor/list/LPushExecutor.java      |  16 +++++
 .../redis/executor/list/LPushXExecutor.java     |  16 +++++
 .../redis/executor/list/LRangeExecutor.java     |  16 +++++
 .../redis/executor/list/LRemExecutor.java       |  16 +++++
 .../redis/executor/list/LSetExecutor.java       |  16 +++++
 .../redis/executor/list/LTrimExecutor.java      |  16 +++++
 .../redis/executor/list/ListExecutor.java       |  16 +++++
 .../redis/executor/list/PopExecutor.java        |  16 +++++
 .../redis/executor/list/PushExecutor.java       |  16 +++++
 .../redis/executor/list/PushXExecutor.java      |  16 +++++
 .../redis/executor/list/RPopExecutor.java       |  16 +++++
 .../redis/executor/list/RPushExecutor.java      |  16 +++++
 .../redis/executor/list/RPushXExecutor.java     |  16 +++++
 .../redis/executor/set/SAddExecutor.java        |  16 +++++
 .../redis/executor/set/SCardExecutor.java       |  16 +++++
 .../redis/executor/set/SDiffExecutor.java       |  16 +++++
 .../redis/executor/set/SDiffStoreExecutor.java  |  16 +++++
 .../redis/executor/set/SInterExecutor.java      |  16 +++++
 .../redis/executor/set/SInterStoreExecutor.java |  16 +++++
 .../redis/executor/set/SIsMemberExecutor.java   |  16 +++++
 .../redis/executor/set/SMembersExecutor.java    |  16 +++++
 .../redis/executor/set/SMoveExecutor.java       |  16 +++++
 .../redis/executor/set/SPopExecutor.java        |  16 +++++
 .../redis/executor/set/SRandMemberExecutor.java |  16 +++++
 .../redis/executor/set/SRemExecutor.java        |  16 +++++
 .../redis/executor/set/SScanExecutor.java       |  16 +++++
 .../redis/executor/set/SUnionExecutor.java      |  16 +++++
 .../redis/executor/set/SUnionStoreExecutor.java |  16 +++++
 .../redis/executor/set/SetExecutor.java         |  16 +++++
 .../redis/executor/set/SetOpExecutor.java       |  16 +++++
 .../executor/sortedset/SortedSetExecutor.java   |  16 +++++
 .../redis/executor/sortedset/ZAddExecutor.java  |  16 +++++
 .../redis/executor/sortedset/ZCardExecutor.java |  16 +++++
 .../executor/sortedset/ZCountExecutor.java      |  16 +++++
 .../executor/sortedset/ZIncrByExecutor.java     |  16 +++++
 .../executor/sortedset/ZLexCountExecutor.java   |  16 +++++
 .../executor/sortedset/ZRangeByLexExecutor.java |  16 +++++
 .../sortedset/ZRangeByScoreExecutor.java        |  16 +++++
 .../executor/sortedset/ZRangeExecutor.java      |  16 +++++
 .../redis/executor/sortedset/ZRankExecutor.java |  16 +++++
 .../redis/executor/sortedset/ZRemExecutor.java  |  16 +++++
 .../sortedset/ZRemRangeByLexExecutor.java       |  16 +++++
 .../sortedset/ZRemRangeByRankExecutor.java      |  16 +++++
 .../sortedset/ZRemRangeByScoreExecutor.java     |  16 +++++
 .../sortedset/ZRevRangeByScoreExecutor.java     |  16 +++++
 .../executor/sortedset/ZRevRangeExecutor.java   |  16 +++++
 .../executor/sortedset/ZRevRankExecutor.java    |  16 +++++
 .../redis/executor/sortedset/ZScanExecutor.java |  16 +++++
 .../executor/sortedset/ZScoreExecutor.java      |  16 +++++
 .../redis/executor/string/AppendExecutor.java   |  16 +++++
 .../redis/executor/string/BitCountExecutor.java |  16 +++++
 .../redis/executor/string/BitOpExecutor.java    |  16 +++++
 .../redis/executor/string/BitPosExecutor.java   |  16 +++++
 .../redis/executor/string/DecrByExecutor.java   |  16 +++++
 .../redis/executor/string/DecrExecutor.java     |  16 +++++
 .../redis/executor/string/GetBitExecutor.java   |  16 +++++
 .../redis/executor/string/GetExecutor.java      |  16 +++++
 .../redis/executor/string/GetRangeExecutor.java |  16 +++++
 .../redis/executor/string/GetSetExecutor.java   |  16 +++++
 .../redis/executor/string/IncrByExecutor.java   |  16 +++++
 .../executor/string/IncrByFloatExecutor.java    |  16 +++++
 .../redis/executor/string/IncrExecutor.java     |  16 +++++
 .../redis/executor/string/MGetExecutor.java     |  16 +++++
 .../redis/executor/string/MSetExecutor.java     |  16 +++++
 .../redis/executor/string/MSetNXExecutor.java   |  16 +++++
 .../redis/executor/string/PSetEXExecutor.java   |  16 +++++
 .../redis/executor/string/SetBitExecutor.java   |  16 +++++
 .../redis/executor/string/SetEXExecutor.java    |  16 +++++
 .../redis/executor/string/SetExecutor.java      |  16 +++++
 .../redis/executor/string/SetNXExecutor.java    |  16 +++++
 .../redis/executor/string/SetRangeExecutor.java |  16 +++++
 .../redis/executor/string/StringExecutor.java   |  18 +++++-
 .../redis/executor/string/StrlenExecutor.java   |  16 +++++
 .../executor/transactions/DiscardExecutor.java  |  16 +++++
 .../executor/transactions/ExecExecutor.java     |  16 +++++
 .../executor/transactions/MultiExecutor.java    |  16 +++++
 .../transactions/TransactionExecutor.java       |  16 +++++
 .../executor/transactions/UnwatchExecutor.java  |  16 +++++
 .../executor/transactions/WatchExecutor.java    |  16 +++++
 .../internal/security/AuthorizeRequest.java     |  21 +++++--
 .../internal/security/AuthorizeRequestPP.java   |  21 +++++--
 .../security/FilterPostAuthorization.java       |  21 +++++--
 .../security/FilterPreAuthorization.java        |  21 +++++--
 .../internal/security/ObjectWithAuthz.java      |  21 +++++--
 .../internal/sequencelog/EntryLogger.java       |  23 ++++---
 .../gemfire/internal/sequencelog/GraphType.java |  21 +++++--
 .../internal/sequencelog/MembershipLogger.java  |  21 +++++--
 .../internal/sequencelog/MessageLogger.java     |  21 +++++--
 .../internal/sequencelog/RegionLogger.java      |  21 +++++--
 .../internal/sequencelog/SequenceLogger.java    |  21 +++++--
 .../sequencelog/SequenceLoggerImpl.java         |  21 +++++--
 .../internal/sequencelog/Transition.java        |  21 +++++--
 .../gemfire/internal/sequencelog/io/Filter.java |  21 +++++--
 .../sequencelog/io/GemfireLogConverter.java     |  21 +++++--
 .../internal/sequencelog/io/GraphReader.java    |  21 +++++--
 .../sequencelog/io/InputStreamReader.java       |  21 +++++--
 .../sequencelog/io/OutputStreamAppender.java    |  21 +++++--
 .../internal/sequencelog/model/Edge.java        |  23 ++++---
 .../internal/sequencelog/model/Graph.java       |  21 +++++--
 .../internal/sequencelog/model/GraphID.java     |  21 +++++--
 .../sequencelog/model/GraphReaderCallback.java  |  23 ++++---
 .../internal/sequencelog/model/GraphSet.java    |  21 +++++--
 .../internal/sequencelog/model/Vertex.java      |  21 +++++--
 .../visualization/text/TextDisplay.java         |  21 +++++--
 .../gemfire/internal/shared/NativeCalls.java    |  21 +++++--
 .../internal/shared/NativeCallsJNAImpl.java     |  21 +++++--
 .../internal/shared/NativeErrorException.java   |  21 +++++--
 .../gemfire/internal/shared/OSType.java         |  21 +++++--
 .../internal/shared/StringPrintWriter.java      |  21 +++++--
 .../internal/shared/TCPSocketOptions.java       |  21 +++++--
 .../internal/size/CachingSingleObjectSizer.java |  21 +++++--
 .../size/InstrumentationSingleObjectSizer.java  |  21 +++++--
 .../gemfire/internal/size/ObjectGraphSizer.java |  28 +++++----
 .../gemfire/internal/size/ObjectTraverser.java  |  28 +++++----
 .../internal/size/ReflectionObjectSizer.java    |  21 +++++--
 .../size/ReflectionSingleObjectSizer.java       |  21 +++++--
 .../internal/size/SingleObjectSizer.java        |  16 +++++
 .../internal/size/SizeClassOnceObjectSizer.java |  21 +++++--
 .../gemfire/internal/size/SizeOfUtil0.java      |  21 +++++--
 .../internal/size/WellKnownClassSizer.java      |  21 +++++--
 .../internal/statistics/CounterMonitor.java     |  21 +++++--
 .../internal/statistics/GaugeMonitor.java       |  21 +++++--
 .../statistics/IgnoreResourceException.java     |  21 +++++--
 .../MapBasedStatisticsNotification.java         |  21 +++++--
 .../internal/statistics/ResourceInstance.java   |  21 +++++--
 .../internal/statistics/ResourceType.java       |  21 +++++--
 .../internal/statistics/SampleCollector.java    |  21 +++++--
 .../internal/statistics/SampleHandler.java      |  21 +++++--
 .../internal/statistics/SimpleStatisticId.java  |  21 +++++--
 .../statistics/StatArchiveDescriptor.java       |  21 +++++--
 .../internal/statistics/StatArchiveHandler.java |  21 +++++--
 .../statistics/StatArchiveHandlerConfig.java    |  21 +++++--
 .../internal/statistics/StatMonitorHandler.java |  21 +++++--
 .../internal/statistics/StatisticId.java        |  21 +++++--
 .../statistics/StatisticNotFoundException.java  |  21 +++++--
 .../internal/statistics/StatisticsListener.java |  23 ++++---
 .../internal/statistics/StatisticsMonitor.java  |  21 +++++--
 .../statistics/StatisticsNotification.java      |  21 +++++--
 .../internal/statistics/StatisticsSampler.java  |  21 +++++--
 .../internal/statistics/ValueMonitor.java       |  21 +++++--
 .../stats50/Atomic50StatisticsImpl.java         |  21 +++++--
 .../gemfire/internal/stats50/VMStats50.java     |  21 +++++--
 .../gemfire/internal/tcp/BaseMsgStreamer.java   |  21 +++++--
 .../gemstone/gemfire/internal/tcp/Buffers.java  |  21 +++++--
 .../internal/tcp/ByteBufferInputStream.java     |  21 +++++--
 .../gemfire/internal/tcp/ConnectExceptions.java |  21 +++++--
 .../gemfire/internal/tcp/Connection.java        |  21 +++++--
 .../internal/tcp/ConnectionException.java       |  21 +++++--
 .../gemfire/internal/tcp/ConnectionTable.java   |  21 +++++--
 .../gemfire/internal/tcp/DirectReplySender.java |  21 +++++--
 .../tcp/ImmutableByteBufferInputStream.java     |  21 +++++--
 .../internal/tcp/MemberShunnedException.java    |  21 +++++--
 .../gemfire/internal/tcp/MsgDestreamer.java     |  21 +++++--
 .../gemfire/internal/tcp/MsgIdGenerator.java    |  21 +++++--
 .../gemfire/internal/tcp/MsgOutputStream.java   |  21 +++++--
 .../gemfire/internal/tcp/MsgReader.java         |  21 +++++--
 .../gemfire/internal/tcp/MsgStreamer.java       |  21 +++++--
 .../gemfire/internal/tcp/MsgStreamerList.java   |  21 +++++--
 .../gemfire/internal/tcp/NIOMsgReader.java      |  21 +++++--
 .../gemfire/internal/tcp/OioMsgReader.java      |  21 +++++--
 .../internal/tcp/ReenteredConnectException.java |  21 +++++--
 .../gemfire/internal/tcp/ServerDelegate.java    |  21 +++++--
 .../com/gemstone/gemfire/internal/tcp/Stub.java |  21 +++++--
 .../gemfire/internal/tcp/TCPConduit.java        |  21 +++++--
 .../tcp/VersionedByteBufferInputStream.java     |  21 +++++--
 .../internal/tcp/VersionedMsgStreamer.java      |  21 +++++--
 .../internal/util/AbortableTaskService.java     |  21 +++++--
 .../gemfire/internal/util/ArrayUtils.java       |  21 +++++--
 .../gemfire/internal/util/BlobHelper.java       |  21 +++++--
 .../gemfire/internal/util/Breadcrumbs.java      |  21 +++++--
 .../gemstone/gemfire/internal/util/Bytes.java   |  21 +++++--
 .../gemfire/internal/util/Callable.java         |  21 +++++--
 .../gemfire/internal/util/CollectionUtils.java  |  21 +++++--
 .../gemfire/internal/util/DebuggerSupport.java  |  21 +++++--
 .../gemfire/internal/util/DelayedAction.java    |  21 +++++--
 .../com/gemstone/gemfire/internal/util/Hex.java |  21 +++++--
 .../gemstone/gemfire/internal/util/IOUtils.java |  21 +++++--
 .../internal/util/JavaCommandBuilder.java       |  21 +++++--
 .../gemfire/internal/util/LogFileUtils.java     |  21 +++++--
 .../internal/util/ObjectIntProcedure.java       |  16 +++++
 .../gemfire/internal/util/ObjectProcedure.java  |  16 +++++
 .../gemfire/internal/util/PasswordUtil.java     |  20 ++++--
 .../gemfire/internal/util/PluckStacks.java      |  21 +++++--
 .../internal/util/SingletonCallable.java        |  16 +++++
 .../gemfire/internal/util/SingletonValue.java   |  18 +++++-
 .../internal/util/StackTraceCollector.java      |  21 +++++--
 .../gemfire/internal/util/StopWatch.java        |  21 +++++--
 .../internal/util/SunAPINotFoundException.java  |  20 ++++--
 .../gemfire/internal/util/TransformUtils.java   |  21 +++++--
 .../gemfire/internal/util/Transformer.java      |  21 +++++--
 .../gemfire/internal/util/Versionable.java      |  21 +++++--
 .../internal/util/VersionedArrayList.java       |  20 ++++--
 .../util/concurrent/CopyOnWriteHashMap.java     |  21 +++++--
 .../util/concurrent/CopyOnWriteWeakHashMap.java |  21 +++++--
 .../CustomEntryConcurrentHashMap.java           |  21 +++++--
 .../internal/util/concurrent/FutureResult.java  |  21 +++++--
 .../util/concurrent/ReentrantSemaphore.java     |  21 +++++--
 .../util/concurrent/SemaphoreReadWriteLock.java |  21 +++++--
 .../util/concurrent/StoppableCondition.java     |  21 +++++--
 .../concurrent/StoppableCountDownLatch.java     |  21 +++++--
 .../concurrent/StoppableCountDownOrUpLatch.java |  21 +++++--
 .../concurrent/StoppableNonReentrantLock.java   |  21 +++++--
 .../util/concurrent/StoppableReadWriteLock.java |  16 +++++
 .../util/concurrent/StoppableReentrantLock.java |  21 +++++--
 .../StoppableReentrantReadWriteLock.java        |  21 +++++--
 .../lang/AttachAPINotFoundException.java        |  20 ++++--
 .../com/gemstone/gemfire/lang/Identifiable.java |  20 ++++--
 .../management/AlreadyRunningException.java     |  21 +++++--
 .../management/AsyncEventQueueMXBean.java       |  20 ++++--
 .../gemfire/management/CacheServerMXBean.java   |  20 ++++--
 .../gemfire/management/ClientHealthStatus.java  |  20 ++++--
 .../gemfire/management/ClientQueueDetail.java   |  21 +++++--
 .../DependenciesNotFoundException.java          |  20 ++++--
 .../gemfire/management/DiskBackupResult.java    |  20 ++++--
 .../gemfire/management/DiskBackupStatus.java    |  20 ++++--
 .../gemfire/management/DiskMetrics.java         |  20 ++++--
 .../gemfire/management/DiskStoreMXBean.java     |  20 ++++--
 .../DistributedLockServiceMXBean.java           |  20 ++++--
 .../management/DistributedRegionMXBean.java     |  20 ++++--
 .../management/DistributedSystemMXBean.java     |  20 ++++--
 .../management/EvictionAttributesData.java      |  20 ++++--
 .../FixedPartitionAttributesData.java           |  21 +++++--
 .../management/GatewayReceiverMXBean.java       |  20 ++++--
 .../gemfire/management/GatewaySenderMXBean.java |  20 ++++--
 .../gemfire/management/GemFireProperties.java   |  20 ++++--
 .../gemfire/management/JMXNotificationType.java |  28 +++++----
 .../management/JMXNotificationUserData.java     |  28 +++++----
 .../gemstone/gemfire/management/JVMMetrics.java |  20 ++++--
 .../gemfire/management/LocatorMXBean.java       |  20 ++++--
 .../gemfire/management/LockServiceMXBean.java   |  20 ++++--
 .../gemfire/management/ManagementException.java |  20 ++++--
 .../gemfire/management/ManagementService.java   |  21 +++++--
 .../gemfire/management/ManagerMXBean.java       |  20 ++++--
 .../gemfire/management/MemberMXBean.java        |  20 ++++--
 .../management/MembershipAttributesData.java    |  20 ++++--
 .../gemfire/management/NetworkMetrics.java      |  20 ++++--
 .../gemstone/gemfire/management/OSMetrics.java  |  20 ++++--
 .../management/PartitionAttributesData.java     |  20 ++++--
 .../management/PersistentMemberDetails.java     |  20 ++++--
 .../management/RegionAttributesData.java        |  20 ++++--
 .../gemfire/management/RegionMXBean.java        |  20 ++++--
 .../gemfire/management/ServerLoadData.java      |  20 ++++--
 .../gemfire/management/cli/CliMetaData.java     |  20 ++++--
 .../cli/CommandProcessingException.java         |  20 ++++--
 .../gemfire/management/cli/CommandService.java  |  20 ++++--
 .../management/cli/CommandServiceException.java |  20 ++++--
 .../management/cli/CommandStatement.java        |  20 ++++--
 .../gemfire/management/cli/ConverterHint.java   |  20 ++++--
 .../gemstone/gemfire/management/cli/Result.java |  21 +++++--
 .../management/internal/AlertDetails.java       |  21 +++++--
 .../management/internal/ArrayConverter.java     |  20 ++++--
 .../internal/BaseManagementService.java         |  21 +++++--
 .../internal/CollectionConverter.java           |  22 ++++---
 .../management/internal/CompositeConverter.java |  20 ++++--
 .../management/internal/EnumConverter.java      |  20 ++++--
 .../management/internal/FederatingManager.java  |  21 +++++--
 .../internal/FederationComponent.java           |  20 ++++--
 .../management/internal/FilterChain.java        |  20 ++++--
 .../management/internal/FilterParam.java        |  22 ++++---
 .../management/internal/IdentityConverter.java  |  20 ++++--
 .../management/internal/JettyHelper.java        |  21 +++++--
 .../management/internal/JmxManagerAdvisee.java  |  21 +++++--
 .../management/internal/JmxManagerAdvisor.java  |  21 +++++--
 .../management/internal/JmxManagerLocator.java  |  21 +++++--
 .../internal/JmxManagerLocatorRequest.java      |  20 ++++--
 .../internal/JmxManagerLocatorResponse.java     |  20 ++++--
 .../management/internal/LocalFilterChain.java   |  20 ++++--
 .../management/internal/LocalManager.java       |  20 ++++--
 .../management/internal/MBeanJMXAdapter.java    |  20 ++++--
 .../management/internal/MBeanProxyFactory.java  |  22 ++++---
 .../internal/MBeanProxyInfoRepository.java      |  22 ++++---
 .../internal/MBeanProxyInvocationHandler.java   |  23 ++++---
 .../internal/MXBeanProxyInvocationHandler.java  |  20 ++++--
 .../management/internal/ManagementAgent.java    |  21 +++++--
 .../internal/ManagementCacheListener.java       |  22 ++++---
 .../internal/ManagementConstants.java           |  20 ++++--
 .../management/internal/ManagementFunction.java |  20 ++++--
 .../internal/ManagementMembershipListener.java  |  20 ++++--
 .../internal/ManagementResourceRepo.java        |  20 ++++--
 .../management/internal/ManagementStrings.java  |  20 ++++--
 .../gemfire/management/internal/Manager.java    |  21 +++++--
 .../internal/ManagerStartupMessage.java         |  21 +++++--
 .../management/internal/MemberMessenger.java    |  20 ++++--
 .../internal/MonitoringRegionCacheListener.java |  20 ++++--
 .../internal/NotificationBroadCasterProxy.java  |  20 ++++--
 .../internal/NotificationCacheListener.java     |  20 ++++--
 .../management/internal/NotificationHub.java    |  22 ++++---
 .../internal/NotificationHubClient.java         |  20 ++++--
 .../management/internal/NotificationKey.java    |  20 ++++--
 .../gemfire/management/internal/OpenMethod.java |  22 ++++---
 .../management/internal/OpenTypeConverter.java  |  20 ++++--
 .../management/internal/OpenTypeUtil.java       |  20 ++++--
 .../gemfire/management/internal/ProxyInfo.java  |  20 ++++--
 .../management/internal/ProxyInterface.java     |  20 ++++--
 .../management/internal/ProxyListener.java      |  20 ++++--
 .../management/internal/RemoteFilterChain.java  |  20 ++++--
 .../gemfire/management/internal/RestAgent.java  |  21 +++++--
 .../gemfire/management/internal/SSLUtil.java    |  16 +++++
 .../management/internal/StringBasedFilter.java  |  20 ++++--
 .../internal/SystemManagementService.java       |  21 +++++--
 .../management/internal/TableConverter.java     |  22 ++++---
 .../internal/beans/AggregateHandler.java        |  20 ++++--
 .../internal/beans/AsyncEventQueueMBean.java    |  20 ++++--
 .../beans/AsyncEventQueueMBeanBridge.java       |  20 ++++--
 .../internal/beans/BeanUtilFuncs.java           |  20 ++++--
 .../internal/beans/CacheServerBridge.java       |  20 ++++--
 .../internal/beans/CacheServerMBean.java        |  20 ++++--
 .../internal/beans/DiskRegionBridge.java        |  22 ++++---
 .../internal/beans/DiskStoreMBean.java          |  20 ++++--
 .../internal/beans/DiskStoreMBeanBridge.java    |  20 ++++--
 .../beans/DistributedLockServiceBridge.java     |  20 ++++--
 .../beans/DistributedLockServiceMBean.java      |  20 ++++--
 .../internal/beans/DistributedRegionBridge.java |  20 ++++--
 .../internal/beans/DistributedRegionMBean.java  |  20 ++++--
 .../internal/beans/DistributedSystemBridge.java |  20 ++++--
 .../internal/beans/DistributedSystemMBean.java  |  20 ++++--
 .../internal/beans/GatewayReceiverMBean.java    |  20 ++++--
 .../beans/GatewayReceiverMBeanBridge.java       |  20 ++++--
 .../internal/beans/GatewaySenderMBean.java      |  20 ++++--
 .../beans/GatewaySenderMBeanBridge.java         |  20 ++++--
 .../internal/beans/HDFSRegionBridge.java        |  20 ++++--
 .../management/internal/beans/LocatorMBean.java |  20 ++++--
 .../internal/beans/LocatorMBeanBridge.java      |  20 ++++--
 .../internal/beans/LockServiceMBean.java        |  20 ++++--
 .../internal/beans/LockServiceMBeanBridge.java  |  22 ++++---
 .../internal/beans/MBeanAggregator.java         |  20 ++++--
 .../internal/beans/ManagementAdapter.java       |  22 ++++---
 .../internal/beans/ManagementListener.java      |  20 ++++--
 .../management/internal/beans/ManagerMBean.java |  20 ++++--
 .../internal/beans/ManagerMBeanBridge.java      |  20 ++++--
 .../management/internal/beans/MemberMBean.java  |  20 ++++--
 .../internal/beans/MemberMBeanBridge.java       |  20 ++++--
 .../internal/beans/MetricsCalculator.java       |  20 ++++--
 .../internal/beans/PartitionedRegionBridge.java |  20 ++++--
 .../internal/beans/QueryDataFunction.java       |  20 ++++--
 .../management/internal/beans/RegionMBean.java  |  20 ++++--
 .../internal/beans/RegionMBeanBridge.java       |  20 ++++--
 .../beans/RegionMBeanCompositeDataFactory.java  |  20 ++++--
 .../internal/beans/SequenceNumber.java          |  20 ++++--
 .../management/internal/beans/ServerBridge.java |  21 +++++--
 .../stats/AggregateRegionStatsMonitor.java      |  20 ++++--
 .../internal/beans/stats/GCStatsMonitor.java    |  20 ++++--
 .../GatewayReceiverClusterStatsMonitor.java     |  20 ++++--
 .../stats/GatewaySenderClusterStatsMonitor.java |  20 ++++--
 .../stats/IntegerStatsDeltaAggregator.java      |  23 ++++---
 .../beans/stats/LongStatsDeltaAggregator.java   |  23 ++++---
 .../internal/beans/stats/MBeanStatsMonitor.java |  20 ++++--
 .../beans/stats/MemberClusterStatsMonitor.java  |  20 ++++--
 .../beans/stats/MemberLevelDiskMonitor.java     |  20 ++++--
 .../beans/stats/RegionClusterStatsMonitor.java  |  20 ++++--
 .../beans/stats/ServerClusterStatsMonitor.java  |  20 ++++--
 .../internal/beans/stats/StatType.java          |  20 ++++--
 .../internal/beans/stats/StatsAggregator.java   |  20 ++++--
 .../beans/stats/StatsAverageLatency.java        |  20 ++++--
 .../internal/beans/stats/StatsKey.java          |  20 ++++--
 .../internal/beans/stats/StatsLatency.java      |  20 ++++--
 .../internal/beans/stats/StatsRate.java         |  20 ++++--
 .../internal/beans/stats/VMStatsMonitor.java    |  20 ++++--
 .../cli/AbstractCliAroundInterceptor.java       |  20 ++++--
 .../internal/cli/CliAroundInterceptor.java      |  20 ++++--
 .../management/internal/cli/CliUtil.java        |  20 ++++--
 .../management/internal/cli/CommandManager.java |  20 ++++--
 .../management/internal/cli/CommandRequest.java |  21 +++++--
 .../internal/cli/CommandResponse.java           |  22 ++++---
 .../internal/cli/CommandResponseBuilder.java    |  20 ++++--
 .../internal/cli/CommandResponseWriter.java     |  20 ++++--
 .../internal/cli/GfshParseResult.java           |  20 ++++--
 .../management/internal/cli/GfshParser.java     |  20 ++++--
 .../management/internal/cli/Launcher.java       |  20 ++++--
 .../management/internal/cli/LogWrapper.java     |  20 ++++--
 .../internal/cli/MultipleValueAdapter.java      |  21 +++++--
 .../internal/cli/MultipleValueConverter.java    |  21 +++++--
 .../internal/cli/annotation/CliArgument.java    |  20 ++++--
 .../cli/commands/AbstractCommandsSupport.java   |  20 ++++--
 .../internal/cli/commands/ClientCommands.java   |  20 ++++--
 .../internal/cli/commands/ConfigCommands.java   |  20 ++++--
 .../CreateAlterDestroyRegionCommands.java       |  20 ++++--
 .../internal/cli/commands/DataCommands.java     |  20 ++++--
 .../internal/cli/commands/DeployCommands.java   |  20 ++++--
 .../cli/commands/DiskStoreCommands.java         |  41 +++++++++---
 .../cli/commands/DurableClientCommands.java     |  20 ++++--
 ...ExportImportSharedConfigurationCommands.java |  21 +++++--
 .../internal/cli/commands/FunctionCommands.java |  20 ++++--
 .../internal/cli/commands/GfshHelpCommands.java |  20 ++++--
 .../internal/cli/commands/IndexCommands.java    |  20 ++++--
 .../cli/commands/LauncherLifecycleCommands.java |  20 ++++--
 .../internal/cli/commands/MemberCommands.java   |  20 ++++--
 .../cli/commands/MiscellaneousCommands.java     |  20 ++++--
 .../internal/cli/commands/PDXCommands.java      |  16 +++++
 .../internal/cli/commands/QueueCommands.java    |  20 ++++--
 .../internal/cli/commands/RegionCommands.java   |  20 ++++--
 .../internal/cli/commands/ShellCommands.java    |  20 ++++--
 .../internal/cli/commands/StatusCommands.java   |  21 +++++--
 .../internal/cli/commands/WanCommands.java      |  21 +++++--
 .../cli/commands/dto/RegionAttributesInfo.java  |  21 +++++--
 .../cli/commands/dto/RegionDetails.java         |  21 +++++--
 .../cli/commands/dto/RegionMemberDetails.java   |  21 +++++--
 .../cli/converters/BooleanConverter.java        |  22 ++++---
 .../ClusterMemberIdNameConverter.java           |  20 ++++--
 .../converters/ConnectionEndpointConverter.java |  21 +++++--
 .../internal/cli/converters/DirConverter.java   |  20 ++++--
 .../cli/converters/DirPathConverter.java        |  20 ++++--
 .../cli/converters/DiskStoreNameConverter.java  |  20 ++++--
 .../internal/cli/converters/EnumConverter.java  |  20 ++++--
 .../cli/converters/FilePathConverter.java       |  20 ++++--
 .../cli/converters/FilePathStringConverter.java |  20 ++++--
 .../converters/GatewayReceiverIdsConverter.java |  21 +++++--
 .../converters/GatewaySenderIdConverter.java    |  20 ++++--
 .../internal/cli/converters/HelpConverter.java  |  20 ++++--
 .../cli/converters/HintTopicConverter.java      |  20 ++++--
 .../cli/converters/IndexTypeConverter.java      |  21 +++++--
 .../LocatorDiscoveryConfigConverter.java        |  20 ++++--
 .../cli/converters/LocatorIdNameConverter.java  |  20 ++++--
 .../cli/converters/LogLevelConverter.java       |  20 ++++--
 .../cli/converters/MemberGroupConverter.java    |  20 ++++--
 .../cli/converters/MemberIdNameConverter.java   |  20 ++++--
 .../cli/converters/RegionPathConverter.java     |  20 ++++--
 .../cli/converters/StringArrayConverter.java    |  20 ++++--
 .../cli/converters/StringListConverter.java     |  20 ++++--
 .../cli/domain/AsyncEventQueueDetails.java      |  21 +++++--
 .../internal/cli/domain/CacheServerInfo.java    |  21 +++++--
 .../cli/domain/ConnectToLocatorResult.java      |  20 ++++--
 .../internal/cli/domain/DataCommandRequest.java |  21 +++++--
 .../internal/cli/domain/DataCommandResult.java  |  21 +++++--
 .../internal/cli/domain/DiskStoreDetails.java   |  18 ++++--
 .../cli/domain/DurableCqNamesResult.java        |  21 +++++--
 .../cli/domain/EvictionAttributesInfo.java      |  21 +++++--
 .../domain/FixedPartitionAttributesInfo.java    |  21 +++++--
 .../internal/cli/domain/IndexDetails.java       |  20 ++++--
 .../internal/cli/domain/IndexInfo.java          |  21 +++++--
 .../cli/domain/MemberConfigurationInfo.java     |  21 +++++--
 .../internal/cli/domain/MemberInformation.java  |  20 ++++--
 .../internal/cli/domain/MemberResult.java       |  21 +++++--
 .../cli/domain/PartitionAttributesInfo.java     |  21 +++++--
 .../cli/domain/RegionAttributesInfo.java        |  21 +++++--
 .../internal/cli/domain/RegionDescription.java  |  21 +++++--
 .../cli/domain/RegionDescriptionPerMember.java  |  21 +++++--
 .../internal/cli/domain/RegionInformation.java  |  20 ++++--
 .../cli/domain/StackTracesPerMember.java        |  21 +++++--
 .../cli/domain/SubscriptionQueueSizeResult.java |  21 +++++--
 .../cli/exceptions/CliCommandException.java     |  20 ++++--
 .../exceptions/CliCommandInvalidException.java  |  20 ++++--
 .../CliCommandMultiModeOptionException.java     |  16 +++++
 .../CliCommandNotAvailableException.java        |  20 ++++--
 .../exceptions/CliCommandOptionException.java   |  20 ++++--
 ...CommandOptionHasMultipleValuesException.java |  20 ++++--
 .../CliCommandOptionInvalidException.java       |  20 ++++--
 .../CliCommandOptionMissingException.java       |  20 ++++--
 .../CliCommandOptionNotApplicableException.java |  20 ++++--
 ...liCommandOptionValueConversionException.java |  20 ++++--
 .../CliCommandOptionValueException.java         |  20 ++++--
 .../CliCommandOptionValueMissingException.java  |  20 ++++--
 .../internal/cli/exceptions/CliException.java   |  20 ++++--
 .../exceptions/CreateSubregionException.java    |  20 ++++--
 .../cli/exceptions/ExceptionGenerator.java      |  20 ++++--
 .../cli/exceptions/ExceptionHandler.java        |  20 ++++--
 .../cli/exceptions/IndexNotFoundException.java  |  16 +++++
 .../functions/AlterRuntimeConfigFunction.java   |  21 +++++--
 .../cli/functions/ChangeLogLevelFunction.java   |  20 ++++--
 .../cli/functions/CliFunctionResult.java        |  21 +++++--
 .../functions/CloseDurableClientFunction.java   |  21 +++++--
 .../cli/functions/CloseDurableCqFunction.java   |  21 +++++--
 .../cli/functions/ContunuousQueryFunction.java  |  20 ++++--
 .../CreateAsyncEventQueueFunction.java          |  21 +++++--
 .../functions/CreateDefinedIndexesFunction.java |  16 +++++
 .../cli/functions/CreateDiskStoreFunction.java  |  21 +++++--
 .../cli/functions/CreateIndexFunction.java      |  21 +++++--
 .../cli/functions/DataCommandFunction.java      |  21 +++++--
 .../internal/cli/functions/DeployFunction.java  |  21 +++++--
 .../functions/DescribeDiskStoreFunction.java    |  18 ++++--
 .../functions/DescribeHDFSStoreFunction.java    |  16 +++++
 .../cli/functions/DestroyDiskStoreFunction.java |  21 +++++--
 .../cli/functions/DestroyIndexFunction.java     |  21 +++++--
 .../cli/functions/ExportConfigFunction.java     |  23 ++++---
 .../cli/functions/ExportDataFunction.java       |  21 +++++--
 .../ExportSharedConfigurationFunction.java      |  21 +++++--
 .../FetchRegionAttributesFunction.java          |  20 ++++--
 .../FetchSharedConfigurationStatusFunction.java |  21 +++++--
 .../functions/GarbageCollectionFunction.java    |  21 +++++--
 .../GatewayReceiverCreateFunction.java          |  21 +++++--
 .../functions/GatewayReceiverFunctionArgs.java  |  21 +++++--
 .../functions/GatewaySenderCreateFunction.java  |  21 +++++--
 .../functions/GatewaySenderFunctionArgs.java    |  21 +++++--
 .../GetMemberConfigInformationFunction.java     |  21 +++++--
 .../functions/GetMemberInformationFunction.java |  21 +++++--
 .../functions/GetRegionDescriptionFunction.java |  20 ++++--
 .../cli/functions/GetRegionsFunction.java       |  20 ++++--
 .../cli/functions/GetStackTracesFunction.java   |  21 +++++--
 .../GetSubscriptionQueueSizeFunction.java       |  21 +++++--
 .../cli/functions/ImportDataFunction.java       |  21 +++++--
 ...ortSharedConfigurationArtifactsFunction.java |  21 +++++--
 .../functions/ListAsyncEventQueuesFunction.java |  18 ++++--
 .../cli/functions/ListDeployedFunction.java     |  21 +++++--
 .../cli/functions/ListDiskStoresFunction.java   |  18 ++++--
 .../functions/ListDurableCqNamesFunction.java   |  20 ++++--
 .../cli/functions/ListFunctionFunction.java     |  21 +++++--
 .../cli/functions/ListIndexFunction.java        |  20 ++++--
 .../LoadSharedConfigurationFunction.java        |  21 +++++--
 .../internal/cli/functions/LogFileFunction.java |  23 ++++---
 .../cli/functions/MemberRegionFunction.java     |  23 ++++---
 .../cli/functions/MembersForRegionFunction.java |  20 ++++--
 .../internal/cli/functions/NetstatFunction.java |  20 ++++--
 .../cli/functions/RebalanceFunction.java        |  23 ++++---
 .../cli/functions/RegionAlterFunction.java      |  20 ++++--
 .../cli/functions/RegionCreateFunction.java     |  20 ++++--
 .../cli/functions/RegionDestroyFunction.java    |  20 ++++--
 .../cli/functions/RegionFunctionArgs.java       |  20 ++++--
 .../cli/functions/ShutDownFunction.java         |  23 ++++---
 .../cli/functions/UndeployFunction.java         |  21 +++++--
 .../cli/functions/UnregisterFunction.java       |  23 ++++---
 .../cli/functions/UserFunctionExecution.java    |  24 ++++---
 .../management/internal/cli/help/CliTopic.java  |  20 ++++--
 .../internal/cli/help/format/Block.java         |  20 ++++--
 .../internal/cli/help/format/DataNode.java      |  20 ++++--
 .../internal/cli/help/format/Help.java          |  20 ++++--
 .../internal/cli/help/format/NewHelp.java       |  20 ++++--
 .../internal/cli/help/format/Row.java           |  20 ++++--
 .../internal/cli/help/utils/FormatOutput.java   |  16 +++++
 .../internal/cli/help/utils/HelpUtils.java      |  20 ++++--
 .../internal/cli/i18n/CliStrings.java           |  22 +++++--
 .../internal/cli/json/GfJsonArray.java          |  20 ++++--
 .../internal/cli/json/GfJsonException.java      |  22 ++++---
 .../internal/cli/json/GfJsonObject.java         |  20 ++++--
 .../management/internal/cli/json/TypedJson.java |  20 ++++--
 .../internal/cli/modes/CommandModes.java        |  18 +++++-
 .../cli/multistep/CLIMultiStepHelper.java       |  21 +++++--
 .../internal/cli/multistep/CLIRemoteStep.java   |  16 +++++
 .../internal/cli/multistep/CLIStep.java         |  16 +++++
 .../cli/multistep/CLIStepExecption.java         |  21 +++++--
 .../cli/multistep/MultiStepCommand.java         |  16 +++++
 .../internal/cli/parser/Argument.java           |  20 ++++--
 .../internal/cli/parser/AvailabilityTarget.java |  20 ++++--
 .../internal/cli/parser/CommandTarget.java      |  22 ++++---
 .../internal/cli/parser/GfshMethodTarget.java   |  22 ++++---
 .../internal/cli/parser/GfshOptionParser.java   |  22 ++++---
 .../internal/cli/parser/MethodParameter.java    |  20 ++++--
 .../management/internal/cli/parser/Option.java  |  20 ++++--
 .../internal/cli/parser/OptionSet.java          |  20 ++++--
 .../internal/cli/parser/Parameter.java          |  20 ++++--
 .../internal/cli/parser/ParserUtils.java        |  20 ++++--
 .../internal/cli/parser/SyntaxConstants.java    |  22 ++++---
 .../cli/parser/jopt/JoptOptionParser.java       |  20 ++++--
 .../preprocessor/EnclosingCharacters.java       |  20 ++++--
 .../cli/parser/preprocessor/Preprocessor.java   |  20 ++++--
 .../parser/preprocessor/PreprocessorUtils.java  |  20 ++++--
 .../internal/cli/parser/preprocessor/Stack.java |  20 ++++--
 .../cli/parser/preprocessor/TrimmedInput.java   |  20 ++++--
 .../cli/remote/CommandExecutionContext.java     |  20 ++++--
 .../internal/cli/remote/CommandProcessor.java   |  20 ++++--
 .../cli/remote/CommandStatementImpl.java        |  20 ++++--
 .../cli/remote/MemberCommandService.java        |  20 ++++--
 .../cli/remote/RemoteExecutionStrategy.java     |  20 ++++--
 .../internal/cli/remote/WrapperThreadLocal.java |  20 ++++--
 .../internal/cli/result/AbstractResultData.java |  20 ++++--
 .../cli/result/CliJsonSerializable.java         |  20 ++++--
 .../cli/result/CliJsonSerializableFactory.java  |  21 +++++--
 .../cli/result/CliJsonSerializableIds.java      |  20 ++++--
 .../internal/cli/result/CommandResult.java      |  20 ++++--
 .../cli/result/CommandResultException.java      |  21 +++++--
 .../cli/result/CompositeResultData.java         |  20 ++++--
 .../internal/cli/result/ErrorResultData.java    |  20 ++++--
 .../internal/cli/result/FileResult.java         |  20 ++++--
 .../internal/cli/result/InfoResultData.java     |  20 ++++--
 .../internal/cli/result/ObjectResultData.java   |  20 ++++--
 .../internal/cli/result/ResultBuilder.java      |  20 ++++--
 .../internal/cli/result/ResultData.java         |  20 ++++--
 .../cli/result/ResultDataException.java         |  20 ++++--
 .../internal/cli/result/TableBuilder.java       |  20 ++++--
 .../internal/cli/result/TableBuilderHelper.java |  21 +++++--
 .../internal/cli/result/TabularResultData.java  |  20 ++++--
 .../management/internal/cli/shell/Gfsh.java     |  20 ++++--
 .../internal/cli/shell/GfshConfig.java          |  20 ++++--
 .../cli/shell/GfshExecutionStrategy.java        |  20 ++++--
 .../cli/shell/JMXConnectionException.java       |  20 ++++--
 .../cli/shell/JMXInvocationException.java       |  20 ++++--
 .../internal/cli/shell/JmxOperationInvoker.java |  20 ++++--
 .../internal/cli/shell/MultiCommandHelper.java  |  16 +++++
 .../internal/cli/shell/OperationInvoker.java    |  20 ++++--
 .../internal/cli/shell/jline/ANSIHandler.java   |  20 ++++--
 .../cli/shell/jline/CygwinMinttyTerminal.java   |  21 +++++--
 .../internal/cli/shell/jline/GfshHistory.java   |  20 ++++--
 .../shell/jline/GfshUnsupportedTerminal.java    |  20 ++++--
 .../cli/shell/unsafe/GfshSignalHandler.java     |  21 +++++--
 .../internal/cli/util/CLIConsoleBufferUtil.java |  21 +++++--
 .../internal/cli/util/CauseFinder.java          |  20 ++++--
 .../cli/util/ClasspathScanLoadHelper.java       |  20 ++++--
 .../internal/cli/util/CommandStringBuilder.java |  20 ++++--
 .../internal/cli/util/CommentSkipHelper.java    |  20 ++++--
 .../internal/cli/util/ConnectionEndpoint.java   |  21 +++++--
 .../internal/cli/util/DiskStoreCompacter.java   |  20 ++++--
 .../cli/util/DiskStoreNotFoundException.java    |  18 ++++--
 .../internal/cli/util/DiskStoreUpgrader.java    |  21 +++++--
 .../internal/cli/util/DiskStoreValidater.java   |  21 ++++---
 .../cli/util/EvictionAttributesInfo.java        |  21 +++++--
 .../cli/util/FixedPartitionAttributesInfo.java  |  21 +++++--
 .../internal/cli/util/GfshConsoleReader.java    |  22 ++++---
 .../cli/util/HDFSStoreNotFoundException.java    |  18 ++++--
 .../cli/util/JConsoleNotFoundException.java     |  20 ++++--
 .../management/internal/cli/util/JsonUtil.java  |  20 ++++--
 .../internal/cli/util/MemberInformation.java    |  20 ++++--
 .../cli/util/MemberNotFoundException.java       |  18 ++++--
 .../management/internal/cli/util/MergeLogs.java |  21 +++++--
 .../internal/cli/util/ReadWriteFile.java        |  21 +++++--
 .../cli/util/RegionAttributesDefault.java       |  21 +++++--
 .../cli/util/RegionAttributesNames.java         |  21 +++++--
 .../internal/cli/util/RegionPath.java           |  20 ++++--
 .../cli/util/VisualVmNotFoundException.java     |  20 ++++--
 .../internal/cli/util/spring/Assert.java        |  20 ++++--
 .../internal/cli/util/spring/ObjectUtils.java   |  21 +++++--
 .../cli/util/spring/ReflectionUtils.java        |  21 +++++--
 .../internal/cli/util/spring/StringUtils.java   |  21 +++++--
 .../SharedConfigurationWriter.java              |  21 +++++--
 .../callbacks/ConfigurationChangeListener.java  |  21 +++++--
 .../configuration/domain/CacheElement.java      |  21 +++++--
 .../configuration/domain/Configuration.java     |  21 +++++--
 .../domain/ConfigurationChangeResult.java       |  21 +++++--
 .../domain/SharedConfigurationStatus.java       |  16 +++++
 .../configuration/domain/XmlEntity.java         |  21 +++++--
 .../configuration/functions/AddJarFunction.java |  21 +++++--
 .../functions/AddXmlEntityFunction.java         |  21 +++++--
 .../functions/DeleteJarFunction.java            |  21 +++++--
 .../functions/DeleteXmlEntityFunction.java      |  21 +++++--
 .../functions/GetAllJarsFunction.java           |  21 +++++--
 .../functions/ModifyPropertiesFunction.java     |  21 +++++--
 .../handlers/ConfigurationRequestHandler.java   |  21 +++++--
 ...SharedConfigurationStatusRequestHandler.java |  21 +++++--
 .../messages/ConfigurationRequest.java          |  21 +++++--
 .../messages/ConfigurationResponse.java         |  21 +++++--
 .../SharedConfigurationStatusRequest.java       |  21 +++++--
 .../SharedConfigurationStatusResponse.java      |  21 +++++--
 .../configuration/utils/DtdResolver.java        |  16 +++++
 .../configuration/utils/XmlConstants.java       |  21 +++++--
 .../internal/configuration/utils/XmlUtils.java  |  21 +++++--
 .../internal/configuration/utils/ZipUtils.java  |  21 +++++--
 .../internal/messages/CompactRequest.java       |  20 ++++--
 .../internal/messages/CompactResponse.java      |  20 ++++--
 .../internal/security/AccessControl.java        |  16 +++++
 .../internal/security/AccessControlContext.java |  16 +++++
 .../internal/security/AccessControlMXBean.java  |  16 +++++
 .../internal/security/CLIOperationContext.java  |  16 +++++
 .../internal/security/JMXOperationContext.java  |  16 +++++
 .../internal/security/JSONAuthorization.java    |  16 +++++
 .../internal/security/MBeanServerWrapper.java   |  16 +++++
 .../security/ManagementInterceptor.java         |  16 +++++
 .../management/internal/security/Resource.java  |  16 +++++
 .../internal/security/ResourceConstants.java    |  16 +++++
 .../internal/security/ResourceOperation.java    |  16 +++++
 .../security/ResourceOperationContext.java      |  16 +++++
 .../unsafe/ReadOpFileAccessController.java      |  21 +++++--
 .../controllers/AbstractCommandsController.java |  21 +++++--
 .../AbstractMultiPartCommandsController.java    |  21 +++++--
 .../controllers/ClientCommandsController.java   |  21 +++++--
 .../controllers/ClusterCommandsController.java  |  21 +++++--
 .../controllers/ConfigCommandsController.java   |  21 +++++--
 .../web/controllers/DataCommandsController.java |  21 +++++--
 .../controllers/DeployCommandsController.java   |  21 +++++--
 .../DiskStoreCommandsController.java            |  21 +++++--
 .../DurableClientCommandsController.java        |  21 +++++--
 .../controllers/FunctionCommandsController.java |  21 +++++--
 .../controllers/IndexCommandsController.java    |  21 +++++--
 .../LauncherLifecycleCommandsController.java    |  21 +++++--
 .../controllers/MemberCommandsController.java   |  21 +++++--
 .../MiscellaneousCommandsController.java        |  21 +++++--
 .../web/controllers/PdxCommandsController.java  |  16 +++++
 .../controllers/QueueCommandsController.java    |  21 +++++--
 .../controllers/RegionCommandsController.java   |  21 +++++--
 .../controllers/ShellCommandsController.java    |  21 +++++--
 .../web/controllers/WanCommandsController.java  |  21 +++++--
 .../EnvironmentVariablesHandlerInterceptor.java |  21 +++++--
 .../support/MemberMXBeanAdapter.java            |  21 +++++--
 .../management/internal/web/domain/Link.java    |  21 +++++--
 .../internal/web/domain/LinkIndex.java          |  21 +++++--
 .../web/domain/QueryParameterSource.java        |  21 +++++--
 .../internal/web/http/ClientHttpRequest.java    |  21 +++++--
 .../internal/web/http/HttpHeader.java           |  21 +++++--
 .../internal/web/http/HttpMethod.java           |  21 +++++--
 .../SerializableObjectHttpMessageConverter.java |  21 +++++--
 .../web/http/support/SimpleHttpRequester.java   |  21 +++++--
 .../internal/web/io/MultipartFileAdapter.java   |  21 +++++--
 .../web/io/MultipartFileResourceAdapter.java    |  21 +++++--
 .../web/shell/AbstractHttpOperationInvoker.java |  21 +++++--
 .../web/shell/HttpOperationInvoker.java         |  16 +++++
 .../web/shell/MBeanAccessException.java         |  21 +++++--
 .../RestApiCallForCommandNotFoundException.java |  21 +++++--
 .../web/shell/RestHttpOperationInvoker.java     |  21 +++++--
 .../web/shell/SimpleHttpOperationInvoker.java   |  21 +++++--
 .../shell/support/HttpInvocationHandler.java    |  21 +++++--
 .../shell/support/HttpMBeanProxyFactory.java    |  21 +++++--
 .../internal/web/util/ConvertUtils.java         |  21 +++++--
 .../management/internal/web/util/UriUtils.java  |  21 +++++--
 .../management/membership/ClientMembership.java |  21 +++++--
 .../membership/ClientMembershipEvent.java       |  21 +++++--
 .../membership/ClientMembershipListener.java    |  21 +++++--
 .../ClientMembershipListenerAdapter.java        |  21 +++++--
 .../management/membership/MembershipEvent.java  |  21 +++++--
 .../membership/MembershipListener.java          |  21 +++++--
 .../UniversalMembershipListenerAdapter.java     |  21 +++++--
 .../memcached/GemFireMemcachedServer.java       |  21 +++++--
 .../com/gemstone/gemfire/pdx/FieldType.java     |  21 +++++--
 .../com/gemstone/gemfire/pdx/JSONFormatter.java |  16 +++++
 .../gemfire/pdx/JSONFormatterException.java     |  23 ++++---
 .../gemfire/pdx/NonPortableClassException.java  |  16 +++++
 .../gemfire/pdx/PdxConfigurationException.java  |  21 +++++--
 .../pdx/PdxFieldAlreadyExistsException.java     |  21 +++++--
 .../pdx/PdxFieldDoesNotExistException.java      |  21 +++++--
 .../pdx/PdxFieldTypeMismatchException.java      |  21 +++++--
 .../gemfire/pdx/PdxInitializationException.java |  21 +++++--
 .../com/gemstone/gemfire/pdx/PdxInstance.java   |  21 +++++--
 .../gemfire/pdx/PdxInstanceFactory.java         |  21 +++++--
 .../com/gemstone/gemfire/pdx/PdxReader.java     |  21 +++++--
 .../pdx/PdxRegistryMismatchException.java       |  24 ++++---
 .../gemstone/gemfire/pdx/PdxSerializable.java   |  21 +++++--
 .../gemfire/pdx/PdxSerializationException.java  |  21 +++++--
 .../com/gemstone/gemfire/pdx/PdxSerializer.java |  21 +++++--
 .../gemstone/gemfire/pdx/PdxUnreadFields.java   |  21 +++++--
 .../com/gemstone/gemfire/pdx/PdxWriter.java     |  21 +++++--
 .../pdx/ReflectionBasedAutoSerializer.java      |  21 +++++--
 .../gemfire/pdx/WritablePdxInstance.java        |  21 +++++--
 .../pdx/internal/AutoSerializableManager.java   |  21 +++++--
 .../pdx/internal/CheckTypeRegistryState.java    |  21 +++++--
 .../pdx/internal/ClientTypeRegistration.java    |  21 +++++--
 .../gemfire/pdx/internal/ComparableEnum.java    |  16 +++++
 .../pdx/internal/ConvertableToBytes.java        |  16 +++++
 .../gemstone/gemfire/pdx/internal/DataSize.java |  21 +++++--
 .../gemfire/pdx/internal/DefaultPdxField.java   |  21 +++++--
 .../gemstone/gemfire/pdx/internal/EnumId.java   |  21 +++++--
 .../gemstone/gemfire/pdx/internal/EnumInfo.java |  21 +++++--
 .../pdx/internal/FieldNotFoundInPdxVersion.java |  16 +++++
 .../gemfire/pdx/internal/InternalPdxReader.java |  21 +++++--
 .../pdx/internal/LonerTypeRegistration.java     |  21 +++++--
 .../pdx/internal/NullTypeRegistration.java      |  21 +++++--
 .../gemstone/gemfire/pdx/internal/PdxField.java |  21 +++++--
 .../gemfire/pdx/internal/PdxInputStream.java    |  21 +++++--
 .../gemfire/pdx/internal/PdxInstanceEnum.java   |  21 +++++--
 .../pdx/internal/PdxInstanceFactoryImpl.java    |  21 +++++--
 .../gemfire/pdx/internal/PdxInstanceImpl.java   |  21 +++++--
 .../pdx/internal/PdxInstanceInputStream.java    |  21 +++++--
 .../gemfire/pdx/internal/PdxOutputStream.java   |  21 +++++--
 .../gemfire/pdx/internal/PdxReaderImpl.java     |  21 +++++--
 .../gemfire/pdx/internal/PdxString.java         |  23 ++++---
 .../gemstone/gemfire/pdx/internal/PdxType.java  |  21 +++++--
 .../gemfire/pdx/internal/PdxUnreadData.java     |  21 +++++--
 .../gemfire/pdx/internal/PdxWriterImpl.java     |  21 +++++--
 .../pdx/internal/PeerTypeRegistration.java      |  21 +++++--
 .../pdx/internal/TrackingPdxReaderImpl.java     |  21 +++++--
 .../gemfire/pdx/internal/TypeRegistration.java  |  21 +++++--
 .../gemfire/pdx/internal/TypeRegistry.java      |  21 +++++--
 .../gemfire/pdx/internal/UnreadPdxType.java     |  21 +++++--
 .../internal/WeakConcurrentIdentityHashMap.java |  21 +++++--
 .../pdx/internal/WritablePdxInstanceImpl.java   |  21 +++++--
 .../gemfire/pdx/internal/json/JsonHelper.java   |  21 +++++--
 .../pdx/internal/json/PdxInstanceHelper.java    |  23 ++++---
 .../pdx/internal/json/PdxListHelper.java        |  23 ++++---
 .../gemfire/pdx/internal/json/PdxToJSON.java    |  23 ++++---
 .../pdx/internal/unsafe/UnsafeWrapper.java      |  21 +++++--
 .../com/gemstone/gemfire/ra/GFConnection.java   |  16 +++++
 .../gemfire/ra/GFConnectionFactory.java         |  16 +++++
 .../gemfire/redis/GemFireRedisServer.java       |  18 +++++-
 .../gemfire/security/AccessControl.java         |  21 +++++--
 .../gemfire/security/AuthInitialize.java        |  21 +++++--
 .../security/AuthenticationFailedException.java |  21 +++++--
 .../AuthenticationRequiredException.java        |  21 +++++--
 .../gemfire/security/Authenticator.java         |  21 +++++--
 .../security/GemFireSecurityException.java      |  21 +++++--
 .../security/NotAuthorizedException.java        |  21 +++++--
 .../batterytest/greplogs/ExpectedStrings.java   |  21 +++++--
 .../java/batterytest/greplogs/LogConsumer.java  |  21 +++++--
 .../src/test/java/cacheRunner/Portfolio.java    |  16 +++++
 .../src/test/java/cacheRunner/Position.java     |  16 +++++
 .../src/test/java/com/company/app/Customer.java |  21 +++++--
 .../src/test/java/com/company/app/DBLoader.java |  21 +++++--
 .../com/company/app/OrdersCacheListener.java    |  21 +++++--
 .../java/com/company/data/DatabaseLoader.java   |  21 +++++--
 .../java/com/company/data/MyDeclarable.java     |  16 +++++
 .../src/test/java/com/company/data/MySizer.java |  21 +++++--
 .../com/company/data/MyTransactionListener.java |  21 +++++--
 .../src/test/java/com/examples/LinkNode.java    |  21 +++++--
 .../src/test/java/com/examples/SuperClass.java  |  21 +++++--
 .../src/test/java/com/examples/TestObject.java  |  21 +++++--
 .../src/test/java/com/examples/ds/Address.java  |  16 +++++
 .../src/test/java/com/examples/ds/Company.java  |  21 +++++--
 .../java/com/examples/ds/CompanySerializer.java |  21 +++++--
 .../src/test/java/com/examples/ds/Employee.java |  21 +++++--
 .../com/examples/ds/PutDataSerializables.java   |  21 +++++--
 .../src/test/java/com/examples/ds/User.java     |  21 +++++--
 .../com/examples/snapshot/MyDataSerializer.java |  21 +++++--
 .../java/com/examples/snapshot/MyObject.java    |  23 ++++---
 .../snapshot/MyObjectDataSerializable.java      |  23 ++++---
 .../java/com/examples/snapshot/MyObjectPdx.java |  16 +++++
 .../snapshot/MyObjectPdxSerializable.java       |  21 +++++--
 .../com/examples/snapshot/MyPdxSerializer.java  |  21 +++++--
 .../java/com/gemstone/gemfire/AppObject.java    |  18 +++++-
 .../test/java/com/gemstone/gemfire/BadTest.java |  21 +++++--
 .../com/gemstone/gemfire/CopyJUnitTest.java     |  21 +++++--
 .../com/gemstone/gemfire/DeltaTestImpl.java     |  21 +++++--
 .../gemfire/DiskInstantiatorsJUnitTest.java     |  21 +++++--
 .../com/gemstone/gemfire/GemFireTestCase.java   |  21 +++++--
 .../java/com/gemstone/gemfire/Invariant.java    |  22 ++++---
 .../com/gemstone/gemfire/InvariantResult.java   |  22 ++++---
 .../com/gemstone/gemfire/JUnitTestSetup.java    |  21 +++++--
 .../gemfire/JtaNoninvolvementJUnitTest.java     |  21 +++++--
 .../gemfire/LocalStatisticsJUnitTest.java       |  21 +++++--
 .../com/gemstone/gemfire/LonerDMJUnitTest.java  |  21 +++++--
 .../gemstone/gemfire/StatisticsTestCase.java    |  21 +++++--
 .../gemfire/StatisticsTypeJUnitTest.java        |  21 +++++--
 .../com/gemstone/gemfire/TXExpiryJUnitTest.java |  21 +++++--
 .../java/com/gemstone/gemfire/TXJUnitTest.java  |  21 +++++--
 .../com/gemstone/gemfire/TXWriterJUnitTest.java |  21 +++++--
 .../gemstone/gemfire/TXWriterOOMEJUnitTest.java |  21 +++++--
 .../com/gemstone/gemfire/TXWriterTestCase.java  |  16 +++++
 .../gemstone/gemfire/TestDataSerializer.java    |  21 +++++--
 .../com/gemstone/gemfire/TimingTestCase.java    |  22 ++++---
 .../com/gemstone/gemfire/UnitTestDoclet.java    |  21 +++++--
 .../gemstone/gemfire/admin/AdminTestHelper.java |  16 +++++
 .../BindDistributedSystemJUnitTest.java         |  21 +++++--
 .../internal/CacheHealthEvaluatorJUnitTest.java |  21 +++++--
 .../internal/DistributedSystemTestCase.java     |  21 +++++--
 .../admin/internal/HealthEvaluatorTestCase.java |  21 +++++--
 .../MemberHealthEvaluatorJUnitTest.java         |  21 +++++--
 .../cache/AttributesFactoryJUnitTest.java       |  21 +++++--
 .../gemfire/cache/Bug36619JUnitTest.java        |  21 +++++--
 .../gemfire/cache/Bug42039JUnitTest.java        |  21 +++++--
 .../gemfire/cache/Bug52289JUnitTest.java        |  24 ++++---
 .../gemfire/cache/CacheListenerJUnitTest.java   |  21 +++++--
 .../cache/CacheRegionClearStatsDUnitTest.java   |  21 +++++--
 .../gemstone/gemfire/cache/ClientHelper.java    |  21 +++++--
 .../cache/ClientServerTimeSyncDUnitTest.java    |  16 +++++
 .../cache/ConnectionPoolAndLoaderDUnitTest.java |  21 +++++--
 .../cache/ConnectionPoolFactoryJUnitTest.java   |  21 +++++--
 .../gemfire/cache/OperationJUnitTest.java       |  21 +++++--
 .../gemfire/cache/PoolManagerJUnitTest.java     |  21 +++++--
 .../gemstone/gemfire/cache/ProxyJUnitTest.java  |  21 +++++--
 .../gemfire/cache/RegionFactoryJUnitTest.java   |  21 +++++--
 .../gemfire/cache/RoleExceptionJUnitTest.java   |  21 +++++--
 .../client/ClientCacheFactoryJUnitTest.java     |  21 +++++--
 .../client/ClientRegionFactoryJUnitTest.java    |  21 +++++--
 .../ClientServerRegisterInterestsDUnitTest.java |  16 +++++
 .../internal/AutoConnectionSourceDUnitTest.java |  21 +++++--
 .../AutoConnectionSourceImplJUnitTest.java      |  21 +++++--
 .../AutoConnectionSourceWithUDPDUnitTest.java   |  21 +++++--
 .../CacheServerSSLConnectionDUnitTest.java      |  21 +++++--
 .../internal/ConnectionPoolImplJUnitTest.java   |  21 +++++--
 .../internal/LocatorLoadBalancingDUnitTest.java |  21 +++++--
 .../cache/client/internal/LocatorTestBase.java  |  21 +++++--
 .../internal/OpExecutorImplJUnitTest.java       |  21 +++++--
 .../client/internal/QueueManagerJUnitTest.java  |  21 +++++--
 .../internal/SSLNoClientAuthDUnitTest.java      |  21 +++++--
 .../internal/ServerBlackListJUnitTest.java      |  21 +++++--
 .../locator/LocatorStatusResponseJUnitTest.java |  20 ++++--
 .../pooling/ConnectionManagerJUnitTest.java     |  21 +++++--
 .../SignalledFlushObserverJUnitTest.java        |  16 +++++
 .../SortedListForAsyncQueueJUnitTest.java       |  24 ++++---
 .../management/MXMemoryPoolListenerExample.java |  21 +++++--
 .../management/MemoryThresholdsDUnitTest.java   |  21 +++++--
 .../MemoryThresholdsOffHeapDUnitTest.java       |  21 +++++--
 .../management/ResourceManagerDUnitTest.java    |  21 +++++--
 .../ExceptionHandlingJUnitTest.java             |  21 +++++--
 .../mapInterface/MapFunctionalJUnitTest.java    |  21 +++++--
 .../mapInterface/PutAllGlobalLockJUnitTest.java |  21 +++++--
 .../PutOperationContextJUnitTest.java           |  16 +++++
 .../GetOperationContextImplJUnitTest.java       |  16 +++++
 .../partition/PartitionManagerDUnitTest.java    |  21 +++++--
 .../PartitionRegionHelperDUnitTest.java         |  21 +++++--
 .../BaseLineAndCompareQueryPerfJUnitTest.java   |  21 +++++--
 .../query/Bug32947ValueConstraintJUnitTest.java |  21 +++++--
 .../gemfire/cache/query/BugJUnitTest.java       |  21 +++++--
 .../gemfire/cache/query/CacheUtils.java         |  21 +++++--
 .../cache/query/PdxStringQueryJUnitTest.java    |  21 +++++--
 .../gemstone/gemfire/cache/query/PerfQuery.java |  22 ++++---
 .../gemfire/cache/query/QueryJUnitTest.java     |  21 +++++--
 .../cache/query/QueryServiceJUnitTest.java      |  21 +++++--
 .../gemfire/cache/query/QueryTestUtils.java     |  21 +++++--
 .../cache/query/QueryTestUtilsJUnitTest.java    |  21 +++++--
 .../gemfire/cache/query/RegionJUnitTest.java    |  21 +++++--
 .../cache/query/TypedIteratorJUnitTest.java     |  21 +++++--
 .../com/gemstone/gemfire/cache/query/Utils.java |  21 +++++--
 .../query/cq/dunit/CqQueryTestListener.java     |  21 +++++--
 .../gemfire/cache/query/data/Address.java       |  21 +++++--
 .../gemstone/gemfire/cache/query/data/City.java |  22 +++++--
 .../cache/query/data/CollectionHolder.java      |  22 +++++--
 .../cache/query/data/ComparableWrapper.java     |  22 +++++--
 .../gemfire/cache/query/data/Country.java       |  21 +++++--
 .../gemstone/gemfire/cache/query/data/Data.java |  22 +++++--
 .../gemfire/cache/query/data/District.java      |  22 +++++--
 .../gemfire/cache/query/data/Employee.java      |  21 +++++--
 .../gemfire/cache/query/data/Inventory.java     |  21 +++++--
 .../gemfire/cache/query/data/Keywords.java      |  21 +++++--
 .../gemfire/cache/query/data/Manager.java       |  21 +++++--
 .../gemfire/cache/query/data/Numbers.java       |  21 +++++--
 .../gemfire/cache/query/data/PhoneNo.java       |  21 +++++--
 .../gemfire/cache/query/data/Portfolio.java     |  22 +++++--
 .../gemfire/cache/query/data/PortfolioData.java |  21 +++++--
 .../gemfire/cache/query/data/PortfolioNoDS.java |  16 +++++
 .../gemfire/cache/query/data/PortfolioPdx.java  |  22 +++++--
 .../gemfire/cache/query/data/Position.java      |  21 +++++--
 .../gemfire/cache/query/data/PositionNoDS.java  |  16 +++++
 .../gemfire/cache/query/data/PositionPdx.java   |  21 +++++--
 .../query/data/ProhibitedSecurityQuote.java     |  21 +++++--
 .../gemfire/cache/query/data/Quote.java         |  21 +++++--
 .../gemfire/cache/query/data/Restricted.java    |  21 +++++--
 .../cache/query/data/SecurityMaster.java        |  21 +++++--
 .../gemfire/cache/query/data/State.java         |  21 +++++--
 .../gemfire/cache/query/data/Street.java        |  21 +++++--
 .../gemfire/cache/query/data/Student.java       |  23 ++++---
 .../gemfire/cache/query/data/Vehicle.java       |  21 +++++--
 .../gemfire/cache/query/data/Village.java       |  21 +++++--
 .../query/dunit/CloseCacheAuthorization.java    |  16 +++++
 .../query/dunit/CompactRangeIndexDUnitTest.java |  21 +++++--
 .../cache/query/dunit/CqTimeTestListener.java   |  21 +++++--
 .../cache/query/dunit/GroupByDUnitImpl.java     |  16 +++++
 .../dunit/GroupByPartitionedQueryDUnitTest.java |  16 +++++
 .../query/dunit/GroupByQueryDUnitTest.java      |  16 +++++
 .../cache/query/dunit/HashIndexDUnitTest.java   |  21 +++++--
 .../cache/query/dunit/HelperTestCase.java       |  16 +++++
 .../dunit/NonDistinctOrderByDUnitImpl.java      |  16 +++++
 .../NonDistinctOrderByPartitionedDUnitTest.java |  16 +++++
 .../query/dunit/PdxStringQueryDUnitTest.java    |  21 +++++--
 .../dunit/QueryAPITestPartitionResolver.java    |  22 ++++---
 .../cache/query/dunit/QueryAuthorization.java   |  21 +++++--
 .../dunit/QueryDataInconsistencyDUnitTest.java  |  24 ++++---
 .../dunit/QueryIndexUsingXMLDUnitTest.java      |  21 +++++--
 .../QueryParamsAuthorizationDUnitTest.java      |  21 +++++--
 .../QueryUsingFunctionContextDUnitTest.java     |  22 ++++---
 .../query/dunit/QueryUsingPoolDUnitTest.java    |  21 +++++--
 .../cache/query/dunit/RemoteQueryDUnitTest.java |  21 +++++--
 ...esourceManagerWithQueryMonitorDUnitTest.java |  21 +++++--
 .../query/dunit/SelectStarQueryDUnitTest.java   |  21 +++++--
 .../cache/query/facets/lang/Address.java        |  23 ++++---
 .../gemfire/cache/query/facets/lang/Course.java |  23 ++++---
 .../cache/query/facets/lang/Department.java     |  23 ++++---
 .../query/facets/lang/DerivedEmployee.java      |  22 ++++---
 .../cache/query/facets/lang/Employee.java       |  21 ++++---
 .../cache/query/facets/lang/Faculty.java        |  21 ++++---
 .../cache/query/facets/lang/G_Student.java      |  23 ++++---
 .../gemfire/cache/query/facets/lang/Person.java |  23 ++++---
 .../cache/query/facets/lang/Student.java        |  23 ++++---
 .../cache/query/facets/lang/UG_Student.java     |  23 ++++---
 .../gemfire/cache/query/facets/lang/Utils.java  |  20 ++++--
 .../ComparisonOperatorsJUnitTest.java           |  21 +++++--
 .../query/functional/ConstantsJUnitTest.java    |  21 +++++--
 .../query/functional/CountStarJUnitTest.java    |  21 +++++--
 .../CustomerOptimizationsJUnitTest.java         |  21 +++++--
 .../DistinctAndNonDistinctQueryJUnitTest.java   |  29 +++++----
 ...ctResultsWithDupValuesInRegionJUnitTest.java |  21 +++++--
 .../query/functional/FunctionJUnitTest.java     |  21 +++++--
 .../functional/GroupByPartitionedJUnitTest.java |  16 +++++
 .../functional/GroupByReplicatedJUnitTest.java  |  16 +++++
 .../cache/query/functional/GroupByTestImpl.java |  21 +++++--
 .../query/functional/GroupByTestInterface.java  |  16 +++++
 .../query/functional/INOperatorJUnitTest.java   |  21 +++++--
 .../functional/IUM6Bug32345ReJUnitTest.java     |  21 +++++--
 .../cache/query/functional/IUMJUnitTest.java    |  21 +++++--
 .../IUMRCompositeIteratorJUnitTest.java         |  21 +++++--
 .../IUMRMultiIndexesMultiRegionJUnitTest.java   |  21 +++++--
 .../IUMRShuffleIteratorsJUnitTest.java          |  21 +++++--
 .../functional/IUMRSingleRegionJUnitTest.java   |  21 +++++--
 ...ependentOperandsInWhereClause2JUnitTest.java |  21 +++++--
 .../IndexCreationDeadLockJUnitTest.java         |  21 +++++--
 .../functional/IndexCreationJUnitTest.java      |  21 +++++--
 .../IndexMaintenanceAsynchJUnitTest.java        |  21 +++++--
 .../functional/IndexOperatorJUnitTest.java      |  21 +++++--
 .../IndexPrimaryKeyUsageJUnitTest.java          |  21 +++++--
 .../IndexUsageInNestedQueryJUnitTest.java       |  21 +++++--
 .../IndexUsageWithAliasAsProjAtrbt.java         |  21 +++++--
 ...IndexUsageWithAliasAsProjAtrbtJUnitTest.java |  21 +++++--
 .../IndexUseMultFrmSnglCondJUnitTest.java       |  21 +++++--
 ...ndexWithSngleFrmAndMultCondQryJUnitTest.java |  21 +++++--
 .../functional/IteratorTypeDefEmpJUnitTest.java |  21 +++++--
 .../functional/IteratorTypeDefJUnitTest.java    |  21 +++++--
 .../IteratorTypeDefaultTypesJUnitTest.java      |  21 +++++--
 .../functional/IumMultConditionJUnitTest.java   |  21 +++++--
 .../functional/JavaSerializationJUnitTest.java  |  21 +++++--
 .../functional/LikePredicateJUnitTest.java      |  21 +++++--
 .../query/functional/LimitClauseJUnitTest.java  |  21 +++++--
 .../functional/LogicalOperatorsJUnitTest.java   |  21 +++++--
 .../cache/query/functional/MiscJUnitTest.java   |  21 +++++--
 .../functional/MultiIndexCreationJUnitTest.java |  16 +++++
 .../MultiRegionIndexUsageJUnitTest.java         |  21 +++++--
 .../functional/MultipleRegionsJUnitTest.java    |  21 +++++--
 .../NegativeNumberQueriesJUnitTest.java         |  21 +++++--
 .../query/functional/NestedQueryJUnitTest.java  |  21 +++++--
 .../NonDistinctOrderByPartitionedJUnitTest.java |  16 +++++
 .../NonDistinctOrderByReplicatedJUnitTest.java  |  16 +++++
 .../NonDistinctOrderByTestImplementation.java   |  21 +++++--
 .../query/functional/NumericQueryJUnitTest.java |  21 +++++--
 .../functional/OrderByPartitionedJUnitTest.java |  16 +++++
 .../functional/OrderByReplicatedJUnitTest.java  |  16 +++++
 .../functional/OrderByTestImplementation.java   |  21 +++++--
 .../functional/ParameterBindingJUnitTest.java   |  21 +++++--
 .../PdxGroupByPartitionedJUnitTest.java         |  16 +++++
 .../PdxGroupByReplicatedJUnitTest.java          |  16 +++++
 .../query/functional/PdxGroupByTestImpl.java    |  16 +++++
 .../query/functional/PdxOrderByJUnitTest.java   |  16 +++++
 .../functional/QRegionInterfaceJUnitTest.java   |  21 +++++--
 .../QueryREUpdateInProgressJUnitTest.java       |  21 +++++--
 .../functional/QueryUndefinedJUnitTest.java     |  21 +++++--
 .../functional/ReservedKeywordsJUnitTest.java   |  21 +++++--
 .../ResultsDataSerializabilityJUnitTest.java    |  21 +++++--
 .../query/functional/SelectToDateJUnitTest.java |  21 +++++--
 .../functional/StructMemberAccessJUnitTest.java |  21 +++++--
 .../query/functional/StructSetOrResultsSet.java |  21 +++++--
 .../query/functional/TestNewFunctionSSorRS.java |  21 +++++--
 .../CompiledAggregateFunctionJUnitTest.java     |  16 +++++
 .../CompiledGroupBySelectJUnitTest.java         |  16 +++++
 .../CompiledJunctionInternalsJUnitTest.java     |  21 +++++--
 .../internal/CopyOnReadQueryJUnitTest.java      |  21 +++++--
 .../internal/ExecutionContextJUnitTest.java     |  21 +++++--
 .../query/internal/IndexManagerJUnitTest.java   |  21 +++++--
 .../internal/NWayMergeResultsJUnitTest.java     |  16 +++++
 .../internal/OrderByComparatorJUnitTest.java    |  16 +++++
 .../internal/ProjectionAttributeJUnitTest.java  |  21 +++++--
 .../query/internal/QCompilerJUnitTest.java      |  21 +++++--
 ...ueryFromClauseCanonicalizationJUnitTest.java |  21 +++++--
 .../QueryObjectSerializationJUnitTest.java      |  21 +++++--
 .../QueryObserverCallbackJUnitTest.java         |  21 +++++--
 .../query/internal/QueryTraceJUnitTest.java     |  21 +++++--
 .../query/internal/QueryUtilsJUnitTest.java     |  21 +++++--
 .../query/internal/ResultsBagJUnitTest.java     |  21 +++++--
 .../ResultsBagLimitBehaviourJUnitTest.java      |  21 +++++--
 .../ResultsCollectionWrapperLimitJUnitTest.java |  21 +++++--
 .../SelectResultsComparatorJUnitTest.java       |  21 +++++--
 .../StructBagLimitBehaviourJUnitTest.java       |  21 +++++--
 .../query/internal/StructSetJUnitTest.java      |  21 +++++--
 .../internal/aggregate/AggregatorJUnitTest.java |  16 +++++
 ...syncIndexUpdaterThreadShutdownJUnitTest.java |  21 +++++--
 .../index/AsynchIndexMaintenanceJUnitTest.java  |  21 +++++--
 .../CompactRangeIndexIndexMapJUnitTest.java     |  21 +++++--
 .../index/CompactRangeIndexJUnitTest.java       |  21 +++++--
 ...rrentIndexInitOnOverflowRegionDUnitTest.java |  21 +++++--
 ...ndexOperationsOnOverflowRegionDUnitTest.java |  21 +++++--
 ...pdateWithInplaceObjectModFalseDUnitTest.java |  21 +++++--
 ...ConcurrentIndexUpdateWithoutWLDUnitTest.java |  21 +++++--
 .../index/CopyOnReadIndexDUnitTest.java         |  21 +++++--
 .../index/CopyOnReadIndexJUnitTest.java         |  21 +++++--
 .../DeclarativeIndexCreationJUnitTest.java      |  21 +++++--
 .../internal/index/HashIndexJUnitTest.java      |  21 +++++--
 .../index/IndexCreationInternalsJUnitTest.java  |  21 +++++--
 .../internal/index/IndexElemArrayJUnitTest.java |  21 +++++--
 .../internal/index/IndexHintJUnitTest.java      |  16 +++++
 .../query/internal/index/IndexJUnitTest.java    |  21 +++++--
 .../index/IndexMaintainceJUnitTest.java         |  21 +++++--
 .../index/IndexMaintenanceJUnitTest.java        |  21 +++++--
 .../index/IndexStatisticsJUnitTest.java         |  21 +++++--
 .../IndexTrackingQueryObserverDUnitTest.java    |  21 +++++--
 .../IndexTrackingQueryObserverJUnitTest.java    |  21 +++++--
 .../query/internal/index/IndexUseJUnitTest.java |  21 +++++--
 .../IndexedMergeEquiJoinScenariosJUnitTest.java |  21 +++++--
 ...itializeIndexEntryDestroyQueryDUnitTest.java |  21 +++++--
 .../internal/index/MapIndexStoreJUnitTest.java  |  21 +++++--
 .../MapRangeIndexMaintenanceJUnitTest.java      |  21 +++++--
 .../index/MultiIndexCreationDUnitTest.java      |  16 +++++
 .../NewDeclarativeIndexCreationJUnitTest.java   |  21 +++++--
 .../index/PdxCopyOnReadQueryJUnitTest.java      |  16 +++++
 ...gRegionCreationIndexUpdateTypeJUnitTest.java |  21 +++++--
 .../PutAllWithIndexPerfDUnitDisabledTest.java   |  21 +++++--
 .../internal/index/RangeIndexAPIJUnitTest.java  |  21 +++++--
 .../PRBasicIndexCreationDUnitTest.java          |  20 ++++--
 .../PRBasicIndexCreationDeadlockDUnitTest.java  |  20 ++++--
 .../PRBasicMultiIndexCreationDUnitTest.java     |  20 ++++--
 .../partitioned/PRBasicQueryDUnitTest.java      |  20 ++++--
 .../PRBasicRemoveIndexDUnitTest.java            |  21 +++++--
 .../PRColocatedEquiJoinDUnitTest.java           |  21 +++++--
 .../partitioned/PRIndexStatisticsJUnitTest.java |  21 +++++--
 .../partitioned/PRInvalidQueryDUnitTest.java    |  20 ++++--
 .../partitioned/PRInvalidQueryJUnitTest.java    |  21 +++++--
 .../partitioned/PRQueryCacheCloseDUnitTest.java |  20 ++++--
 .../PRQueryCacheClosedJUnitTest.java            |  21 +++++--
 .../query/partitioned/PRQueryDUnitHelper.java   |  20 ++++--
 .../query/partitioned/PRQueryDUnitTest.java     |  20 ++++--
 .../query/partitioned/PRQueryJUnitTest.java     |  21 +++++--
 .../partitioned/PRQueryNumThreadsJUnitTest.java |  21 +++++--
 .../query/partitioned/PRQueryPerfDUnitTest.java |  20 ++++--
 .../PRQueryRegionCloseDUnitTest.java            |  20 ++++--
 .../PRQueryRegionClosedJUnitTest.java           |  21 +++++--
 .../PRQueryRegionDestroyedDUnitTest.java        |  20 ++++--
 .../PRQueryRegionDestroyedJUnitTest.java        |  21 +++++--
 .../PRQueryRemoteNodeExceptionDUnitTest.java    |  21 +++++--
 .../gemfire/cache/query/transaction/Person.java |  21 +++++--
 .../query/transaction/QueryAndJtaJUnitTest.java |  21 +++++--
 .../internal/ConnectionCountProbeJUnitTest.java |  21 +++++--
 .../cache/snapshot/CacheSnapshotJUnitTest.java  |  21 +++++--
 .../snapshot/ParallelSnapshotDUnitTest.java     |  21 +++++--
 .../gemfire/cache/snapshot/RegionGenerator.java |  21 +++++--
 .../cache/snapshot/RegionSnapshotJUnitTest.java |  21 +++++--
 .../snapshot/SnapshotByteArrayDUnitTest.java    |  21 +++++--
 .../cache/snapshot/SnapshotDUnitTest.java       |  21 +++++--
 .../snapshot/SnapshotPerformanceDUnitTest.java  |  21 +++++--
 .../cache/snapshot/SnapshotTestCase.java        |  21 +++++--
 .../cache/snapshot/WanSnapshotJUnitTest.java    |  21 +++++--
 .../cache/util/PasswordUtilJUnitTest.java       |  21 +++++--
 .../gemfire/cache30/Bug34387DUnitTest.java      |  21 +++++--
 .../gemfire/cache30/Bug34948DUnitTest.java      |  21 +++++--
 .../gemfire/cache30/Bug35214DUnitTest.java      |  21 +++++--
 .../gemfire/cache30/Bug38013DUnitTest.java      |  21 +++++--
 .../gemfire/cache30/Bug38741DUnitTest.java      |  21 +++++--
 .../cache30/Bug40255JUnitDisabledTest.java      |  22 ++++---
 .../cache30/Bug40662JUnitDisabledTest.java      |  21 +++++--
 .../gemfire/cache30/Bug44418JUnitTest.java      |  21 +++++--
 .../gemfire/cache30/CacheCloseDUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheListenerTestCase.java  |  21 +++++--
 .../gemfire/cache30/CacheLoaderTestCase.java    |  21 +++++--
 .../gemfire/cache30/CacheLogRollDUnitTest.java  |  21 +++++--
 .../gemfire/cache30/CacheMapTxnDUnitTest.java   |  22 +++++--
 ...cheRegionsReliablityStatsCheckDUnitTest.java |  21 +++++--
 .../cache30/CacheSerializableRunnable.java      |  21 +++++--
 .../cache30/CacheStatisticsDUnitTest.java       |  21 +++++--
 .../gemstone/gemfire/cache30/CacheTestCase.java |  21 +++++--
 .../gemfire/cache30/CacheWriterTestCase.java    |  21 +++++--
 .../cache30/CacheXMLPartitionResolver.java      |  21 +++++--
 .../gemfire/cache30/CacheXml30DUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheXml40DUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheXml41DUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheXml45DUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheXml51DUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheXml55DUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheXml57DUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheXml58DUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheXml60DUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheXml61DUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheXml65DUnitTest.java    |  22 +++++--
 .../gemfire/cache30/CacheXml66DUnitTest.java    |  21 +++++--
 .../gemfire/cache30/CacheXml70DUnitTest.java    |  22 +++++--
 .../gemfire/cache30/CacheXml80DUnitTest.java    |  22 +++++--
 .../gemfire/cache30/CacheXml81DUnitTest.java    |  22 ++++---
 .../gemfire/cache30/CacheXml90DUnitTest.java    |  22 +++++--
 .../gemfire/cache30/CacheXmlTestCase.java       |  16 +++++
 .../cache30/CachedAllEventsDUnitTest.java       |  21 +++++--
 .../gemfire/cache30/CallbackArgDUnitTest.java   |  21 +++++--
 .../cache30/CertifiableTestCacheListener.java   |  24 ++++---
 .../cache30/ClearMultiVmCallBkDUnitTest.java    |  22 +++++--
 .../gemfire/cache30/ClearMultiVmDUnitTest.java  |  22 +++++--
 .../cache30/ClientMembershipDUnitTest.java      |  21 +++++--
 .../ClientMembershipSelectorDUnitTest.java      |  16 +++++
 .../ClientRegisterInterestDUnitTest.java        |  21 +++++--
 ...ClientRegisterInterestSelectorDUnitTest.java |  16 +++++
 .../cache30/ClientServerCCEDUnitTest.java       |  21 +++++--
 .../gemfire/cache30/ClientServerTestCase.java   |  21 +++++--
 .../ConcurrentLeaveDuringGIIDUnitTest.java      |  21 +++++--
 ...ibutedNoAckAsyncOverflowRegionDUnitTest.java |  22 ++++---
 ...iskDistributedNoAckAsyncRegionDUnitTest.java |  22 ++++---
 .../DiskDistributedNoAckRegionTestCase.java     |  22 ++++---
 ...ributedNoAckSyncOverflowRegionDUnitTest.java |  22 ++++---
 .../gemfire/cache30/DiskRegionDUnitTest.java    |  21 +++++--
 .../gemfire/cache30/DiskRegionTestImpl.java     |  22 ++++---
 .../cache30/DistAckMapMethodsDUnitTest.java     |  22 +++++--
 ...ckOverflowRegionCCECompressionDUnitTest.java |  21 +++++--
 ...istributedAckOverflowRegionCCEDUnitTest.java |  21 +++++--
 ...tedAckOverflowRegionCCEOffHeapDUnitTest.java |  16 +++++
 ...PersistentRegionCCECompressionDUnitTest.java |  21 +++++--
 ...tributedAckPersistentRegionCCEDUnitTest.java |  22 +++++--
 ...dAckPersistentRegionCCEOffHeapDUnitTest.java |  16 +++++
 .../DistributedAckRegionCCEDUnitTest.java       |  22 +++++--
 ...DistributedAckRegionCCEOffHeapDUnitTest.java |  16 +++++
 ...istributedAckRegionCompressionDUnitTest.java |  21 +++++--
 .../cache30/DistributedAckRegionDUnitTest.java  |  21 +++++--
 .../DistributedAckRegionOffHeapDUnitTest.java   |  16 +++++
 .../DistributedNoAckRegionCCEDUnitTest.java     |  21 +++++--
 ...stributedNoAckRegionCCEOffHeapDUnitTest.java |  16 +++++
 ...tributedNoAckRegionCompressionDUnitTest.java |  21 +++++--
 .../DistributedNoAckRegionDUnitTest.java        |  21 +++++--
 .../DistributedNoAckRegionOffHeapDUnitTest.java |  16 +++++
 .../gemfire/cache30/DynamicRegionDUnitTest.java |  21 +++++--
 .../gemfire/cache30/GlobalLockingDUnitTest.java |  21 +++++--
 .../cache30/GlobalRegionCCEDUnitTest.java       |  22 +++++--
 .../GlobalRegionCCEOffHeapDUnitTest.java        |  16 +++++
 .../GlobalRegionCompressionDUnitTest.java       |  21 +++++--
 .../gemfire/cache30/GlobalRegionDUnitTest.java  |  21 +++++--
 .../cache30/GlobalRegionOffHeapDUnitTest.java   |  16 +++++
 .../cache30/LRUEvictionControllerDUnitTest.java |  21 +++++--
 .../gemfire/cache30/LocalRegionDUnitTest.java   |  21 +++++--
 .../MemLRUEvictionControllerDUnitTest.java      |  21 +++++--
 .../gemfire/cache30/MultiVMRegionTestCase.java  |  21 +++++--
 .../gemfire/cache30/MyGatewayEventFilter1.java  |  21 +++++--
 .../gemfire/cache30/MyGatewayEventFilter2.java  |  23 ++++---
 .../cache30/MyGatewayTransportFilter1.java      |  21 +++++--
 .../cache30/MyGatewayTransportFilter2.java      |  21 +++++--
 .../OffHeapLRUEvictionControllerDUnitTest.java  |  21 +++++--
 .../PRBucketSynchronizationDUnitTest.java       |  21 +++++--
 .../PartitionedRegionCompressionDUnitTest.java  |  21 +++++--
 .../cache30/PartitionedRegionDUnitTest.java     |  21 +++++--
 ...tionedRegionMembershipListenerDUnitTest.java |  22 +++++--
 .../PartitionedRegionOffHeapDUnitTest.java      |  16 +++++
 .../cache30/PreloadedRegionTestCase.java        |  21 +++++--
 .../gemfire/cache30/ProxyDUnitTest.java         |  21 +++++--
 .../cache30/PutAllCallBkRemoteVMDUnitTest.java  |  22 +++++--
 .../cache30/PutAllCallBkSingleVMDUnitTest.java  |  22 +++++--
 .../gemfire/cache30/PutAllMultiVmDUnitTest.java |  22 +++++--
 .../gemfire/cache30/QueueMsgDUnitTest.java      |  21 +++++--
 .../cache30/RRSynchronizationDUnitTest.java     |  21 +++++--
 .../gemfire/cache30/ReconnectDUnitTest.java     |  21 +++++--
 .../ReconnectedCacheServerDUnitTest.java        |  21 +++++--
 .../cache30/RegionAttributesTestCase.java       |  21 +++++--
 .../cache30/RegionExpirationDUnitTest.java      |  21 +++++--
 .../RegionMembershipListenerDUnitTest.java      |  21 +++++--
 .../RegionReliabilityDistAckDUnitTest.java      |  21 +++++--
 .../RegionReliabilityDistNoAckDUnitTest.java    |  21 +++++--
 .../RegionReliabilityGlobalDUnitTest.java       |  21 +++++--
 .../RegionReliabilityListenerDUnitTest.java     |  21 +++++--
 .../cache30/RegionReliabilityTestCase.java      |  21 +++++--
 .../gemfire/cache30/RegionTestCase.java         |  21 +++++--
 .../gemfire/cache30/ReliabilityTestCase.java    |  21 +++++--
 .../cache30/RemoveAllMultiVmDUnitTest.java      |  22 +++++--
 .../gemfire/cache30/RequiredRolesDUnitTest.java |  21 +++++--
 .../cache30/RolePerformanceDUnitTest.java       |  21 +++++--
 .../gemfire/cache30/SearchAndLoadDUnitTest.java |  21 +++++--
 .../cache30/SlowRecDUnitDisabledTest.java       |  21 +++++--
 .../gemfire/cache30/TXDistributedDUnitTest.java |  21 +++++--
 .../gemfire/cache30/TXOrderDUnitTest.java       |  21 +++++--
 .../cache30/TXRestrictionsDUnitTest.java        |  21 +++++--
 .../gemfire/cache30/TestCacheCallback.java      |  21 +++++--
 .../gemfire/cache30/TestCacheListener.java      |  21 +++++--
 .../gemfire/cache30/TestCacheLoader.java        |  21 +++++--
 .../gemfire/cache30/TestCacheWriter.java        |  21 +++++--
 .../gemfire/cache30/TestDiskRegion.java         |  21 +++++--
 .../gemstone/gemfire/cache30/TestHeapLRU.java   |  21 +++++--
 .../gemfire/cache30/TestPdxSerializer.java      |  21 +++++--
 .../cache30/TestTransactionListener.java        |  21 +++++--
 .../gemfire/cache30/TestTransactionWriter.java  |  21 +++++--
 .../AnalyzeSerializablesJUnitTest.java          |  21 +++++--
 .../codeAnalysis/ClassAndMethodDetails.java     |  23 ++++---
 .../gemfire/codeAnalysis/ClassAndMethods.java   |  23 ++++---
 .../codeAnalysis/ClassAndVariableDetails.java   |  23 ++++---
 .../gemfire/codeAnalysis/ClassAndVariables.java |  23 ++++---
 .../codeAnalysis/CompiledClassUtils.java        |  23 ++++---
 .../codeAnalysis/decode/CompiledAttribute.java  |  21 +++++--
 .../codeAnalysis/decode/CompiledClass.java      |  21 +++++--
 .../codeAnalysis/decode/CompiledCode.java       |  21 +++++--
 .../codeAnalysis/decode/CompiledField.java      |  21 +++++--
 .../codeAnalysis/decode/CompiledMethod.java     |  21 +++++--
 .../gemfire/codeAnalysis/decode/cp/Cp.java      |  21 +++++--
 .../gemfire/codeAnalysis/decode/cp/CpClass.java |  21 +++++--
 .../codeAnalysis/decode/cp/CpDouble.java        |  21 +++++--
 .../codeAnalysis/decode/cp/CpFieldref.java      |  18 +++++-
 .../gemfire/codeAnalysis/decode/cp/CpFloat.java |  18 +++++-
 .../codeAnalysis/decode/cp/CpInteger.java       |  18 +++++-
 .../decode/cp/CpInterfaceMethodref.java         |  18 +++++-
 .../gemfire/codeAnalysis/decode/cp/CpLong.java  |  21 +++++--
 .../codeAnalysis/decode/cp/CpMethodref.java     |  18 +++++-
 .../codeAnalysis/decode/cp/CpNameAndType.java   |  18 +++++-
 .../codeAnalysis/decode/cp/CpString.java        |  18 +++++-
 .../gemfire/codeAnalysis/decode/cp/CpUtf8.java  |  21 +++++--
 .../distributed/AbstractLauncherJUnitTest.java  |  20 ++++--
 .../AbstractLauncherJUnitTestCase.java          |  16 +++++
 .../AbstractLauncherServiceStatusJUnitTest.java |  21 +++++--
 .../AbstractLocatorLauncherJUnitTestCase.java   |  16 +++++
 .../AbstractServerLauncherJUnitTestCase.java    |  16 +++++
 .../gemfire/distributed/AuthInitializer.java    |  23 ++++---
 .../distributed/CommonLauncherTestSuite.java    |  20 ++++--
 .../distributed/DistributedMemberDUnitTest.java |  21 +++++--
 .../DistributedSystemConnectPerf.java           |  21 +++++--
 .../distributed/DistributedSystemDUnitTest.java |  21 +++++--
 .../distributed/DistributedTestSuite.java       |  16 +++++
 .../distributed/HostedLocatorsDUnitTest.java    |  16 +++++
 .../gemfire/distributed/JGroupsJUnitTest.java   |  21 +++++--
 .../LauncherMemberMXBeanJUnitTest.java          |  16 +++++
 .../gemfire/distributed/LauncherTestSuite.java  |  16 +++++
 .../gemfire/distributed/LocatorDUnitTest.java   |  21 +++++--
 .../gemfire/distributed/LocatorJUnitTest.java   |  21 +++++--
 .../distributed/LocatorLauncherJUnitTest.java   |  20 ++++--
 .../LocatorLauncherLocalFileJUnitTest.java      |  16 +++++
 .../LocatorLauncherLocalJUnitTest.java          |  16 +++++
 .../LocatorLauncherRemoteFileJUnitTest.java     |  16 +++++
 .../LocatorLauncherRemoteJUnitTest.java         |  16 +++++
 .../gemfire/distributed/MyAuthenticator.java    |  23 ++++---
 .../gemfire/distributed/MyPrincipal.java        |  18 +++++-
 .../gemfire/distributed/RoleDUnitTest.java      |  21 +++++--
 .../distributed/ServerLauncherJUnitTest.java    |  20 ++++--
 .../ServerLauncherLocalFileJUnitTest.java       |  20 ++++--
 .../ServerLauncherLocalJUnitTest.java           |  16 +++++
 .../ServerLauncherRemoteFileJUnitTest.java      |  16 +++++
 .../ServerLauncherRemoteJUnitTest.java          |  20 ++++--
 .../ServerLauncherWithSpringJUnitTest.java      |  16 +++++
 .../distributed/SystemAdminDUnitTest.java       |  21 +++++--
 .../AtomicLongWithTerminalStateJUnitTest.java   |  21 +++++--
 .../distributed/internal/Bug40751DUnitTest.java |  21 +++++--
 .../ConsoleDistributionManagerDUnitTest.java    |  21 +++++--
 .../distributed/internal/DateMessage.java       |  21 +++++--
 .../internal/DistributionAdvisorDUnitTest.java  |  21 +++++--
 .../internal/DistributionManagerDUnitTest.java  |  21 +++++--
 ...istributionManagerTimeDUnitDisabledTest.java |  21 +++++--
 .../GemFireTimeSyncServiceDUnitTest.java        |  21 +++++--
 .../InternalDistributedSystemJUnitTest.java     |  21 +++++--
 .../gemfire/distributed/internal/LDM.java       |  21 +++++--
 .../internal/LocalDistributionManagerTest.java  |  21 +++++--
 .../internal/LocatorLoadSnapshotJUnitTest.java  |  21 +++++--
 .../internal/ProduceDateMessages.java           |  21 +++++--
 .../internal/ProductUseLogDUnitTest.java        |  21 +++++--
 .../internal/ProductUseLogJUnitTest.java        |  21 +++++--
 .../internal/ServerLocatorJUnitTest.java        |  20 ++++--
 .../internal/SharedConfigurationJUnitTest.java  |  21 +++++--
 .../internal/StartupMessageDataJUnitTest.java   |  21 +++++--
 .../deadlock/DeadlockDetectorJUnitTest.java     |  21 +++++--
 .../deadlock/DependencyGraphJUnitTest.java      |  21 +++++--
 .../GemFireDeadlockDetectorDUnitTest.java       |  21 +++++--
 .../deadlock/UnsafeThreadLocalJUnitTest.java    |  21 +++++--
 .../locks/CollaborationJUnitDisabledTest.java   |  21 +++++--
 .../internal/locks/DLockGrantorHelper.java      |  21 +++++--
 ...entrantReadWriteWriteShareLockJUnitTest.java |  21 +++++--
 .../membership/MembershipJUnitTest.java         |  21 +++++--
 .../jgroup/MembershipManagerHelper.java         |  21 +++++--
 .../StreamingOperationManyDUnitTest.java        |  29 +++++----
 .../StreamingOperationOneDUnitTest.java         |  29 +++++----
 .../tcpserver/LocatorVersioningJUnitTest.java   |  17 ++++-
 ...cpServerBackwardCompatDUnitDisabledTest.java |  17 ++++-
 .../tcpserver/TcpServerJUnitDisabledTest.java   |  16 +++++
 .../support/DistributedSystemAdapter.java       |  21 +++++--
 .../gemfire/disttx/CacheMapDistTXDUnitTest.java |  16 +++++
 .../gemfire/disttx/DistTXDebugDUnitTest.java    |  16 +++++
 .../disttx/DistTXDistributedTestSuite.java      |  16 +++++
 .../gemfire/disttx/DistTXExpiryJUnitTest.java   |  16 +++++
 .../gemfire/disttx/DistTXJUnitTest.java         |  16 +++++
 .../disttx/DistTXManagerImplJUnitTest.java      |  16 +++++
 .../gemfire/disttx/DistTXOrderDUnitTest.java    |  16 +++++
 .../disttx/DistTXPersistentDebugDUnitTest.java  |  18 +++++-
 .../DistTXReleasesOffHeapOnCloseJUnitTest.java  |  16 +++++
 .../disttx/DistTXRestrictionsDUnitTest.java     |  16 +++++
 .../disttx/DistTXWithDeltaDUnitTest.java        |  16 +++++
 .../gemfire/disttx/DistTXWriterJUnitTest.java   |  16 +++++
 .../disttx/DistTXWriterOOMEJUnitTest.java       |  16 +++++
 .../disttx/DistributedTransactionDUnitTest.java |  18 +++++-
 .../gemfire/disttx/PRDistTXDUnitTest.java       |  16 +++++
 .../gemfire/disttx/PRDistTXJUnitTest.java       |  16 +++++
 .../disttx/PRDistTXWithVersionsDUnitTest.java   |  16 +++++
 ...entPartitionedRegionWithDistTXDUnitTest.java |  16 +++++
 .../gemfire/internal/ArrayEqualsJUnitTest.java  |  21 +++++--
 .../gemfire/internal/AvailablePortHelper.java   |  21 +++++--
 .../internal/AvailablePortJUnitTest.java        |  21 +++++--
 ...wardCompatibilitySerializationJUnitTest.java |  21 +++++--
 .../gemfire/internal/Bug49856JUnitTest.java     |  21 +++++--
 .../gemfire/internal/Bug51616JUnitTest.java     |  16 +++++
 .../gemfire/internal/ByteArrayData.java         |  21 +++++--
 .../gemstone/gemfire/internal/ClassBuilder.java |  21 +++++--
 .../ClassNotFoundExceptionDUnitTest.java        |  21 +++++--
 .../internal/ClassPathLoaderJUnitTest.java      |  21 +++++--
 .../internal/CopyOnWriteHashSetJUnitTest.java   |  21 +++++--
 .../internal/DataSerializableJUnitTest.java     |  21 +++++--
 .../gemstone/gemfire/internal/FDDUnitTest.java  |  17 +++--
 .../gemfire/internal/FileUtilJUnitTest.java     |  21 +++++--
 .../internal/GemFireStatSamplerJUnitTest.java   |  21 +++++--
 .../GemFireVersionIntegrationJUnitTest.java     |  21 +++++--
 .../internal/GemFireVersionJUnitTest.java       |  21 +++++--
 .../internal/HeapDataOutputStreamJUnitTest.java |  21 +++++--
 .../gemfire/internal/InlineKeyJUnitTest.java    |  21 +++++--
 .../gemfire/internal/JSSESocketJUnitTest.java   |  21 +++++--
 .../internal/JarClassLoaderJUnitTest.java       |  22 ++++---
 .../gemfire/internal/JarDeployerDUnitTest.java  |  22 ++++---
 .../com/gemstone/gemfire/internal/JavaExec.java |  21 +++++--
 .../gemfire/internal/LineWrapUnitJUnitTest.java |  21 +++++--
 .../gemstone/gemfire/internal/LongBuffer.java   |  21 +++++--
 .../gemfire/internal/NanoTimerJUnitTest.java    |  21 +++++--
 .../gemfire/internal/ObjIdMapJUnitTest.java     |  21 +++++--
 .../internal/OneTaskOnlyDecoratorJUnitTest.java |  21 +++++--
 .../internal/PdxDeleteFieldDUnitTest.java       |  16 +++++
 .../internal/PdxDeleteFieldJUnitTest.java       |  16 +++++
 .../gemfire/internal/PdxRenameDUnitTest.java    |  16 +++++
 .../gemfire/internal/PdxRenameJUnitTest.java    |  16 +++++
 .../PutAllOperationContextJUnitTest.java        |  21 +++++--
 .../internal/SSLConfigIntegrationJUnitTest.java |  16 +++++
 .../gemfire/internal/SSLConfigJUnitTest.java    |  20 ++++--
 ...hreadPoolExecutorWithKeepAliveJUnitTest.java |  21 +++++--
 .../internal/SimpleStatSamplerJUnitTest.java    |  21 +++++--
 .../gemfire/internal/SocketCloserJUnitTest.java |  16 +++++
 .../internal/SocketCloserWithWaitJUnitTest.java |  16 +++++
 .../StatArchiveWriterReaderJUnitTest.java       |  21 +++++--
 .../gemfire/internal/StatSamplerJUnitTest.java  |  21 +++++--
 .../gemfire/internal/StatSamplerTestCase.java   |  21 +++++--
 .../internal/UniqueIdGeneratorJUnitTest.java    |  21 +++++--
 .../internal/cache/AbstractRegionJUnitTest.java |  21 +++++--
 .../gemfire/internal/cache/BackupDUnitTest.java |  21 +++++--
 .../gemfire/internal/cache/BackupJUnitTest.java |  21 +++++--
 .../internal/cache/Bug33359DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug33726DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug33726JUnitTest.java       |  21 +++++--
 .../Bug34179TooManyFilesOpenJUnitTest.java      |  21 +++++--
 .../internal/cache/Bug34583JUnitTest.java       |  21 +++++--
 .../internal/cache/Bug37241DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug37244JUnitTest.java       |  21 +++++--
 .../internal/cache/Bug37377DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug37500JUnitTest.java       |  21 +++++--
 .../internal/cache/Bug39079DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug40299DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug40632DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug41091DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug41733DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug41957DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug42010StatsDUnitTest.java  |  21 +++++--
 .../internal/cache/Bug42055DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug45164DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug45934DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug47667DUnitTest.java       |  21 +++++--
 .../internal/cache/Bug48182JUnitTest.java       |  16 +++++
 .../internal/cache/CacheAdvisorDUnitTest.java   |  21 +++++--
 .../cache/CacheLifecycleListenerJUnitTest.java  |  21 +++++--
 .../cache/ChunkValueWrapperJUnitTest.java       |  16 +++++
 .../internal/cache/ClearDAckDUnitTest.java      |  21 +++++--
 .../internal/cache/ClearGlobalDUnitTest.java    |  21 +++++--
 ...ssagesRegionCreationAndDestroyJUnitTest.java |  21 +++++--
 .../cache/ClientServerGetAllDUnitTest.java      |  21 +++++--
 ...ServerInvalidAndDestroyedEntryDUnitTest.java |  21 +++++--
 .../ClientServerTransactionCCEDUnitTest.java    |  21 +++++--
 .../cache/ClientServerTransactionDUnitTest.java |  21 +++++--
 .../cache/ComplexDiskRegionJUnitTest.java       |  21 +++++--
 .../ConcurrentDestroySubRegionDUnitTest.java    |  21 +++++--
 ...entFlushingAndRegionOperationsJUnitTest.java |  21 +++++--
 .../cache/ConcurrentMapLocalJUnitTest.java      |  21 +++++--
 .../cache/ConcurrentMapOpsDUnitTest.java        |  21 +++++--
 .../ConcurrentRegionOperationsJUnitTest.java    |  21 +++++--
 ...rentRollingAndRegionOperationsJUnitTest.java |  21 +++++--
 .../internal/cache/ConflationJUnitTest.java     |  21 +++++--
 .../cache/ConnectDisconnectDUnitTest.java       |  16 +++++
 .../cache/CustomerIDPartitionResolver.java      |  21 +++++--
 .../internal/cache/DeltaFaultInDUnitTest.java   |  21 +++++--
 .../cache/DeltaPropagationDUnitTest.java        |  21 +++++--
 .../cache/DeltaPropagationStatsDUnitTest.java   |  21 +++++--
 .../internal/cache/DeltaSizingDUnitTest.java    |  21 +++++--
 .../gemfire/internal/cache/DiskIFJUnitTest.java |  21 +++++--
 .../gemfire/internal/cache/DiskIdJUnitTest.java |  21 +++++--
 .../internal/cache/DiskInitFileJUnitTest.java   |  21 +++++--
 .../cache/DiskOfflineCompactionJUnitTest.java   |  21 +++++--
 .../internal/cache/DiskOldAPIsJUnitTest.java    |  21 +++++--
 ...iskRandomOperationsAndRecoveryJUnitTest.java |  21 +++++--
 .../cache/DiskRegByteArrayDUnitTest.java        |  21 +++++--
 .../cache/DiskRegCacheXmlJUnitTest.java         |  21 +++++--
 .../DiskRegCachexmlGeneratorJUnitTest.java      |  21 +++++--
 .../internal/cache/DiskRegCbkChkJUnitTest.java  |  21 +++++--
 .../DiskRegOplogSwtchingAndRollerJUnitTest.java |  21 +++++--
 .../cache/DiskRegRecoveryJUnitTest.java         |  21 +++++--
 .../cache/DiskRegionAsyncRecoveryJUnitTest.java |  21 +++++--
 ...RegionChangingRegionAttributesJUnitTest.java |  21 +++++--
 .../cache/DiskRegionClearJUnitTest.java         |  21 +++++--
 .../internal/cache/DiskRegionHelperFactory.java |  21 +++++--
 .../DiskRegionIllegalArguementsJUnitTest.java   |  21 +++++--
 ...iskRegionIllegalCacheXMLvaluesJUnitTest.java |  21 +++++--
 .../internal/cache/DiskRegionJUnitTest.java     |  21 +++++--
 .../internal/cache/DiskRegionProperties.java    |  21 +++++--
 .../internal/cache/DiskRegionTestingBase.java   |  21 +++++--
 .../cache/DiskStoreFactoryJUnitTest.java        |  21 +++++--
 .../cache/DiskWriteAttributesJUnitTest.java     |  21 +++++--
 ...DistrbutedRegionProfileOffHeapDUnitTest.java |  16 +++++
 .../cache/DistributedCacheTestCase.java         |  21 +++++--
 .../cache/EnumListenerEventJUnitTest.java       |  21 +++++--
 .../internal/cache/EventTrackerDUnitTest.java   |  21 +++++--
 .../cache/EvictionDUnitDisabledTest.java        |  21 +++++--
 .../cache/EvictionObjectSizerDUnitTest.java     |  21 +++++--
 .../internal/cache/EvictionStatsDUnitTest.java  |  21 +++++--
 .../internal/cache/EvictionTestBase.java        |  21 +++++--
 .../internal/cache/FaultingInJUnitTest.java     |  21 +++++--
 .../cache/FixedPRSinglehopDUnitTest.java        |  21 +++++--
 .../internal/cache/GIIDeltaDUnitTest.java       |  21 +++++--
 .../internal/cache/GIIFlowControlDUnitTest.java |  21 +++++--
 .../internal/cache/GridAdvisorDUnitTest.java    |  21 +++++--
 .../internal/cache/HABug36773DUnitTest.java     |  21 +++++--
 .../HAOverflowMemObjectSizerDUnitTest.java      |  21 +++++--
 .../cache/IncrementalBackupDUnitTest.java       |  21 +++++--
 .../cache/InterruptClientServerDUnitTest.java   |  21 +++++--
 .../internal/cache/InterruptDiskJUnitTest.java  |  21 +++++--
 ...InterruptsConserveSocketsFalseDUnitTest.java |  16 +++++
 .../internal/cache/InterruptsDUnitTest.java     |  21 +++++--
 .../internal/cache/IteratorDUnitTest.java       |  21 +++++--
 .../LIFOEvictionAlgoEnabledRegionJUnitTest.java |  21 +++++--
 ...victionAlgoMemoryEnabledRegionJUnitTest.java |  21 +++++--
 .../internal/cache/MapClearGIIDUnitTest.java    |  21 +++++--
 .../internal/cache/MapInterface2JUnitTest.java  |  21 +++++--
 .../internal/cache/MapInterfaceJUnitTest.java   |  21 +++++--
 .../MultipleOplogsRollingFeatureJUnitTest.java  |  21 +++++--
 .../cache/NetSearchMessagingDUnitTest.java      |  21 +++++--
 .../cache/OffHeapEvictionDUnitTest.java         |  21 +++++--
 .../cache/OffHeapEvictionStatsDUnitTest.java    |  21 +++++--
 .../gemfire/internal/cache/OffHeapTestUtil.java |  21 +++++--
 .../cache/OfflineSnapshotJUnitTest.java         |  21 +++++--
 .../gemfire/internal/cache/OldVLJUnitTest.java  |  21 +++++--
 .../cache/OldValueImporterTestBase.java         |  16 +++++
 .../cache/OplogEntryIdMapJUnitTest.java         |  21 +++++--
 .../cache/OplogEntryIdSetJUnitTest.java         |  21 +++++--
 .../gemfire/internal/cache/OplogJUnitTest.java  |  21 +++++--
 .../internal/cache/OplogRVVJUnitTest.java       |  21 +++++--
 .../cache/OrderedTombstoneMapJUnitTest.java     |  21 +++++--
 .../cache/P2PDeltaPropagationDUnitTest.java     |  21 +++++--
 .../internal/cache/PRBadToDataDUnitTest.java    |  21 +++++--
 .../cache/PRConcurrentMapOpsJUnitTest.java      |  21 +++++--
 .../cache/PRDataStoreMemoryJUnitTest.java       |  21 +++++--
 .../PRDataStoreMemoryOffHeapJUnitTest.java      |  16 +++++
 .../gemfire/internal/cache/PRTXJUnitTest.java   |  21 +++++--
 .../cache/PartitionAttributesImplJUnitTest.java |  16 +++++
 .../cache/PartitionListenerDUnitTest.java       |  21 +++++--
 ...dRegionAPIConserveSocketsFalseDUnitTest.java |  21 +++++--
 .../cache/PartitionedRegionAPIDUnitTest.java    |  20 ++++--
 .../PartitionedRegionAsSubRegionDUnitTest.java  |  20 ++++--
 ...gionBucketCreationDistributionDUnitTest.java |  20 ++++--
 .../PartitionedRegionCacheCloseDUnitTest.java   |  20 ++++--
 ...rtitionedRegionCacheLoaderForRootRegion.java |  21 +++++--
 ...artitionedRegionCacheLoaderForSubRegion.java |  21 +++++--
 ...rtitionedRegionCacheXMLExampleDUnitTest.java |  21 +++++--
 .../PartitionedRegionCreationDUnitTest.java     |  20 ++++--
 .../PartitionedRegionCreationJUnitTest.java     |  20 ++++--
 .../cache/PartitionedRegionDUnitTestCase.java   |  20 ++++--
 .../PartitionedRegionDataStoreJUnitTest.java    |  20 ++++--
 ...rtitionedRegionDelayedRecoveryDUnitTest.java |  21 +++++--
 .../PartitionedRegionDestroyDUnitTest.java      |  20 ++++--
 .../PartitionedRegionEntryCountDUnitTest.java   |  21 +++++--
 .../PartitionedRegionEvictionDUnitTest.java     |  21 +++++--
 .../cache/PartitionedRegionHADUnitTest.java     |  20 ++++--
 ...onedRegionHAFailureAndRecoveryDUnitTest.java |  20 ++++--
 .../cache/PartitionedRegionHelperJUnitTest.java |  21 +++++--
 .../PartitionedRegionInvalidateDUnitTest.java   |  21 +++++--
 ...artitionedRegionLocalMaxMemoryDUnitTest.java |  20 ++++--
 ...nedRegionLocalMaxMemoryOffHeapDUnitTest.java |  16 +++++
 .../PartitionedRegionMultipleDUnitTest.java     |  20 ++++--
 ...rtitionedRegionOffHeapEvictionDUnitTest.java |  18 +++++-
 .../cache/PartitionedRegionPRIDDUnitTest.java   |  21 +++++--
 .../cache/PartitionedRegionQueryDUnitTest.java  |  21 +++++--
 ...artitionedRegionQueryEvaluatorJUnitTest.java |  21 +++++--
 ...artitionedRegionRedundancyZoneDUnitTest.java |  21 +++++--
 ...tionedRegionSerializableObjectJUnitTest.java |  21 +++++--
 .../PartitionedRegionSingleHopDUnitTest.java    |  21 +++++--
 ...RegionSingleHopWithServerGroupDUnitTest.java |  21 +++++--
 ...onedRegionSingleNodeOperationsJUnitTest.java |  20 ++++--
 .../cache/PartitionedRegionSizeDUnitTest.java   |  20 ++++--
 .../cache/PartitionedRegionStatsDUnitTest.java  |  20 ++++--
 .../cache/PartitionedRegionStatsJUnitTest.java  |  21 +++++--
 .../cache/PartitionedRegionTestHelper.java      |  20 ++++--
 .../PartitionedRegionTestUtilsDUnitTest.java    |  20 ++++--
 .../PartitionedRegionWithSameNameDUnitTest.java |  21 +++++--
 .../PersistentPartitionedRegionJUnitTest.java   |  16 +++++
 .../internal/cache/PutAllDAckDUnitTest.java     |  21 +++++--
 .../internal/cache/PutAllGlobalDUnitTest.java   |  21 +++++--
 .../cache/RegionEntryFlagsJUnitTest.java        |  21 +++++--
 .../cache/RemotePutReplyMessageJUnitTest.java   |  16 +++++
 .../cache/RemoteTransactionCCEDUnitTest.java    |  16 +++++
 .../cache/RemoteTransactionDUnitTest.java       |  21 +++++--
 .../internal/cache/RemoveAllDAckDUnitTest.java  |  21 +++++--
 .../internal/cache/RemoveDAckDUnitTest.java     |  21 +++++--
 .../internal/cache/RemoveGlobalDUnitTest.java   |  21 +++++--
 .../internal/cache/RunCacheInOldGemfire.java    |  21 +++++--
 .../cache/SimpleDiskRegionJUnitTest.java        |  21 +++++--
 .../internal/cache/SizingFlagDUnitTest.java     |  21 +++++--
 .../internal/cache/SnapshotTestUtil.java        |  16 +++++
 .../internal/cache/SystemFailureDUnitTest.java  |  21 +++++--
 .../internal/cache/TXManagerImplJUnitTest.java  |  21 +++++--
 .../cache/TXReservationMgrJUnitTest.java        |  21 +++++--
 .../gemfire/internal/cache/TestDelta.java       |  21 +++++--
 .../internal/cache/TestHelperForHydraTests.java |  16 +++++
 .../internal/cache/TestNonSizerObject.java      |  21 +++++--
 .../internal/cache/TestObjectSizerImpl.java     |  21 +++++--
 .../gemfire/internal/cache/TestUtils.java       |  21 +++++--
 .../cache/TombstoneCreationJUnitTest.java       |  21 +++++--
 .../cache/TransactionsWithDeltaDUnitTest.java   |  21 +++++--
 .../internal/cache/UnitTestValueHolder.java     |  18 +++++-
 .../gemfire/internal/cache/UnzipUtil.java       |  21 +++++--
 .../internal/cache/UpdateVersionJUnitTest.java  |  21 +++++--
 .../gemfire/internal/cache/VLJUnitTest.java     |  21 +++++--
 .../cache/control/FilterByPathJUnitTest.java    |  21 +++++--
 .../cache/control/MemoryMonitorJUnitTest.java   |  21 +++++--
 .../control/MemoryMonitorOffHeapJUnitTest.java  |  21 +++++--
 .../control/MemoryThresholdsJUnitTest.java      |  16 +++++
 .../control/RebalanceOperationDUnitTest.java    |  21 +++++--
 .../control/TestMemoryThresholdListener.java    |  21 +++++--
 ...skRegOverflowAsyncGetInMemPerfJUnitTest.java |  21 +++++--
 ...iskRegOverflowAsyncJUnitPerformanceTest.java |  21 +++++--
 ...lowSyncGetInMemPerfJUnitPerformanceTest.java |  21 +++++--
 ...DiskRegOverflowSyncJUnitPerformanceTest.java |  21 +++++--
 ...egionOverflowAsyncRollingOpLogJUnitTest.java |  21 +++++--
 ...RegionOverflowSyncRollingOpLogJUnitTest.java |  21 +++++--
 .../DiskRegionPerfJUnitPerformanceTest.java     |  21 +++++--
 .../DiskRegionPersistOnlySyncJUnitTest.java     |  21 +++++--
 ...DiskRegionRollOpLogJUnitPerformanceTest.java |  21 +++++--
 ...ltiThreadedOplogPerJUnitPerformanceTest.java |  21 +++++--
 .../cache/execute/Bug51193DUnitTest.java        |  16 +++++
 .../ClientServerFunctionExecutionDUnitTest.java |  21 +++++--
 .../execute/ColocationFailoverDUnitTest.java    |  21 +++++--
 .../cache/execute/CustomResultCollector.java    |  21 +++++--
 .../execute/CustomerIDPartitionResolver.java    |  21 +++++--
 ...ributedRegionFunctionExecutionDUnitTest.java |  21 +++++--
 .../FunctionExecution_ExceptionDUnitTest.java   |  21 +++++--
 .../execute/FunctionServiceStatsDUnitTest.java  |  21 +++++--
 .../cache/execute/LocalDataSetDUnitTest.java    |  21 +++++--
 .../cache/execute/LocalDataSetFunction.java     |  21 +++++--
 .../execute/LocalDataSetIndexingDUnitTest.java  |  21 +++++--
 .../LocalFunctionExecutionDUnitTest.java        |  21 +++++--
 .../MemberFunctionExecutionDUnitTest.java       |  21 +++++--
 .../MultiRegionFunctionExecutionDUnitTest.java  |  21 +++++--
 .../execute/MyFunctionExecutionException.java   |  21 +++++--
 .../cache/execute/MyTransactionFunction.java    |  21 +++++--
 .../OnGroupsFunctionExecutionDUnitTest.java     |  21 +++++--
 ...ntServerFunctionExecutionNoAckDUnitTest.java |  21 +++++--
 ...tServerRegionFunctionExecutionDUnitTest.java |  21 +++++--
 ...egionFunctionExecutionFailoverDUnitTest.java |  21 +++++--
 ...onFunctionExecutionNoSingleHopDUnitTest.java |  21 +++++--
 ...onExecutionSelectorNoSingleHopDUnitTest.java |  21 +++++--
 ...gionFunctionExecutionSingleHopDUnitTest.java |  21 +++++--
 .../cache/execute/PRClientServerTestBase.java   |  21 +++++--
 .../cache/execute/PRColocationDUnitTest.java    |  21 +++++--
 .../execute/PRCustomPartitioningDUnitTest.java  |  21 +++++--
 .../execute/PRFunctionExecutionDUnitTest.java   |  21 +++++--
 .../PRFunctionExecutionTimeOutDUnitTest.java    |  21 +++++--
 ...ctionExecutionWithResultSenderDUnitTest.java |  21 +++++--
 .../execute/PRPerformanceTestDUnitTest.java     |  21 +++++--
 .../cache/execute/PRTransactionDUnitTest.java   |  21 +++++--
 .../PRTransactionWithVersionsDUnitTest.java     |  16 +++++
 .../internal/cache/execute/PerfFunction.java    |  21 +++++--
 .../internal/cache/execute/PerfTxFunction.java  |  21 +++++--
 .../cache/execute/PerformanceTestFunction.java  |  21 +++++--
 .../execute/SingleHopGetAllPutAllDUnitTest.java |  21 +++++--
 .../internal/cache/execute/TestFunction.java    |  21 +++++--
 .../internal/cache/execute/data/CustId.java     |  21 +++++--
 .../internal/cache/execute/data/Customer.java   |  21 +++++--
 .../internal/cache/execute/data/Order.java      |  21 +++++--
 .../internal/cache/execute/data/OrderId.java    |  21 +++++--
 .../internal/cache/execute/data/Shipment.java   |  21 +++++--
 .../internal/cache/execute/data/ShipmentId.java |  21 +++++--
 .../SimpleExtensionPointJUnitTest.java          |  21 +++++--
 .../extension/mock/AbstractMockExtension.java   |  21 +++++--
 .../mock/AbstractMockExtensionXmlGenerator.java |  21 +++++--
 .../mock/AlterMockCacheExtensionFunction.java   |  21 +++++--
 .../mock/AlterMockRegionExtensionFunction.java  |  21 +++++--
 .../mock/CreateMockCacheExtensionFunction.java  |  21 +++++--
 .../mock/CreateMockRegionExtensionFunction.java |  21 +++++--
 .../mock/DestroyMockCacheExtensionFunction.java |  21 +++++--
 .../DestroyMockRegionExtensionFunction.java     |  21 +++++--
 .../extension/mock/MockCacheExtension.java      |  21 +++++--
 .../mock/MockCacheExtensionXmlGenerator.java    |  21 +++++--
 .../extension/mock/MockExtensionCommands.java   |  21 +++++--
 .../extension/mock/MockExtensionXmlParser.java  |  21 +++++--
 .../extension/mock/MockRegionExtension.java     |  21 +++++--
 .../mock/MockRegionExtensionXmlGenerator.java   |  21 +++++--
 ...gionFunctionFunctionInvocationException.java |  21 +++++--
 .../functions/DistributedRegionFunction.java    |  21 +++++--
 .../cache/functions/LocalDataSetFunction.java   |  21 +++++--
 .../internal/cache/functions/TestFunction.java  |  21 +++++--
 .../ha/BlockingHARQAddOperationJUnitTest.java   |  21 +++++--
 .../cache/ha/BlockingHARQStatsJUnitTest.java    |  21 +++++--
 .../cache/ha/BlockingHARegionJUnitTest.java     |  21 +++++--
 .../ha/BlockingHARegionQueueJUnitTest.java      |  21 +++++--
 .../cache/ha/Bug36853EventsExpiryDUnitTest.java |  21 +++++--
 .../internal/cache/ha/Bug48571DUnitTest.java    |  21 +++++--
 .../internal/cache/ha/Bug48879DUnitTest.java    |  16 +++++
 .../internal/cache/ha/ConflatableObject.java    |  21 +++++--
 .../cache/ha/EventIdOptimizationDUnitTest.java  |  21 +++++--
 .../cache/ha/EventIdOptimizationJUnitTest.java  |  20 ++++--
 .../internal/cache/ha/FailoverDUnitTest.java    |  21 +++++--
 .../internal/cache/ha/HABugInPutDUnitTest.java  |  21 +++++--
 .../internal/cache/ha/HAClearDUnitTest.java     |  21 +++++--
 .../cache/ha/HAConflationDUnitTest.java         |  21 +++++--
 .../internal/cache/ha/HADuplicateDUnitTest.java |  21 +++++--
 .../cache/ha/HAEventIdPropagationDUnitTest.java |  21 +++++--
 .../internal/cache/ha/HAExpiryDUnitTest.java    |  21 +++++--
 .../internal/cache/ha/HAGIIBugDUnitTest.java    |  21 +++++--
 .../internal/cache/ha/HAGIIDUnitTest.java       |  25 +++++---
 .../gemfire/internal/cache/ha/HAHelper.java     |  21 +++++--
 .../cache/ha/HARQAddOperationJUnitTest.java     |  21 +++++--
 .../cache/ha/HARQueueNewImplDUnitTest.java      |  21 +++++--
 .../internal/cache/ha/HARegionDUnitTest.java    |  21 +++++--
 .../internal/cache/ha/HARegionJUnitTest.java    |  20 ++++--
 .../cache/ha/HARegionQueueDUnitTest.java        |  21 +++++--
 .../cache/ha/HARegionQueueJUnitTest.java        |  20 ++++--
 ...HARegionQueueStartStopJUnitDisabledTest.java |  21 +++++--
 .../ha/HARegionQueueStartStopJUnitTest.java     |  21 +++++--
 .../cache/ha/HARegionQueueStatsJUnitTest.java   |  20 ++++--
 .../cache/ha/HASlowReceiverDUnitTest.java       |  21 +++++--
 .../ha/OperationsPropagationDUnitTest.java      |  21 +++++--
 .../internal/cache/ha/PutAllDUnitTest.java      |  21 +++++--
 .../cache/ha/StatsBugDUnitDisabledTest.java     |  21 +++++--
 .../cache/ha/TestBlockingHARegionQueue.java     |  21 +++++--
 .../cache/ha/ThreadIdentifierJUnitTest.java     |  21 +++++--
 .../cache/locks/TXLockServiceDUnitTest.java     |  21 +++++--
 .../internal/cache/lru/LRUClockJUnitTest.java   |  21 +++++--
 .../cache/partitioned/Bug39356DUnitTest.java    |  21 +++++--
 .../cache/partitioned/Bug43684DUnitTest.java    |  21 +++++--
 .../cache/partitioned/Bug47388DUnitTest.java    |  21 +++++--
 .../cache/partitioned/Bug51400DUnitTest.java    |  21 +++++--
 .../partitioned/ElidedPutAllDUnitTest.java      |  21 +++++--
 .../OfflineMembersDetailsJUnitTest.java         |  21 +++++--
 .../partitioned/PartitionResolverDUnitTest.java |  21 +++++--
 .../PartitionedRegionLoadModelJUnitTest.java    |  21 +++++--
 .../PartitionedRegionLoaderWriterDUnitTest.java |  21 +++++--
 ...rtitionedRegionMetaDataCleanupDUnitTest.java |  21 +++++--
 .../partitioned/PersistPRKRFDUnitTest.java      |  21 +++++--
 ...tentColocatedPartitionedRegionDUnitTest.java |  21 +++++--
 .../PersistentPartitionedRegionDUnitTest.java   |  21 +++++--
 ...tentPartitionedRegionOldConfigDUnitTest.java |  21 +++++--
 .../PersistentPartitionedRegionTestBase.java    |  21 +++++--
 ...rtitionedRegionWithTransactionDUnitTest.java |  21 +++++--
 .../PutPutReplyMessageJUnitTest.java            |  16 +++++
 .../cache/partitioned/ShutdownAllDUnitTest.java |  21 +++++--
 ...treamingPartitionOperationManyDUnitTest.java |  28 +++++----
 ...StreamingPartitionOperationOneDUnitTest.java |  29 +++++----
 .../fixed/CustomerFixedPartitionResolver.java   |  21 +++++--
 .../fixed/FixedPartitioningDUnitTest.java       |  21 +++++--
 .../fixed/FixedPartitioningTestBase.java        |  21 +++++--
 ...ngWithColocationAndPersistenceDUnitTest.java |  21 +++++--
 .../cache/partitioned/fixed/MyDate1.java        |  21 +++++--
 .../cache/partitioned/fixed/MyDate2.java        |  21 +++++--
 .../cache/partitioned/fixed/MyDate3.java        |  21 +++++--
 .../fixed/QuarterPartitionResolver.java         |  21 +++++--
 .../SingleHopQuarterPartitionResolver.java      |  21 +++++--
 .../persistence/BackupInspectorJUnitTest.java   |  21 +++++--
 .../PersistentRVVRecoveryDUnitTest.java         |  21 +++++--
 .../PersistentRecoveryOrderDUnitTest.java       |  21 +++++--
 ...rsistentRecoveryOrderOldConfigDUnitTest.java |  21 +++++--
 .../PersistentReplicatedTestBase.java           |  21 +++++--
 .../TemporaryResultSetFactoryJUnitTest.java     |  21 +++++--
 .../cache/persistence/soplog/AppendLog.java     |  21 +++++--
 .../ArraySerializedComparatorJUnitTest.java     |  21 +++++--
 .../CompactionSortedOplogSetTestCase.java       |  21 +++++--
 .../persistence/soplog/CompactionTestCase.java  |  21 +++++--
 .../persistence/soplog/ComparisonTestCase.java  |  21 +++++--
 .../soplog/IndexComparatorJUnitTest.java        |  21 +++++--
 .../LexicographicalComparatorJUnitTest.java     |  21 +++++--
 .../soplog/RecoverableSortedOplogSet.java       |  21 +++++--
 .../soplog/SizeTieredCompactorJUnitTest.java    |  21 +++++--
 .../SizeTieredSortedOplogSetJUnitTest.java      |  16 +++++
 .../soplog/SortedBufferJUnitTest.java           |  16 +++++
 .../soplog/SortedOplogSetJUnitTest.java         |  21 +++++--
 .../soplog/SortedReaderTestCase.java            |  21 +++++--
 .../nofile/NoFileSortedOplogJUnitTest.java      |  21 +++++--
 .../GFSnapshotJUnitPerformanceTest.java         |  21 +++++--
 .../internal/cache/tier/Bug40396DUnitTest.java  |  21 +++++--
 .../tier/sockets/AcceptorImplJUnitTest.java     |  21 +++++--
 ...mpatibilityHigherVersionClientDUnitTest.java |  21 +++++--
 .../cache/tier/sockets/Bug36269DUnitTest.java   |  21 +++++--
 .../cache/tier/sockets/Bug36457DUnitTest.java   |  21 +++++--
 .../cache/tier/sockets/Bug36805DUnitTest.java   |  21 +++++--
 .../cache/tier/sockets/Bug36829DUnitTest.java   |  21 +++++--
 .../cache/tier/sockets/Bug36995DUnitTest.java   |  21 +++++--
 .../cache/tier/sockets/Bug37210DUnitTest.java   |  21 +++++--
 .../cache/tier/sockets/Bug37805DUnitTest.java   |  21 +++++--
 .../CacheServerMaxConnectionsJUnitTest.java     |  21 +++++--
 ...heServerSelectorMaxConnectionsJUnitTest.java |  16 +++++
 .../cache/tier/sockets/CacheServerTestUtil.java |  21 +++++--
 .../CacheServerTransactionsDUnitTest.java       |  21 +++++--
 ...acheServerTransactionsSelectorDUnitTest.java |  16 +++++
 .../tier/sockets/ClearPropagationDUnitTest.java |  21 +++++--
 .../tier/sockets/ClientConflationDUnitTest.java |  21 +++++--
 .../sockets/ClientHealthMonitorJUnitTest.java   |  21 +++++--
 .../ClientHealthMonitorSelectorJUnitTest.java   |  16 +++++
 .../sockets/ClientInterestNotifyDUnitTest.java  |  21 +++++--
 .../tier/sockets/ClientServerMiscDUnitTest.java |  21 +++++--
 .../ClientServerMiscSelectorDUnitTest.java      |  21 +++++--
 .../cache/tier/sockets/ConflationDUnitTest.java |  21 +++++--
 .../tier/sockets/ConnectionProxyJUnitTest.java  |  21 +++++--
 .../DataSerializerPropogationDUnitTest.java     |  21 +++++--
 .../cache/tier/sockets/DeltaEOFException.java   |  21 +++++--
 .../DestroyEntryPropagationDUnitTest.java       |  21 +++++--
 .../sockets/DurableClientBug39997DUnitTest.java |  21 +++++--
 .../DurableClientQueueSizeDUnitTest.java        |  17 ++++-
 .../DurableClientReconnectAutoDUnitTest.java    |  21 +++++--
 .../DurableClientReconnectDUnitTest.java        |  21 +++++--
 .../sockets/DurableClientStatsDUnitTest.java    |  21 +++++--
 .../sockets/DurableRegistrationDUnitTest.java   |  21 +++++--
 .../sockets/DurableResponseMatrixDUnitTest.java |  21 +++++--
 .../sockets/EventIDVerificationDUnitTest.java   |  21 +++++--
 .../EventIDVerificationInP2PDUnitTest.java      |  20 ++++--
 .../cache/tier/sockets/FaultyDelta.java         |  21 +++++--
 .../tier/sockets/FilterProfileJUnitTest.java    |  21 +++++--
 .../ForceInvalidateEvictionDUnitTest.java       |  21 +++++--
 ...ForceInvalidateOffHeapEvictionDUnitTest.java |  21 +++++--
 .../cache/tier/sockets/HABug36738DUnitTest.java |  21 +++++--
 .../cache/tier/sockets/HAInterestBaseTest.java  |  21 +++++--
 .../sockets/HAInterestDistributedTestCase.java  |  16 +++++
 .../tier/sockets/HAInterestPart1DUnitTest.java  |  21 +++++--
 .../tier/sockets/HAInterestPart2DUnitTest.java  |  21 +++++--
 .../sockets/HAStartupAndFailoverDUnitTest.java  |  21 +++++--
 .../internal/cache/tier/sockets/HaHelper.java   |  16 +++++
 .../tier/sockets/InterestListDUnitTest.java     |  21 +++++--
 .../sockets/InterestListEndpointDUnitTest.java  |  21 +++++--
 .../InterestListEndpointPRDUnitTest.java        |  21 +++++--
 .../InterestListEndpointSelectorDUnitTest.java  |  16 +++++
 .../sockets/InterestListFailoverDUnitTest.java  |  21 +++++--
 .../sockets/InterestListRecoveryDUnitTest.java  |  21 +++++--
 .../sockets/InterestRegrListenerDUnitTest.java  |  21 +++++--
 .../sockets/InterestResultPolicyDUnitTest.java  |  21 +++++--
 .../sockets/NewRegionAttributesDUnitTest.java   |  21 +++++--
 .../tier/sockets/ObjectPartListJUnitTest.java   |  21 +++++--
 .../tier/sockets/RedundancyLevelJUnitTest.java  |  20 ++++--
 .../sockets/RedundancyLevelPart1DUnitTest.java  |  21 +++++--
 .../sockets/RedundancyLevelPart2DUnitTest.java  |  21 +++++--
 .../sockets/RedundancyLevelPart3DUnitTest.java  |  21 +++++--
 .../tier/sockets/RedundancyLevelTestBase.java   |  21 +++++--
 .../tier/sockets/RegionCloseDUnitTest.java      |  21 +++++--
 ...erInterestBeforeRegionCreationDUnitTest.java |  21 +++++--
 .../sockets/RegisterInterestKeysDUnitTest.java  |  21 +++++--
 .../RegisterInterestKeysPRDUnitTest.java        |  21 +++++--
 .../sockets/ReliableMessagingDUnitTest.java     |  21 +++++--
 .../sockets/UnregisterInterestDUnitTest.java    |  21 +++++--
 .../sockets/UpdatePropagationDUnitTest.java     |  21 +++++--
 .../sockets/UpdatePropagationPRDUnitTest.java   |  21 +++++--
 .../VerifyEventIDGenerationInP2PDUnitTest.java  |  20 ++++--
 ...UpdatesFromNonInterestEndPointDUnitTest.java |  21 +++++--
 .../cache/versions/RVVExceptionJUnitTest.java   |  21 +++++--
 .../versions/RegionVersionHolderJUnitTest.java  |  21 +++++--
 .../RegionVersionHolderRandomJUnitTest.java     |  21 +++++--
 ...RegionVersionHolderSmallBitSetJUnitTest.java |  21 +++++--
 .../versions/RegionVersionVectorJUnitTest.java  |  21 +++++--
 .../cache/wan/CompressionConstants.java         |  21 +++++--
 .../cache/wan/CompressionInputStream.java       |  21 +++++--
 .../cache/wan/CompressionOutputStream.java      |  21 +++++--
 .../cache/wan/CustomAsyncEventListener.java     |  21 +++++--
 .../gemfire/internal/cache/wan/Filter70.java    |  21 +++++--
 .../cache/wan/MyAsyncEventListener.java         |  21 +++++--
 .../cache/wan/MyAsyncEventListener2.java        |  21 +++++--
 .../cache/wan/MyDistributedSystemListener.java  |  21 +++++--
 .../cache/wan/MyGatewaySenderEventListener.java |  21 +++++--
 .../wan/MyGatewaySenderEventListener2.java      |  21 +++++--
 .../cache/wan/MyGatewayTransportFilter1.java    |  21 +++++--
 .../cache/wan/MyGatewayTransportFilter2.java    |  21 +++++--
 .../cache/wan/MyGatewayTransportFilter3.java    |  21 +++++--
 .../cache/wan/MyGatewayTransportFilter4.java    |  21 +++++--
 .../internal/cache/wan/QueueListener.java       |  21 +++++--
 .../AsyncEventQueueValidationsJUnitTest.java    |  21 +++++--
 .../xmlcache/AbstractXmlParserJUnitTest.java    |  21 +++++--
 .../cache/xmlcache/CacheXmlParserJUnitTest.java |  21 +++++--
 .../xmlcache/CacheXmlVersionJUnitTest.java      |  17 ++++-
 .../PivotalEntityResolverJUnitTest.java         |  21 +++++--
 .../cache/xmlcache/RegionCreationJUnitTest.java |  21 +++++--
 .../xmlcache/XmlGeneratorUtilsJUnitTest.java    |  21 +++++--
 .../classpathloaderjunittest/DoesExist.java     |  16 +++++
 .../CompressionCacheConfigDUnitTest.java        |  21 +++++--
 .../CompressionCacheListenerDUnitTest.java      |  21 +++++--
 ...ompressionCacheListenerOffHeapDUnitTest.java |  16 +++++
 .../CompressionRegionConfigDUnitTest.java       |  21 +++++--
 .../CompressionRegionFactoryDUnitTest.java      |  21 +++++--
 .../CompressionRegionOperationsDUnitTest.java   |  21 +++++--
 ...ressionRegionOperationsOffHeapDUnitTest.java |  16 +++++
 .../compression/CompressionStatsDUnitTest.java  |  21 +++++--
 .../compression/SnappyCompressorJUnitTest.java  |  23 ++++---
 .../datasource/AbstractPoolCacheJUnitTest.java  |  21 +++++--
 .../internal/datasource/CleanUpJUnitTest.java   |  21 +++++--
 .../ConnectionPoolCacheImplJUnitTest.java       |  21 +++++--
 .../datasource/ConnectionPoolingJUnitTest.java  |  21 +++++--
 .../datasource/DataSourceFactoryJUnitTest.java  |  21 +++++--
 .../internal/datasource/RestartJUnitTest.java   |  21 +++++--
 .../internal/i18n/BasicI18nJUnitTest.java       |  21 +++++--
 .../io/CompositeOutputStreamJUnitTest.java      |  21 +++++--
 .../gemfire/internal/jndi/ContextJUnitTest.java |  21 +++++--
 .../internal/jta/BlockingTimeOutJUnitTest.java  |  21 +++++--
 .../gemfire/internal/jta/CacheUtils.java        |  21 +++++--
 .../internal/jta/DataSourceJTAJUnitTest.java    |  21 +++++--
 .../internal/jta/ExceptionJUnitTest.java        |  21 +++++--
 .../jta/GlobalTransactionJUnitTest.java         |  21 +++++--
 .../gemstone/gemfire/internal/jta/JTAUtils.java |  21 +++++--
 .../internal/jta/JtaIntegrationJUnitTest.java   |  16 +++++
 .../gemstone/gemfire/internal/jta/SyncImpl.java |  21 +++++--
 .../internal/jta/TransactionImplJUnitTest.java  |  21 +++++--
 .../jta/TransactionManagerImplJUnitTest.java    |  21 +++++--
 .../jta/TransactionTimeOutJUnitTest.java        |  21 +++++--
 .../jta/UserTransactionImplJUnitTest.java       |  21 +++++--
 .../internal/jta/dunit/CommitThread.java        |  21 +++++--
 .../internal/jta/dunit/ExceptionsDUnitTest.java |  21 +++++--
 .../jta/dunit/IdleTimeOutDUnitTest.java         |  21 +++++--
 .../jta/dunit/LoginTimeOutDUnitTest.java        |  21 +++++--
 .../jta/dunit/MaxPoolSizeDUnitTest.java         |  21 +++++--
 .../internal/jta/dunit/RollbackThread.java      |  21 +++++--
 .../jta/dunit/TransactionTimeOutDUnitTest.java  |  21 +++++--
 .../dunit/TxnManagerMultiThreadDUnitTest.java   |  21 +++++--
 .../internal/jta/dunit/TxnTimeOutDUnitTest.java |  21 +++++--
 .../internal/jta/functional/CacheJUnitTest.java |  21 +++++--
 .../jta/functional/TestXACacheLoader.java       |  21 +++++--
 .../internal/lang/ClassUtilsJUnitTest.java      |  21 ++++---
 .../internal/lang/InOutParameterJUnitTest.java  |  21 ++++---
 .../internal/lang/InitializerJUnitTest.java     |  21 +++++--
 .../internal/lang/ObjectUtilsJUnitTest.java     |  21 ++++---
 .../internal/lang/StringUtilsJUnitTest.java     |  21 +++++--
 .../internal/lang/SystemUtilsJUnitTest.java     |  21 ++++---
 .../internal/lang/ThreadUtilsJUnitTest.java     |  21 ++++---
 .../DistributedSystemLogFileJUnitTest.java      |  16 +++++
 .../logging/LocatorLogFileJUnitTest.java        |  16 +++++
 .../logging/LogServiceIntegrationJUnitTest.java |  16 +++++
 .../LogServiceIntegrationTestSupport.java       |  16 +++++
 .../internal/logging/LogServiceJUnitTest.java   |  16 +++++
 .../LogWriterDisabledPerformanceTest.java       |  16 +++++
 .../logging/LogWriterImplJUnitTest.java         |  16 +++++
 .../logging/LogWriterPerformanceTest.java       |  16 +++++
 .../logging/LoggingIntegrationTestSuite.java    |  16 +++++
 .../logging/LoggingPerformanceTestCase.java     |  16 +++++
 .../internal/logging/LoggingUnitTestSuite.java  |  16 +++++
 .../logging/MergeLogFilesJUnitTest.java         |  21 +++++--
 .../gemfire/internal/logging/NullLogWriter.java |  16 +++++
 .../internal/logging/SortLogFileJUnitTest.java  |  21 +++++--
 .../internal/logging/TestLogWriterFactory.java  |  16 +++++
 .../logging/log4j/AlertAppenderJUnitTest.java   |  16 +++++
 .../logging/log4j/ConfigLocatorJUnitTest.java   |  16 +++++
 .../log4j/FastLoggerIntegrationJUnitTest.java   |  16 +++++
 .../logging/log4j/FastLoggerJUnitTest.java      |  16 +++++
 .../FastLoggerWithDefaultConfigJUnitTest.java   |  16 +++++
 .../log4j/LocalizedMessageJUnitTest.java        |  16 +++++
 .../log4j/Log4J2DisabledPerformanceTest.java    |  16 +++++
 .../logging/log4j/Log4J2PerformanceTest.java    |  16 +++++
 .../log4j/Log4jIntegrationTestSuite.java        |  16 +++++
 .../logging/log4j/Log4jUnitTestSuite.java       |  16 +++++
 .../log4j/LogWriterAppenderJUnitTest.java       |  16 +++++
 .../LogWriterLoggerDisabledPerformanceTest.java |  16 +++++
 .../log4j/LogWriterLoggerPerformanceTest.java   |  16 +++++
 .../internal/net/SocketUtilsJUnitTest.java      |  21 ++++---
 .../offheap/ByteArrayMemoryChunkJUnitTest.java  |  16 +++++
 .../offheap/ConcurrentBagJUnitTest.java         |  16 +++++
 .../internal/offheap/DataTypeJUnitTest.java     |  16 +++++
 .../DirectByteBufferMemoryChunkJUnitTest.java   |  16 +++++
 .../offheap/FreeListOffHeapRegionJUnitTest.java |  16 +++++
 .../HeapByteBufferMemoryChunkJUnitTest.java     |  16 +++++
 .../internal/offheap/InlineKeyJUnitTest.java    |  16 +++++
 .../offheap/MemoryChunkJUnitTestBase.java       |  16 +++++
 .../offheap/NullOffHeapMemoryStats.java         |  16 +++++
 .../offheap/NullOutOfOffHeapMemoryListener.java |  16 +++++
 .../internal/offheap/OffHeapIndexJUnitTest.java |  16 +++++
 .../internal/offheap/OffHeapRegionBase.java     |  16 +++++
 .../offheap/OffHeapStorageJUnitTest.java        |  16 +++++
 .../offheap/OffHeapValidationJUnitTest.java     |  16 +++++
 .../OffHeapWriteObjectAsByteArrayJUnitTest.java |  16 +++++
 .../OldFreeListOffHeapRegionJUnitTest.java      |  16 +++++
 .../offheap/OutOfOffHeapMemoryDUnitTest.java    |  16 +++++
 ...mpleMemoryAllocatorFillPatternJUnitTest.java |  16 +++++
 .../offheap/SimpleMemoryAllocatorJUnitTest.java |  16 +++++
 ...moryAllocatorLifecycleListenerJUnitTest.java |  16 +++++
 .../TxReleasesOffHeapOnCloseJUnitTest.java      |  16 +++++
 .../offheap/UnsafeMemoryChunkJUnitTest.java     |  16 +++++
 .../BlockingProcessStreamReaderJUnitTest.java   |  16 +++++
 .../LocalProcessControllerJUnitTest.java        |  21 +++++--
 .../process/LocalProcessLauncherDUnitTest.java  |  21 +++++--
 .../process/LocalProcessLauncherJUnitTest.java  |  21 +++++--
 ...NonBlockingProcessStreamReaderJUnitTest.java |  16 +++++
 .../internal/process/PidFileJUnitTest.java      |  16 +++++
 .../ProcessControllerFactoryJUnitTest.java      |  16 +++++
 .../process/ProcessStreamReaderTestCase.java    |  16 +++++
 .../gemfire/internal/process/mbean/Process.java |  16 +++++
 .../internal/process/mbean/ProcessMBean.java    |  16 +++++
 ...tractSignalNotificationHandlerJUnitTest.java |  21 ++++---
 .../internal/size/ObjectSizerJUnitTest.java     |  21 +++++--
 .../internal/size/ObjectTraverserJUnitTest.java |  21 +++++--
 .../internal/size/ObjectTraverserPerf.java      |  21 +++++--
 .../size/SizeClassOnceObjectSizerJUnitTest.java |  21 +++++--
 .../gemfire/internal/size/SizeTestUtil.java     |  16 +++++
 .../size/WellKnownClassSizerJUnitTest.java      |  21 +++++--
 .../internal/statistics/DummyStatistics.java    |  21 +++++--
 .../statistics/SampleCollectorJUnitTest.java    |  21 +++++--
 .../statistics/StatMonitorHandlerJUnitTest.java |  21 +++++--
 .../statistics/StatisticsDUnitTest.java         |  21 +++++--
 .../statistics/StatisticsMonitorJUnitTest.java  |  21 +++++--
 .../internal/statistics/TestSampleHandler.java  |  21 +++++--
 .../statistics/TestStatArchiveWriter.java       |  21 +++++--
 .../statistics/TestStatisticsManager.java       |  21 +++++--
 .../statistics/TestStatisticsSampler.java       |  21 +++++--
 .../statistics/ValueMonitorJUnitTest.java       |  21 +++++--
 .../internal/stats50/AtomicStatsJUnitTest.java  |  21 +++++--
 .../util/AbortableTaskServiceJUnitTest.java     |  21 +++++--
 .../internal/util/ArrayUtilsJUnitTest.java      |  18 ++++--
 .../gemfire/internal/util/BytesJUnitTest.java   |  21 +++++--
 .../internal/util/CollectionUtilsJUnitTest.java |  18 ++++--
 .../internal/util/DelayedActionJUnitTest.java   |  21 +++++--
 .../gemfire/internal/util/IOUtilsJUnitTest.java |  21 +++++--
 .../gemfire/internal/util/SerializableImpl.java |  21 +++++--
 .../util/SerializableImplWithValue.java         |  21 +++++--
 .../gemfire/internal/util/Valuable.java         |  21 +++++--
 .../CompactConcurrentHashSetJUnitTest.java      |  16 +++++
 .../ConcurrentHashMapIteratorJUnitTest.java     |  21 +++++--
 .../concurrent/ReentrantSemaphoreJUnitTest.java |  21 +++++--
 .../SemaphoreReadWriteLockJUnitTest.java        |  21 +++++--
 .../cm/ConcurrentHashMapJUnitTest.java          |  21 +++++--
 .../concurrent/cm/CountedMapLoopsJUnitTest.java |  21 +++++--
 .../concurrent/cm/IntMapCheckJUnitTest.java     |  21 +++++--
 .../util/concurrent/cm/LoopHelpers.java         |  21 +++++--
 .../util/concurrent/cm/MapCheckJUnitTest.java   |  21 +++++--
 .../util/concurrent/cm/MapLoopsJUnitTest.java   |  21 +++++--
 .../util/concurrent/cm/RLJBarJUnitTest.java     |  21 +++++--
 .../concurrent/cm/StringMapLoopsJUnitTest.java  |  21 +++++--
 .../management/CacheManagementDUnitTest.java    |  22 ++++---
 .../management/ClientHealthStatsDUnitTest.java  |  20 ++++--
 .../gemfire/management/CompositeStats.java      |  21 +++++--
 .../gemfire/management/CompositeTestMBean.java  |  21 +++++--
 .../gemfire/management/CompositeTestMXBean.java |  16 +++++
 .../management/CompositeTypeTestDUnitTest.java  |  21 +++++--
 .../gemfire/management/CustomMBean.java         |  22 ++++---
 .../gemfire/management/CustomMXBean.java        |  20 ++++--
 .../management/DLockManagementDUnitTest.java    |  20 ++++--
 .../DataBrowserJSONValidationJUnitTest.java     |  21 +++++--
 .../management/DiskManagementDUnitTest.java     |  20 ++++--
 .../management/DistributedSystemDUnitTest.java  |  20 ++++--
 .../management/LocatorManagementDUnitTest.java  |  21 +++++--
 .../gemstone/gemfire/management/MBeanUtil.java  |  20 ++++--
 .../gemfire/management/ManagementTestBase.java  |  21 +++++--
 .../MemberMBeanAttributesDUnitTest.java         |  20 ++++--
 .../management/OffHeapManagementDUnitTest.java  |  16 +++++
 .../gemfire/management/QueryDataDUnitTest.java  |  21 +++++--
 .../management/RegionManagementDUnitTest.java   |  20 ++++--
 .../gemfire/management/TypedJsonJUnitTest.java  |  21 +++++--
 ...ersalMembershipListenerAdapterDUnitTest.java |  21 +++++--
 .../stats/AsyncEventQueueStatsJUnitTest.java    |  21 +++++--
 .../bean/stats/CacheServerStatsJUnitTest.java   |  21 +++++--
 .../bean/stats/DiskStatsJUnitTest.java          |  21 +++++--
 .../stats/DistributedSystemStatsDUnitTest.java  |  21 +++++--
 .../stats/DistributedSystemStatsJUnitTest.java  |  21 +++++--
 .../stats/GatewayReceiverStatsJUnitTest.java    |  21 +++++--
 .../bean/stats/GatewaySenderStatsJUnitTest.java |  21 +++++--
 .../bean/stats/MBeanStatsTestCase.java          |  21 +++++--
 .../bean/stats/MemberLevelStatsJUnitTest.java   |  21 +++++--
 .../bean/stats/RegionStatsJUnitTest.java        |  21 +++++--
 .../bean/stats/StatsRateJUnitTest.java          |  21 +++++--
 .../internal/JettyHelperJUnitTest.java          |  21 +++++--
 .../cli/ClasspathScanLoadHelperJUnitTest.java   |  21 +++++--
 .../internal/cli/CliUtilDUnitTest.java          |  21 +++++--
 .../internal/cli/CommandManagerJUnitTest.java   |  21 +++++--
 .../cli/CommandSeparatorEscapeJUnitTest.java    |  16 +++++
 .../internal/cli/DataCommandJsonJUnitTest.java  |  16 +++++
 .../internal/cli/GfshParserJUnitTest.java       |  21 +++++--
 .../cli/annotations/CliArgumentJUnitTest.java   |  21 +++++--
 .../AbstractCommandsSupportJUnitTest.java       |  21 ++++---
 .../commands/DiskStoreCommandsJUnitTest.java    |  21 ++++---
 .../HTTPServiceSSLSupportJUnitTest.java         |  20 ++++--
 .../cli/commands/IndexCommandsJUnitTest.java    |  21 ++++---
 .../RegionPathConverterJUnitTest.java           |  21 +++++--
 .../internal/cli/domain/AbstractImpl.java       |  16 +++++
 .../management/internal/cli/domain/Impl1.java   |  16 +++++
 .../management/internal/cli/domain/Impl12.java  |  16 +++++
 .../internal/cli/domain/Interface1.java         |  16 +++++
 .../internal/cli/domain/Interface2.java         |  16 +++++
 .../management/internal/cli/domain/Stock.java   |  21 +++++--
 .../management/internal/cli/dto/Car.java        |  16 +++++
 .../management/internal/cli/dto/Key1.java       |  21 +++++--
 .../management/internal/cli/dto/Key2.java       |  21 +++++--
 .../internal/cli/dto/ObjectWithCharAttr.java    |  21 +++++--
 .../management/internal/cli/dto/Value1.java     |  21 +++++--
 .../management/internal/cli/dto/Value2.java     |  21 +++++--
 .../DescribeDiskStoreFunctionJUnitTest.java     |  21 +++++--
 .../ListDiskStoresFunctionJUnitTest.java        |  21 +++++--
 .../functions/ListIndexFunctionJUnitTest.java   |  21 ++++---
 .../cli/parser/ParserUtilsJUnitTest.java        |  21 +++++--
 .../preprocessor/PreprocessorJUnitTest.java     |  21 +++++--
 .../PreprocessorUtilsJUnitTest.java             |  21 +++++--
 .../cli/shell/GfshConfigInitFileJUnitTest.java  |  16 +++++
 .../shell/GfshExecutionStrategyJUnitTest.java   |  21 +++++--
 .../cli/shell/GfshInitFileJUnitTest.java        |  16 +++++
 .../SharedConfigurationDUnitTest.java           |  21 +++++--
 .../configuration/ZipUtilsJUnitTest.java        |  21 +++++--
 .../domain/CacheElementJUnitTest.java           |  21 +++++--
 .../utils/XmlUtilsAddNewNodeJUnitTest.java      |  21 +++++--
 .../configuration/utils/XmlUtilsJUnitTest.java  |  21 +++++--
 .../internal/pulse/TestClientIdsDUnitTest.java  |  22 ++++---
 .../internal/pulse/TestFunctionsDUnitTest.java  |  22 ++++---
 .../internal/pulse/TestHeapDUnitTest.java       |  23 ++++---
 .../internal/pulse/TestLocatorsDUnitTest.java   |  22 ++++---
 .../pulse/TestSubscriptionsDUnitTest.java       |  20 ++++--
 .../internal/security/JSONAuthCodeTest.java     |  16 +++++
 .../security/JSONAuthorizationTest.java         |  16 +++++
 .../security/ResourceOperationJUnit.java        |  16 +++++
 .../ReadOpFileAccessControllerJUnitTest.java    |  21 +++++--
 .../WanCommandsControllerJUnitTest.java         |  16 +++++
 .../gemfire/management/model/EmptyObject.java   |  21 +++++--
 .../gemstone/gemfire/management/model/Item.java |  21 +++++--
 .../gemfire/management/model/Order.java         |  21 +++++--
 .../gemfire/management/model/SubOrder.java      |  21 +++++--
 .../DomainObjectsAsValuesJUnitTest.java         |  21 +++++--
 .../GemcachedBinaryClientJUnitTest.java         |  21 +++++--
 .../GemcachedDevelopmentJUnitTest.java          |  21 +++++--
 .../gemfire/memcached/IntegrationJUnitTest.java |  21 +++++--
 .../gemfire/pdx/AutoSerializableJUnitTest.java  |  21 +++++--
 .../gemfire/pdx/ByteSourceJUnitTest.java        |  16 +++++
 .../ClientsWithVersioningRetryDUnitTest.java    |  21 +++++--
 .../com/gemstone/gemfire/pdx/DSInsidePdx.java   |  23 ++++---
 .../pdx/DistributedSystemIdDUnitTest.java       |  21 +++++--
 .../com/gemstone/gemfire/pdx/DomainObject.java  |  21 +++++--
 .../gemstone/gemfire/pdx/DomainObjectBad.java   |  16 +++++
 .../gemfire/pdx/DomainObjectClassLoadable.java  |  16 +++++
 .../gemfire/pdx/DomainObjectPdxAuto.java        |  21 +++++--
 ...DomainObjectPdxAutoNoDefaultConstructor.java |  21 +++++--
 .../java/com/gemstone/gemfire/pdx/Employee.java |  23 ++++---
 .../pdx/JSONPdxClientServerDUnitTest.java       |  23 ++++---
 .../com/gemstone/gemfire/pdx/NestedPdx.java     |  21 +++++--
 .../gemfire/pdx/NonDelegatingLoader.java        |  23 ++++---
 .../OffHeapByteBufferByteSourceJUnitTest.java   |  16 +++++
 .../gemfire/pdx/OffHeapByteSourceJUnitTest.java |  16 +++++
 .../pdx/PDXAsyncEventQueueDUnitTest.java        |  22 ++++---
 .../gemfire/pdx/PdxAttributesJUnitTest.java     |  21 +++++--
 .../gemfire/pdx/PdxClientServerDUnitTest.java   |  21 +++++--
 .../pdx/PdxDeserializationDUnitTest.java        |  21 +++++--
 .../pdx/PdxFormatterPutGetJUnitTest.java        |  21 +++++--
 .../com/gemstone/gemfire/pdx/PdxInsideDS.java   |  23 ++++---
 .../pdx/PdxInstanceFactoryJUnitTest.java        |  21 +++++--
 .../gemfire/pdx/PdxInstanceJUnitTest.java       |  21 +++++--
 .../gemfire/pdx/PdxSerializableDUnitTest.java   |  21 +++++--
 .../gemfire/pdx/PdxSerializableJUnitTest.java   |  21 +++++--
 .../gemfire/pdx/PdxStringJUnitTest.java         |  21 +++++--
 .../gemfire/pdx/PdxTypeExportDUnitTest.java     |  21 +++++--
 .../gemfire/pdx/SeparateClassloaderPdx.java     |  23 ++++---
 .../com/gemstone/gemfire/pdx/SimpleClass.java   |  23 ++++---
 .../com/gemstone/gemfire/pdx/SimpleClass1.java  |  21 +++++--
 .../com/gemstone/gemfire/pdx/SimpleClass2.java  |  21 +++++--
 .../gemfire/pdx/TestObjectForPdxFormatter.java  |  21 +++++--
 .../gemfire/pdx/VersionClassLoader.java         |  22 +++++--
 .../gemstone/gemfire/redis/AuthJUnitTest.java   |  18 +++++-
 .../gemfire/redis/ConcurrentStartTest.java      |  16 +++++
 .../gemstone/gemfire/redis/HashesJUnitTest.java |  16 +++++
 .../gemstone/gemfire/redis/ListsJUnitTest.java  |  16 +++++
 .../gemfire/redis/RedisDistDUnitTest.java       |  16 +++++
 .../gemstone/gemfire/redis/SetsJUnitTest.java   |  16 +++++
 .../gemfire/redis/SortedSetsJUnitTest.java      |  16 +++++
 .../gemfire/redis/StringsJunitTest.java         |  16 +++++
 .../web/controllers/AddFreeItemToOrders.java    |  21 +++++--
 .../rest/internal/web/controllers/Customer.java |  21 +++++--
 .../internal/web/controllers/DateTimeUtils.java |  16 +++++
 .../rest/internal/web/controllers/Gender.java   |  16 +++++
 .../internal/web/controllers/GetAllEntries.java |  21 +++++--
 .../web/controllers/GetDeliveredOrders.java     |  21 +++++--
 .../internal/web/controllers/GetRegions.java    |  21 +++++--
 .../web/controllers/GetValueForKey.java         |  21 +++++--
 .../rest/internal/web/controllers/Item.java     |  21 +++++--
 .../rest/internal/web/controllers/Order.java    |  21 +++++--
 .../rest/internal/web/controllers/Person.java   |  21 +++++--
 .../web/controllers/PutKeyFunction.java         |  21 +++++--
 .../web/controllers/RestAPITestBase.java        |  16 +++++
 .../internal/web/controllers/RestTestUtils.java |  21 +++++--
 .../gemfire/test/golden/ExecutableProcess.java  |  16 +++++
 .../gemfire/test/golden/FailOutputTestCase.java |  16 +++++
 .../golden/FailWithErrorInOutputJUnitTest.java  |  16 +++++
 .../FailWithExtraLineInOutputJUnitTest.java     |  16 +++++
 ...WithLineMissingFromEndOfOutputJUnitTest.java |  16 +++++
 ...hLineMissingFromMiddleOfOutputJUnitTest.java |  16 +++++
 .../FailWithLoggerErrorInOutputJUnitTest.java   |  16 +++++
 .../FailWithLoggerFatalInOutputJUnitTest.java   |  16 +++++
 .../FailWithLoggerWarnInOutputJUnitTest.java    |  16 +++++
 .../golden/FailWithProblemInOutputTestCase.java |  16 +++++
 .../golden/FailWithSevereInOutputJUnitTest.java |  16 +++++
 ...hTimeoutOfWaitForOutputToMatchJUnitTest.java |  16 +++++
 .../FailWithWarningInOutputJUnitTest.java       |  16 +++++
 .../gemfire/test/golden/GoldenComparator.java   |  16 +++++
 .../test/golden/GoldenStringComparator.java     |  16 +++++
 .../gemfire/test/golden/GoldenTestCase.java     |  16 +++++
 .../golden/GoldenTestFrameworkTestSuite.java    |  16 +++++
 .../gemfire/test/golden/PassJUnitTest.java      |  16 +++++
 .../golden/PassWithExpectedErrorJUnitTest.java  |  16 +++++
 .../golden/PassWithExpectedProblemTestCase.java |  16 +++++
 .../golden/PassWithExpectedSevereJUnitTest.java |  16 +++++
 .../PassWithExpectedWarningJUnitTest.java       |  16 +++++
 .../test/golden/RegexGoldenComparator.java      |  16 +++++
 .../test/golden/StringGoldenComparator.java     |  16 +++++
 .../gemfire/test/process/MainLauncher.java      |  16 +++++
 .../test/process/MainLauncherJUnitTest.java     |  16 +++++
 .../gemfire/test/process/OutputFormatter.java   |  16 +++++
 .../test/process/ProcessOutputReader.java       |  16 +++++
 .../test/process/ProcessStreamReader.java       |  16 +++++
 .../process/ProcessTestFrameworkTestSuite.java  |  16 +++++
 .../gemfire/test/process/ProcessWrapper.java    |  16 +++++
 .../test/process/ProcessWrapperJUnitTest.java   |  16 +++++
 .../gemstone/gemfire/util/JSR166TestCase.java   |  22 +++++--
 .../gemstone/gemfire/util/test/TestUtil.java    |  16 +++++
 .../protocols/CacheTimeSlowDownDUnitTest.java   |  17 ++++-
 .../GemFireTimeSyncProtocolDUnitTest.java       |  17 ++++-
 .../JGroupsFailureDetectionJUnitTest.java       |  16 +++++
 .../protocols/JGroupsVersioningJUnitTest.java   |  21 +++++--
 .../com/gemstone/persistence/admin/Logger.java  |  21 +++++--
 .../gemstone/persistence/logging/Formatter.java |  21 +++++--
 .../gemstone/persistence/logging/Handler.java   |  21 +++++--
 .../com/gemstone/persistence/logging/Level.java |  21 +++++--
 .../gemstone/persistence/logging/LogRecord.java |  21 +++++--
 .../gemstone/persistence/logging/Logger.java    |  21 +++++--
 .../persistence/logging/SimpleFormatter.java    |  21 +++++--
 .../persistence/logging/StreamHandler.java      |  21 +++++--
 .../test/java/com/gemstone/sequence/Arrow.java  |  21 +++++--
 .../java/com/gemstone/sequence/Lifeline.java    |  21 +++++--
 .../com/gemstone/sequence/LifelineState.java    |  21 +++++--
 .../java/com/gemstone/sequence/LineMapper.java  |  21 +++++--
 .../com/gemstone/sequence/SequenceDiagram.java  |  21 +++++--
 .../com/gemstone/sequence/SequencePanel.java    |  21 +++++--
 .../com/gemstone/sequence/StateColorMap.java    |  21 +++++--
 .../java/com/gemstone/sequence/TimeAxis.java    |  21 +++++--
 .../com/gemstone/sequence/ZoomingPanel.java     |  21 +++++--
 .../sequence/gemfire/DefaultLineMapper.java     |  21 +++++--
 .../gemfire/GemfireSequenceDisplay.java         |  21 +++++--
 .../sequence/gemfire/HydraLineMapper.java       |  21 +++++--
 .../sequence/gemfire/SelectGraphDialog.java     |  21 +++++--
 .../com/main/MyDistributedSystemListener.java   |  21 +++++--
 .../com/main/WANBootStrapping_Site1_Add.java    |  21 +++++--
 .../com/main/WANBootStrapping_Site1_Remove.java |  21 +++++--
 .../com/main/WANBootStrapping_Site2_Add.java    |  21 +++++--
 .../com/main/WANBootStrapping_Site2_Remove.java |  21 +++++--
 .../src/test/java/dunit/AsyncInvocation.java    |  21 +++++--
 .../src/test/java/dunit/BounceResult.java       |  16 +++++
 gemfire-core/src/test/java/dunit/DUnitEnv.java  |  21 +++++--
 .../test/java/dunit/DistributedTestCase.java    |  21 +++++--
 gemfire-core/src/test/java/dunit/Host.java      |  21 +++++--
 .../src/test/java/dunit/RMIException.java       |  21 +++++--
 .../src/test/java/dunit/RemoteDUnitVMIF.java    |  16 +++++
 .../src/test/java/dunit/RepeatableRunnable.java |  16 +++++
 .../test/java/dunit/SerializableCallable.java   |  21 +++++--
 .../test/java/dunit/SerializableRunnable.java   |  21 +++++--
 gemfire-core/src/test/java/dunit/VM.java        |  21 +++++--
 .../src/test/java/dunit/standalone/ChildVM.java |  21 +++++--
 .../java/dunit/standalone/DUnitLauncher.java    |  21 +++++--
 .../java/dunit/standalone/ProcessManager.java   |  34 +++++++---
 .../java/dunit/standalone/RemoteDUnitVM.java    |  21 +++++--
 .../dunit/standalone/StandAloneDUnitEnv.java    |  21 +++++--
 .../test/java/dunit/tests/BasicDUnitTest.java   |  21 +++++--
 .../src/test/java/dunit/tests/TestFailure.java  |  21 +++++--
 .../src/test/java/dunit/tests/VMDUnitTest.java  |  21 +++++--
 gemfire-core/src/test/java/hydra/GsRandom.java  |  21 +++++--
 .../test/java/hydra/HydraRuntimeException.java  |  21 +++++--
 gemfire-core/src/test/java/hydra/Log.java       |  21 +++++--
 .../src/test/java/hydra/LogVersionHelper.java   |  21 +++++--
 .../src/test/java/hydra/MethExecutor.java       |  21 +++++--
 .../src/test/java/hydra/MethExecutorResult.java |  21 +++++--
 .../src/test/java/hydra/SchedulingOrder.java    |  21 +++++--
 .../src/test/java/hydra/log/AnyLogWriter.java   |  21 +++++--
 .../java/hydra/log/CircularOutputStream.java    |  21 +++++--
 .../parReg/query/unittest/NewPortfolio.java     |  21 +++++--
 .../java/parReg/query/unittest/Position.java    |  22 ++++---
 .../src/test/java/perffmwk/Formatter.java       |  22 ++++---
 .../templates/security/DummyAuthenticator.java  |  21 +++++--
 .../templates/security/DummyAuthorization.java  |  21 +++++--
 .../security/FunctionSecurityPrmsHolder.java    |  21 +++++--
 .../security/LdapUserAuthenticator.java         |  21 +++++--
 .../java/templates/security/PKCSAuthInit.java   |  21 +++++--
 .../templates/security/PKCSAuthenticator.java   |  21 +++++--
 .../java/templates/security/PKCSPrincipal.java  |  21 +++++--
 .../security/UserPasswordAuthInit.java          |  21 +++++--
 .../templates/security/UsernamePrincipal.java   |  21 +++++--
 .../templates/security/XmlAuthorization.java    |  21 +++++--
 .../templates/security/XmlErrorHandler.java     |  21 +++++--
 .../src/test/java/util/TestException.java       |  21 +++++--
 .../org/jgroups/ShunnedAddressException.java    |  16 +++++
 .../com/gemstone/org/jgroups/SuspectMember.java |  16 ++++-
 .../org/jgroups/debug/JChannelTestHook.java     |  16 +++++
 .../gemstone/org/jgroups/protocols/AUTH.java    |  16 +++++
 .../gemstone/org/jgroups/protocols/FRAG3.java   |  17 ++++-
 .../org/jgroups/spi/GFBasicAdapter.java         |  16 +++++
 .../gemstone/org/jgroups/spi/GFPeerAdapter.java |  16 +++++
 .../org/jgroups/stack/BoundedLinkedHashMap.java |  21 +++++--
 .../org/jgroups/stack/GFBasicAdapterImpl.java   |  16 +++++
 .../org/jgroups/stack/GFPeerAdapterImpl.java    |  16 +++++
 .../org/jgroups/stack/SockCreatorImpl.java      |  16 +++++
 .../org/jgroups/util/ConnectionWatcher.java     |  16 +++++
 .../org/jgroups/util/ExternalStrings.java       |  21 +++++--
 .../gemstone/org/jgroups/util/GFLogWriter.java  |  16 +++++
 .../org/jgroups/util/GFStringIdImpl.java        |  16 +++++
 .../gemstone/org/jgroups/util/SockCreator.java  |  16 +++++
 .../org/jgroups/util/StreamableFixedID.java     |  16 +++++
 .../com/gemstone/org/jgroups/util/StringId.java |  21 +++++--
 .../org/jgroups/util/VersionedStreamable.java   |  16 +++++
 .../gemstone/org/jgroups/JChannelJUnitTest.java |  16 +++++
 .../test/junit/categories/DistributedTest.java  |  16 +++++
 .../categories/DistributedTransactionsTest.java |  16 +++++
 .../test/junit/categories/IntegrationTest.java  |  16 +++++
 .../test/junit/categories/PerformanceTest.java  |  16 +++++
 .../gemfire/test/junit/categories/UnitTest.java |  16 +++++
 .../gemfire/test/junit/categories/WanTest.java  |  16 +++++
 .../test/junit/rules/ExpectedTimeout.java       |  16 +++++
 .../junit/rules/ExpectedTimeoutJUnitTest.java   |  16 +++++
 .../gemfire/cache/util/AutoBalancer.java        |  18 +++++-
 .../cache/util/AutoBalancerJUnitTest.java       |  16 +++++
 .../connector/internal/RegionMetadata.java      |  16 +++++
 .../gemfirefunctions/QueryFunction.java         |  18 +++++-
 .../RetrieveRegionFunction.java                 |  16 +++++
 .../RetrieveRegionMetadataFunction.java         |  16 +++++
 .../StructStreamingResultSender.java            |  16 +++++
 .../gemfire/spark/connector/Employee.java       |  16 +++++
 .../spark/connector/JavaApiIntegrationTest.java |  16 +++++
 .../gemfire/spark/connector/Portfolio.java      |  16 +++++
 .../gemfire/spark/connector/Position.java       |  16 +++++
 .../spark/connector/BasicIntegrationTest.scala  |  16 +++++
 .../RDDJoinRegionIntegrationTest.scala          |  16 +++++
 .../RetrieveRegionIntegrationTest.scala         |  16 +++++
 .../gemfire/spark/connector/package.scala       |  16 +++++
 .../connector/testkit/GemFireCluster.scala      |  16 +++++
 .../spark/connector/testkit/GemFireRunner.scala |  16 +++++
 .../spark/connector/testkit/IOUtils.scala       |  16 +++++
 .../spark/streaming/ManualClockHelper.scala     |  16 +++++
 .../spark/streaming/TestInputDStream.scala      |  16 +++++
 .../javaapi/GemFireJavaDStreamFunctions.java    |  16 +++++
 .../GemFireJavaPairDStreamFunctions.java        |  16 +++++
 .../javaapi/GemFireJavaPairRDDFunctions.java    |  16 +++++
 .../javaapi/GemFireJavaRDDFunctions.java        |  16 +++++
 .../javaapi/GemFireJavaSQLContextFunctions.java |  16 +++++
 .../GemFireJavaSparkContextFunctions.java       |  16 +++++
 .../connector/javaapi/GemFireJavaUtil.java      |  16 +++++
 .../spark/connector/GemFireConnection.scala     |  16 +++++
 .../spark/connector/GemFireConnectionConf.scala |  16 +++++
 .../connector/GemFireConnectionManager.scala    |  16 +++++
 .../connector/GemFireFunctionDeployer.scala     |  16 +++++
 .../connector/GemFireKryoRegistrator.scala      |  16 +++++
 .../connector/GemFirePairRDDFunctions.scala     |  16 +++++
 .../spark/connector/GemFireRDDFunctions.scala   |  16 +++++
 .../connector/GemFireSQLContextFunctions.scala  |  16 +++++
 .../GemFireSparkContextFunctions.scala          |  16 +++++
 .../internal/DefaultGemFireConnection.scala     |  16 +++++
 .../DefaultGemFireConnectionManager.scala       |  16 +++++
 .../connector/internal/LocatorHelper.scala      |  16 +++++
 .../StructStreamingResultCollector.scala        |  16 +++++
 .../connector/internal/oql/QueryParser.scala    |  16 +++++
 .../spark/connector/internal/oql/QueryRDD.scala |  18 +++++-
 .../internal/oql/QueryResultCollector.scala     |  18 +++++-
 .../connector/internal/oql/RDDConverter.scala   |  18 +++++-
 .../connector/internal/oql/RowBuilder.scala     |  16 +++++
 .../connector/internal/oql/SchemaBuilder.scala  |  16 +++++
 .../internal/oql/UndefinedSerializer.scala      |  16 +++++
 .../connector/internal/rdd/GemFireJoinRDD.scala |  16 +++++
 .../internal/rdd/GemFireOuterJoinRDD.scala      |  16 +++++
 .../internal/rdd/GemFireRDDPartition.scala      |  16 +++++
 .../internal/rdd/GemFireRDDPartitioner.scala    |  16 +++++
 .../rdd/GemFireRDDPartitionerImpl.scala         |  16 +++++
 .../internal/rdd/GemFireRDDWriter.scala         |  16 +++++
 .../internal/rdd/GemFireRegionRDD.scala         |  16 +++++
 .../javaapi/GemFireJavaRegionRDD.scala          |  16 +++++
 .../spark/connector/javaapi/JavaAPIHelper.scala |  16 +++++
 .../gemfire/spark/connector/package.scala       |  16 +++++
 .../streaming/GemFireDStreamFunctions.scala     |  18 +++++-
 .../spark/connector/streaming/package.scala     |  16 +++++
 .../gemfire/spark/connector/JavaAPITest.java    |  18 +++++-
 .../connector/GemFireFunctionDeployerTest.scala |  16 +++++
 .../DefaultGemFireConnectionManagerTest.scala   |  16 +++++
 ...tStreamingResultSenderAndCollectorTest.scala |  16 +++++
 .../internal/oql/QueryParserTest.scala          |  18 +++++-
 .../connector/ConnectorImplicitsTest.scala      |  16 +++++
 .../connector/GemFireConnectionConfTest.scala   |  16 +++++
 .../connector/GemFireDStreamFunctionsTest.scala |  16 +++++
 .../connector/GemFireRDDFunctionsTest.scala     |  16 +++++
 .../spark/connector/LocatorHelperTest.scala     |  16 +++++
 .../rdd/GemFireRDDPartitionerTest.scala         |  16 +++++
 .../connector/rdd/GemFireRegionRDDTest.scala    |  16 +++++
 .../basic-demos/src/main/java/demo/Emp.java     |  16 +++++
 .../src/main/java/demo/OQLJavaDemo.java         |  16 +++++
 .../src/main/java/demo/PairRDDSaveJavaDemo.java |  16 +++++
 .../src/main/java/demo/RDDSaveJavaDemo.java     |  16 +++++
 .../src/main/java/demo/RegionToRDDJavaDemo.java |  16 +++++
 .../src/main/scala/demo/NetworkWordCount.scala  |  16 +++++
 .../project/Dependencies.scala                  |  16 +++++
 .../project/GemFireSparkBuild.scala             |  16 +++++
 gemfire-spark-connector/project/Settings.scala  |  16 +++++
 .../web/controllers/AbstractBaseController.java |  21 +++++--
 .../web/controllers/BaseControllerAdvice.java   |  21 +++++--
 .../web/controllers/CommonCrudController.java   |  21 +++++--
 .../controllers/FunctionAccessController.java   |  21 +++++--
 .../web/controllers/PdxBasedCrudController.java |  21 +++++--
 .../web/controllers/QueryAccessController.java  |  21 +++++--
 .../web/controllers/support/JSONTypes.java      |  16 +++++
 .../controllers/support/QueryResultTypes.java   |  21 +++++--
 .../web/controllers/support/RegionData.java     |  21 +++++--
 .../controllers/support/RegionEntryData.java    |  21 +++++--
 .../support/RestServersResultCollector.java     |  16 +++++
 .../web/controllers/support/UpdateOp.java       |  21 +++++--
 .../DataTypeNotSupportedException.java          |  21 +++++--
 .../web/exception/GemfireRestException.java     |  21 +++++--
 .../web/exception/MalformedJsonException.java   |  21 +++++--
 .../web/exception/RegionNotFoundException.java  |  21 +++++--
 .../exception/ResourceNotFoundException.java    |  21 +++++--
 ...stomMappingJackson2HttpMessageConverter.java |  16 +++++
 .../web/swagger/config/RestApiPathProvider.java |  16 +++++
 .../web/swagger/config/SwaggerConfig.java       |  16 +++++
 .../rest/internal/web/util/ArrayUtils.java      |  21 +++++--
 .../rest/internal/web/util/DateTimeUtils.java   |  21 +++++--
 .../internal/web/util/IdentifiableUtils.java    |  21 +++++--
 .../rest/internal/web/util/JSONUtils.java       |  21 +++++--
 .../rest/internal/web/util/JsonWriter.java      |  21 +++++--
 .../rest/internal/web/util/NumberUtils.java     |  21 +++++--
 .../rest/internal/web/util/ValidationUtils.java |  21 +++++--
 .../internal/web/AbstractWebTestCase.java       |  21 +++++--
 .../ShellCommandsControllerJUnitTest.java       |  21 +++++--
 ...entVariablesHandlerInterceptorJUnitTest.java |  21 +++++--
 .../internal/web/domain/LinkIndexJUnitTest.java |  21 +++++--
 .../internal/web/domain/LinkJUnitTest.java      |  21 +++++--
 .../domain/QueryParameterSourceJUnitTest.java   |  21 +++++--
 .../web/http/ClientHttpRequestJUnitTest.java    |  21 +++++--
 ...ableObjectHttpMessageConverterJUnitTest.java |  21 +++++--
 .../RestHttpOperationInvokerJUnitTest.java      |  21 +++++--
 .../SimpleHttpOperationInvokerJUnitTest.java    |  21 +++++--
 .../web/util/ConvertUtilsJUnitTest.java         |  21 +++++--
 .../internal/web/util/UriUtilsJUnitTest.java    |  21 +++++--
 gradle/wrapper/gradle-wrapper.jar               | Bin 51018 -> 53637 bytes
 gradle/wrapper/gradle-wrapper.properties        |   4 +-
 gradlew                                         |  12 ++--
 4946 files changed, 74757 insertions(+), 25301 deletions(-)
----------------------------------------------------------------------



[11/50] [abbrv] incubator-geode git commit: GEODE-429: Remove hdfsStore gfsh commands

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
deleted file mode 100644
index f3c66b0..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DescribeHDFSStoreFunctionJUnitTest.java
+++ /dev/null
@@ -1,364 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.logging.log4j.Logger;
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.ResultSender;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.cli.util.HDFSStoreNotFoundException;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-/**
- * The DescribeHDFSStoreFunctionJUnitTest test suite class tests the contract
- * and functionality of the DescribeHDFSStoreFunction class. </p>
- * 
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-@SuppressWarnings( { "unused" })
-@Category({IntegrationTest.class, HoplogTest.class})
-public class DescribeHDFSStoreFunctionJUnitTest {
-
-  private static final Logger logger = LogService.getLogger();
-
-  private Mockery mockContext;
-
-  @Before
-  public void setup() {
-    mockContext = new Mockery() {
-      {
-        setImposteriser(ClassImposteriser.INSTANCE);
-      }
-    };
-  }
-
-  @After
-  public void tearDown() {
-    mockContext.assertIsSatisfied();
-    mockContext = null;
-  }
-
-  @Test
-  public void testExecute() throws Throwable {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
-    final HDFSStoreImpl mockHdfsStore = createMockHDFSStore(hdfsStoreName, "hdfs://localhost:9000", "testDir", 1024, 20, .25f,
-        null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
-
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-    final LogService mockLogService = mockContext.mock(LogService.class, "LogService");
-
-    final TestResultSender testResultSender = new TestResultSender();
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCache).findHDFSStore(hdfsStoreName);
-        will(returnValue(mockHdfsStore));
-        oneOf(mockMember).getName();
-        will(returnValue(memberName));
-        oneOf(mockFunctionContext).getArguments();
-        will(returnValue(hdfsStoreName));
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache, mockMember);
-
-    function.execute(mockFunctionContext);
-
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final HDFSStoreConfigHolder hdfsStoreDetails = (HDFSStoreConfigHolder)results.get(0);
-
-    assertNotNull(hdfsStoreDetails);
-    assertEquals(hdfsStoreName, hdfsStoreDetails.getName());
-    assertEquals("hdfs://localhost:9000", hdfsStoreDetails.getNameNodeURL());
-    assertEquals("testDir", hdfsStoreDetails.getHomeDir());
-    assertEquals(1024, hdfsStoreDetails.getWriteOnlyFileRolloverSize());
-    assertEquals(20, hdfsStoreDetails.getWriteOnlyFileRolloverInterval());
-    assertFalse(hdfsStoreDetails.getMinorCompaction());
-    assertEquals("0.25", Float.toString(hdfsStoreDetails.getBlockCacheSize()));
-    assertNull(hdfsStoreDetails.getHDFSClientConfigFile());
-    assertTrue(hdfsStoreDetails.getMajorCompaction());
-    assertEquals(20, hdfsStoreDetails.getMajorCompactionInterval());
-    assertEquals(20, hdfsStoreDetails.getMajorCompactionThreads());
-    assertEquals(10, hdfsStoreDetails.getMinorCompactionThreads());
-    assertEquals(100, hdfsStoreDetails.getPurgeInterval());
-
-    assertEquals(20, hdfsStoreDetails.getBatchSize());
-    assertEquals(20, hdfsStoreDetails.getBatchInterval());
-    assertNull(hdfsStoreDetails.getDiskStoreName());
-    assertFalse(hdfsStoreDetails.getSynchronousDiskWrite());
-    assertEquals(0, hdfsStoreDetails.getDispatcherThreads());
-    assertEquals(1024, hdfsStoreDetails.getMaxMemory());
-    assertFalse(hdfsStoreDetails.getBufferPersistent());
-  }
-
-  
-  @Test
-  public void testExecuteOnMemberHavingANonGemFireCache() throws Throwable {
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final TestResultSender testResultSender = new TestResultSender();
-
-    mockContext.checking(new Expectations() {{
-      exactly(0).of(mockFunctionContext).getResultSender();
-      will(returnValue(testResultSender));
-      
-    }});
-
-    final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache , mockMember);
-
-    function.execute(mockFunctionContext);
-
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertTrue(results.isEmpty());
-  }
-
-  
-  @Test(expected = HDFSStoreNotFoundException.class)
-  public void testExecuteThrowingResourceNotFoundException() throws Throwable{    
-    final String hdfsStoreName = "testHdfsStore";
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
-    final TestResultSender testResultSender = new TestResultSender();
-
-    mockContext.checking(new Expectations() {{
-      oneOf(mockCache).findHDFSStore(hdfsStoreName);
-      will(returnValue(null));
-      oneOf(mockMember).getName();
-      will(returnValue(memberName));
-      oneOf(mockFunctionContext).getArguments();
-      will(returnValue(hdfsStoreName));
-      oneOf(mockFunctionContext).getResultSender();
-      will(returnValue(testResultSender));
-    }});
-
-    final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache,mockMember);
-
-    function.execute(mockFunctionContext);
-
-    try {
-      testResultSender.getResults();
-    }
-    catch (HDFSStoreNotFoundException e) {
-      assertEquals(String.format("A hdfs store with name (%1$s) was not found on member (%2$s).",
-        hdfsStoreName, memberName), e.getMessage());
-      throw e;
-    }
-  }
-  
-  
-  @Test(expected = RuntimeException.class)
-  public void testExecuteThrowingRuntimeException() throws Throwable {
-    final String hdfsStoreName = "testHdfsStore";
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
-    final TestResultSender testResultSender = new TestResultSender();
-
-    mockContext.checking(new Expectations() {{
-      oneOf(mockCache).findHDFSStore(hdfsStoreName);
-      will(throwException(new RuntimeException("ExpectedStrings")));
-      oneOf(mockMember).getName();
-      will(returnValue(memberName));
-      oneOf(mockFunctionContext).getArguments();
-      will(returnValue(hdfsStoreName));
-      oneOf(mockFunctionContext).getResultSender();
-      will(returnValue(testResultSender));
-    }});
-
-    final DescribeHDFSStoreFunction function = createDescribeHDFSStoreFunction(mockCache, mockMember);
-
-    function.execute(mockFunctionContext);
-
-    try {
-      testResultSender.getResults();
-    }
-    catch (RuntimeException e) {
-      assertEquals("ExpectedStrings", e.getMessage());
-      throw e;
-    }
-  }
-  
-  
-  protected HDFSStoreImpl createMockHDFSStore(final String storeName, final String namenode, final String homeDir,
-      final int maxFileSize, final int fileRolloverInterval, final float blockCachesize, final String clientConfigFile,
-      final int batchSize, final int batchInterval, final String diskStoreName, final boolean syncDiskwrite,
-      final int dispatcherThreads, final int maxMemory, final boolean bufferPersistent, final boolean minorCompact,
-      final boolean majorCompact, final int majorCompactionInterval, final int majorCompactionThreads,
-      final int minorCompactionThreads, final int purgeInterval) {
-
-    final HDFSStoreImpl mockHdfsStore = mockContext.mock(HDFSStoreImpl.class, storeName);
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockHdfsStore).getMajorCompaction();
-        will(returnValue(majorCompact));
-        oneOf(mockHdfsStore).getMajorCompactionInterval();
-        will(returnValue(majorCompactionInterval));
-        oneOf(mockHdfsStore).getMajorCompactionThreads();
-        will(returnValue(majorCompactionThreads));
-        oneOf(mockHdfsStore).getMinorCompactionThreads();
-        will(returnValue(minorCompactionThreads));
-        oneOf(mockHdfsStore).getPurgeInterval();
-        will(returnValue(purgeInterval));
-        oneOf(mockHdfsStore).getInputFileCountMax();
-        will(returnValue(10));
-        oneOf(mockHdfsStore).getInputFileSizeMax();
-        will(returnValue(1024));
-        oneOf(mockHdfsStore).getInputFileCountMin();
-        will(returnValue(2));
-        oneOf(mockHdfsStore).getBatchSize();
-        will(returnValue(batchSize));
-        oneOf(mockHdfsStore).getBatchInterval();
-        will(returnValue(batchInterval));
-        oneOf(mockHdfsStore).getDiskStoreName();
-        will(returnValue(diskStoreName));
-        oneOf(mockHdfsStore).getSynchronousDiskWrite();
-        will(returnValue(syncDiskwrite));
-        oneOf(mockHdfsStore).getBufferPersistent();
-        will(returnValue(bufferPersistent));
-        oneOf(mockHdfsStore).getDispatcherThreads();
-        will(returnValue(dispatcherThreads));
-        oneOf(mockHdfsStore).getMaxMemory();
-        will(returnValue(maxMemory));
-        oneOf(mockHdfsStore).getName();
-        will(returnValue(storeName));
-        oneOf(mockHdfsStore).getNameNodeURL();
-        will(returnValue(namenode));
-        oneOf(mockHdfsStore).getHomeDir();
-        will(returnValue(homeDir));
-        oneOf(mockHdfsStore).getWriteOnlyFileRolloverSize();
-        will(returnValue(maxFileSize));
-        oneOf(mockHdfsStore).getWriteOnlyFileRolloverInterval();
-        will(returnValue(fileRolloverInterval));
-        oneOf(mockHdfsStore).getMinorCompaction();
-        will(returnValue(minorCompact));
-        oneOf(mockHdfsStore).getBlockCacheSize();
-        will(returnValue(blockCachesize));
-        allowing(mockHdfsStore).getHDFSClientConfigFile();
-        will(returnValue(clientConfigFile));
-      }
-    });
-    return mockHdfsStore;
-  }
-
-  protected TestDescribeHDFSStoreFunction createDescribeHDFSStoreFunction(final Cache cache, DistributedMember member) {
-    return new TestDescribeHDFSStoreFunction(cache, member);
-  }
-
-  protected static class TestDescribeHDFSStoreFunction extends DescribeHDFSStoreFunction {
-    private static final long serialVersionUID = 1L;
-
-    private final Cache cache;
-
-    private final DistributedMember member;
-
-    public TestDescribeHDFSStoreFunction(final Cache cache, DistributedMember member) {
-      this.cache = cache;
-      this.member = member;
-    }
-
-    @Override
-    protected Cache getCache() {
-      return this.cache;
-    }
-
-    @Override
-    protected DistributedMember getDistributedMemberId(Cache cache) {
-      return member;
-    }
-  }
-
-  protected static class TestResultSender implements ResultSender {
-
-    private final List<Object> results = new LinkedList<Object>();
-
-    private Throwable t;
-
-    protected List<Object> getResults() throws Throwable {
-      if (t != null) {
-        throw t;
-      }
-      return Collections.unmodifiableList(results);
-    }
-
-    public void lastResult(final Object lastResult) {
-      results.add(lastResult);
-    }
-
-    public void sendResult(final Object oneResult) {
-      results.add(oneResult);
-    }
-
-    public void sendException(final Throwable t) {
-      this.t = t;
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java
deleted file mode 100644
index 08e18ec..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/DestroyHDFSStoreFunctionJUnitTest.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.logging.log4j.Logger;
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.ResultSender;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-/**
- * The DestroyHDFSStoreFunctionJUnitTest test suite class tests the contract and
- * functionality of the DestroyHDFSStoreFunction class. </p>
- * 
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.DestroyHDFSStoreFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-@SuppressWarnings( { "unused" })
-@Category({IntegrationTest.class, HoplogTest.class})
-public class DestroyHDFSStoreFunctionJUnitTest {
-
-  private static final Logger logger = LogService.getLogger();
-
-  private Mockery mockContext;
-
-  @Before
-  public void setup() {
-    mockContext = new Mockery() {
-      {
-        setImposteriser(ClassImposteriser.INSTANCE);
-      }
-    };
-  }
-
-  @After
-  public void tearDown() {
-    mockContext.assertIsSatisfied();
-    mockContext = null;
-  }
-
-  @Test
-  public void testExecute() throws Throwable {
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-    final HDFSStoreImpl mockHdfsStore = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreImpl");
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-    final TestResultSender testResultSender = new TestResultSender();
-    final DestroyHDFSStoreFunction function = createDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity);
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCache).findHDFSStore(hdfsStoreName);
-        will(returnValue(mockHdfsStore));
-        one(mockHdfsStore).destroy();
-        oneOf(mockMember).getId();
-        will(returnValue(memberId));
-        exactly(2).of(mockMember).getName();
-        will(returnValue(memberName));
-        oneOf(mockFunctionContext).getArguments();
-        will(returnValue(hdfsStoreName));
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    function.execute(mockFunctionContext);
-
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final CliFunctionResult result = (CliFunctionResult)results.get(0);
-    assertEquals(memberName, result.getMemberIdOrName());
-    assertEquals("Success", result.getMessage());
-
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testExecuteOnMemberHavingNoHDFSStore() throws Throwable {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
-    final TestResultSender testResultSender = new TestResultSender();
-    final DestroyHDFSStoreFunction function = createDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity);
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCache).findHDFSStore(hdfsStoreName);
-        will(returnValue(null));
-        oneOf(mockMember).getId();
-        will(returnValue(memberId));
-        exactly(2).of(mockMember).getName();
-        will(returnValue(memberName));
-        oneOf(mockFunctionContext).getArguments();
-        will(returnValue(hdfsStoreName));
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    function.execute(mockFunctionContext);
-
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final CliFunctionResult result = (CliFunctionResult)results.get(0);
-    assertEquals(memberName, result.getMemberIdOrName());
-    assertEquals("Hdfs store not found on this member", result.getMessage());
-  }
-
-  @Test
-  public void testExecuteOnMemberWithNoCache() throws Throwable {
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
-    final String hdfsStoreName = "mockHdfsStore";
-
-    final TestResultSender testResultSender = new TestResultSender();
-    final DestroyHDFSStoreFunction function = new TestDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity) {
-      private static final long serialVersionUID = 1L;
-
-      @Override
-      protected Cache getCache() {
-        throw new CacheClosedException("Expected");
-      }
-    };
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionContext).getArguments();
-        will(returnValue(hdfsStoreName));
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    function.execute(mockFunctionContext);
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final CliFunctionResult result = (CliFunctionResult)results.get(0);
-    assertEquals("", result.getMemberIdOrName());
-    assertNull(result.getMessage());
-  }
-
-  @Test
-  public void testExecuteHandleRuntimeException() throws Throwable {
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-
-    final TestResultSender testResultSender = new TestResultSender();
-    final DestroyHDFSStoreFunction function = createDestroyHDFSStoreFunction(mockCache, mockMember, xmlEntity);
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockMember).getId();
-        will(returnValue(memberId));
-        exactly(2).of(mockMember).getName();
-        will(returnValue(memberName));
-        oneOf(mockFunctionContext).getArguments();
-        will(returnValue(hdfsStoreName));
-        oneOf(mockCache).findHDFSStore(hdfsStoreName);
-        will(throwException(new RuntimeException("expected")));
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    function.execute(mockFunctionContext);
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final CliFunctionResult result = (CliFunctionResult)results.get(0);
-    assertEquals(memberName, result.getMemberIdOrName());
-    assertEquals("expected", result.getThrowable().getMessage());
-
-  }
-
-  protected TestDestroyHDFSStoreFunction createDestroyHDFSStoreFunction(final Cache cache, DistributedMember member,
-      XmlEntity xml) {
-    return new TestDestroyHDFSStoreFunction(cache, member, xml);
-  }
-
-  protected static class TestDestroyHDFSStoreFunction extends DestroyHDFSStoreFunction {
-    private static final long serialVersionUID = 1L;
-
-    private final Cache cache;
-
-    private final DistributedMember member;
-
-    private final XmlEntity xml;
-
-    public TestDestroyHDFSStoreFunction(final Cache cache, DistributedMember member, XmlEntity xml) {
-      this.cache = cache;
-      this.member = member;
-      this.xml = xml;
-    }
-
-    @Override
-    protected Cache getCache() {
-      return this.cache;
-    }
-
-    @Override
-    protected DistributedMember getDistributedMember(Cache cache) {
-      return member;
-    }
-
-    @Override
-    protected XmlEntity getXMLEntity(String storeName) {
-      return xml;
-    }
-
-  }
-
-  protected static class TestResultSender implements ResultSender {
-
-    private final List<Object> results = new LinkedList<Object>();
-
-    private Throwable t;
-
-    protected List<Object> getResults() throws Throwable {
-      if (t != null) {
-        throw t;
-      }
-      return Collections.unmodifiableList(results);
-    }
-
-    public void lastResult(final Object lastResult) {
-      results.add(lastResult);
-    }
-
-    public void sendResult(final Object oneResult) {
-      results.add(oneResult);
-    }
-
-    public void sendException(final Throwable t) {
-      this.t = t;
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java
deleted file mode 100644
index 11bc430..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/ListHDFSStoresFunctionJUnitTest.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.ResultSender;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction.HdfsStoreDetails;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * The ListHDFSStoreFunctionJUnitTest test suite class tests the contract and functionality of the
- * ListHDFSStoreFunction.
- * </p>
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class ListHDFSStoresFunctionJUnitTest {
-  private Mockery mockContext;
-
-  @Before
-  public void setup() {
-    mockContext = new Mockery() {
-      {
-        setImposteriser(ClassImposteriser.INSTANCE);
-      }
-    };
-  }
-
-  @After
-  public void tearDown() {
-    mockContext.assertIsSatisfied();
-    mockContext = null;
-  }
-
-  
-  @Test
-  public void testExecute() throws Throwable {
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-    final TestResultSender testResultSender = new TestResultSender();
-
-    final HDFSStoreImpl mockHdfsStoreOne = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreOne");
-    final HDFSStoreImpl mockHdfsStoreTwo = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreTwo");
-    final HDFSStoreImpl mockHdfsStoreThree = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreThree");
-
-    final List<HDFSStoreImpl> mockHdfsStores = new ArrayList<HDFSStoreImpl>();
-
-    mockHdfsStores.add(mockHdfsStoreOne);
-    mockHdfsStores.add(mockHdfsStoreTwo);
-    mockHdfsStores.add(mockHdfsStoreThree);
-
-    final List<String> storeNames = new ArrayList<String>();
-    storeNames.add("hdfsStoreOne");
-    storeNames.add("hdfsStoreTwo");
-    storeNames.add("hdfsStoreThree");
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCache).getHDFSStores();
-        will(returnValue(mockHdfsStores));
-        exactly(3).of(mockMember).getId();
-        will(returnValue(memberId));
-        exactly(3).of(mockMember).getName();
-        will(returnValue(memberName));
-        oneOf(mockHdfsStoreOne).getName();
-        will(returnValue(storeNames.get(0)));       
-        oneOf(mockHdfsStoreTwo).getName();
-        will(returnValue(storeNames.get(1)));        
-        oneOf(mockHdfsStoreThree).getName();
-        will(returnValue(storeNames.get(2)));        
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, mockMember);
-
-    function.execute(mockFunctionContext);
-
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final Set<HdfsStoreDetails> listHdfsStoreFunctionresults = (Set<HdfsStoreDetails>)results.get(0);
-
-    assertNotNull(listHdfsStoreFunctionresults);
-    assertEquals(3, listHdfsStoreFunctionresults.size());
-
-    Collections.sort(storeNames);
-
-    for (HdfsStoreDetails listHdfsStoreFunctionresult : listHdfsStoreFunctionresults) {
-      assertTrue(storeNames.contains(listHdfsStoreFunctionresult.getStoreName()));
-      assertTrue(storeNames.remove(listHdfsStoreFunctionresult.getStoreName()));
-      assertEquals(memberId, listHdfsStoreFunctionresult.getMemberId());
-      assertEquals(memberName, listHdfsStoreFunctionresult.getMemberName());
-    }
-  }
-  
-  
-  @Test(expected = CacheClosedException.class)
-  public void testExecuteOnMemberWithNoCache() throws Throwable {
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final TestListHDFSStoresFunction testListHdfsStoresFunction = 
-          new TestListHDFSStoresFunction(mockContext.mock(Cache.class, "MockCache"), mockMember) {
-      @Override protected Cache getCache() {
-        throw new CacheClosedException("Expected");
-      }
-    };
-
-    final TestResultSender testResultSender = new TestResultSender();
-
-    mockContext.checking(new Expectations() {{
-      oneOf(mockFunctionContext).getResultSender();
-      will(returnValue(testResultSender));
-    }});
-
-    testListHdfsStoresFunction.execute(mockFunctionContext);
-
-    try {
-      testResultSender.getResults();
-    }
-    catch (CacheClosedException expected) {
-      assertEquals("Expected", expected.getMessage());
-      throw expected;
-    }
-  }  
-  
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testExecuteOnMemberHavingNoHDFSStores() throws Throwable {
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
-    final TestResultSender testResultSender = new TestResultSender();
-
-    mockContext.checking(new Expectations() {{
-      oneOf(mockCache).getHDFSStores();
-      will(returnValue(Collections.emptyList()));
-      oneOf(mockFunctionContext).getResultSender();
-      will(returnValue(testResultSender));
-    }});
-
-    final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, mockMember);
-
-    function.execute(mockFunctionContext);
-
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final Set<HdfsStoreDetails> hdfsStoreDetails = (Set<HdfsStoreDetails>) results.get(0);
-
-    assertNotNull(hdfsStoreDetails);
-    assertTrue(hdfsStoreDetails.isEmpty());
-  }
-  
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testExecuteOnMemberWithANonGemFireCache() throws Throwable {
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
-    final TestResultSender testResultSender = new TestResultSender();
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, null);
-
-    function.execute(mockFunctionContext);
-
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final Set<HdfsStoreDetails> hdfsStoreDetails = (Set<HdfsStoreDetails>)results.get(0);
-
-    assertNotNull(hdfsStoreDetails);
-    assertTrue(hdfsStoreDetails.isEmpty());
-  }
-  
-  
-  @Test(expected = RuntimeException.class)
-  public void testExecuteThrowsRuntimeException() throws Throwable {
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-
-    final TestResultSender testResultSender = new TestResultSender();
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCache).getHDFSStores();
-        will(throwException(new RuntimeException("expected")));
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    final ListHDFSStoresFunction function = createListHDFSStoresFunction(mockCache, mockMember);
-
-    function.execute(mockFunctionContext);
-
-    try {
-      testResultSender.getResults();
-    } catch (Throwable throwable) {
-      assertTrue(throwable instanceof RuntimeException);
-      assertEquals("expected", throwable.getMessage());
-      throw throwable;
-    }
-  }
-  
-  protected ListHDFSStoresFunction createListHDFSStoresFunction(final Cache cache, DistributedMember member) {
-    return new TestListHDFSStoresFunction(cache, member);
-  }
-    
-  protected static class TestListHDFSStoresFunction extends ListHDFSStoresFunction {
-    private static final long serialVersionUID = 1L;
-
-    private final Cache cache;
-
-    DistributedMember member;
-
-    @Override
-    protected DistributedMember getDistributedMemberId(Cache cache) {
-      return member;
-    }
-
-    public TestListHDFSStoresFunction(final Cache cache, DistributedMember member) {
-      assert cache != null: "The Cache cannot be null!";
-      this.cache = cache;
-      this.member = member;
-    }
-
-    @Override
-    protected Cache getCache() {
-      return cache;
-    }
-  }
-
-  protected static class TestResultSender implements ResultSender {
-
-    private final List<Object> results = new LinkedList<Object>();
-
-    private Throwable t;
-
-    protected List<Object> getResults() throws Throwable {
-      if (t != null) {
-        throw t;
-      }
-      return Collections.unmodifiableList(results);
-    }
-
-    public void lastResult(final Object lastResult) {
-      results.add(lastResult);
-    }
-
-    public void sendResult(final Object oneResult) {
-      results.add(oneResult);
-    }
-
-    public void sendException(final Throwable t) {
-      this.t = t;
-    }
-  }
-}



[04/50] [abbrv] incubator-geode git commit: GEODE-441: fix broken test loop condition and remove index

Posted by ds...@apache.org.
GEODE-441: fix broken test loop condition and remove index


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/17d00616
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/17d00616
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/17d00616

Branch: refs/heads/develop
Commit: 17d00616fdace860ee8ea485f21aebb3a7bc19d4
Parents: f5a44dc
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Mon Oct 19 16:28:29 2015 -0700
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Mon Oct 19 16:29:58 2015 -0700

----------------------------------------------------------------------
 .../java/com/gemstone/gemfire/redis/SortedSetsJUnitTest.java     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/17d00616/gemfire-core/src/test/java/com/gemstone/gemfire/redis/SortedSetsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/redis/SortedSetsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/redis/SortedSetsJUnitTest.java
index 4adfe07..0650406 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/redis/SortedSetsJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/redis/SortedSetsJUnitTest.java
@@ -263,8 +263,8 @@ public class SortedSetsJUnitTest {
       }
       jedis.zadd(key, scoreMembers);
       Collections.sort(expected, new EntryCmp());
-      for (int i = expected.size(); i <= 0; i--) {
-        Entry<String, Double> remEntry = expected.remove(i);
+      for (int i = expected.size(); i > 0; i--) {
+        Entry<String, Double> remEntry = expected.remove(i-1);
         String rem = remEntry.getKey();
         Double val = remEntry.getValue();
         assertEquals(val, jedis.zscore(key, rem));


[42/50] [abbrv] incubator-geode git commit: GEODE-392: Some enhancements to GemFireDeadlockDetectorDUnitTest

Posted by ds...@apache.org.
GEODE-392: Some enhancements to GemFireDeadlockDetectorDUnitTest

I originally thought this test might be the cause of this bug, so I made
a few changes to make sure no additional threads are still running
when the test exits. I think that is still a good change to have, even
though those threads were not the cause of the issue.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/4a42443b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/4a42443b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/4a42443b

Branch: refs/heads/develop
Commit: 4a42443bdddda7c65bad48064f5b6b2895e48055
Parents: ca4991d
Author: Dan Smith <up...@apache.org>
Authored: Fri Oct 23 10:55:02 2015 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Fri Oct 23 11:01:51 2015 -0700

----------------------------------------------------------------------
 .../GemFireDeadlockDetectorDUnitTest.java       | 49 +++++++++++++++-----
 1 file changed, 37 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/4a42443b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/deadlock/GemFireDeadlockDetectorDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/deadlock/GemFireDeadlockDetectorDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/deadlock/GemFireDeadlockDetectorDUnitTest.java
index 01af1f9..32d21e5 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/deadlock/GemFireDeadlockDetectorDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/deadlock/GemFireDeadlockDetectorDUnitTest.java
@@ -22,8 +22,10 @@ import com.gemstone.gemfire.cache.execute.FunctionService;
 import com.gemstone.gemfire.cache.execute.ResultCollector;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.DistributedLockService;
+import com.gemstone.gemfire.distributed.LockServiceDestroyedException;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
 
+import dunit.AsyncInvocation;
 import dunit.Host;
 import dunit.SerializableCallable;
 import dunit.SerializableRunnable;
@@ -41,11 +43,22 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
   
   @Override
   public void tearDown2() throws Exception {
+    disconnectAllFromDS();
+  }
+
+  private void stopStuckThreads() {
     invokeInEveryVM(new SerializableRunnable() {
       
       public void run() {
         for(Thread thread: stuckThreads) {
           thread.interrupt();
+          disconnectFromDS();
+          try {
+            thread.join(30000);
+            assertTrue(!thread.isAlive());
+          } catch (InterruptedException e) {
+            fail("interrupted", e);
+          }
         }
       }
     });
@@ -74,7 +87,7 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
   private static final Lock lock = new ReentrantLock();
   
   
-  public void testDistributedDeadlockWithFunction() throws InterruptedException {
+  public void testDistributedDeadlockWithFunction() throws Throwable {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -86,21 +99,24 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
     
     
     //This thread locks the lock member1 first, then member2.
-    lockTheLocks(vm0, member2);
+    AsyncInvocation async1 = lockTheLocks(vm0, member2);
     //This thread locks the lock member2 first, then member1.
-    lockTheLocks(vm1, member1);
+    AsyncInvocation async2 = lockTheLocks(vm1, member1);
     
     Thread.sleep(5000);
     GemFireDeadlockDetector detect = new GemFireDeadlockDetector();
     LinkedList<Dependency> deadlock = detect.find().findCycle();
     getLogWriter().info("Deadlock=" + DeadlockDetector.prettyFormat(deadlock));
     assertEquals(8, deadlock.size());
+    stopStuckThreads();
+    async1.getResult(30000);
+    async2.getResult(30000);
   }
   
   
 
-  private void lockTheLocks(VM vm0, final InternalDistributedMember member) {
-    vm0.invokeAsync(new SerializableRunnable() {
+  private AsyncInvocation lockTheLocks(VM vm0, final InternalDistributedMember member) {
+    return vm0.invokeAsync(new SerializableRunnable() {
 
       public void run() {
         lock.lock();
@@ -117,13 +133,13 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
     });
   }
   
-  public void testDistributedDeadlockWithDLock() throws InterruptedException {
+  public void testDistributedDeadlockWithDLock() throws Throwable {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
     
-    lockTheDLocks(vm0, "one", "two");
-    lockTheDLocks(vm1, "two", "one");
+    AsyncInvocation async1 = lockTheDLocks(vm0, "one", "two");
+    AsyncInvocation async2 = lockTheDLocks(vm1, "two", "one");
     getSystem();
     GemFireDeadlockDetector detect = new GemFireDeadlockDetector();
     
@@ -139,10 +155,14 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
     assertTrue(deadlock != null);
     getLogWriter().info("Deadlock=" + DeadlockDetector.prettyFormat(deadlock));
     assertEquals(4, deadlock.size());
+    stopStuckThreads();
+    disconnectAllFromDS();
+    async1.getResult(30000);
+    async2.getResult(30000);
   } 
 
-  private void lockTheDLocks(VM vm, final String first, final String second) {
-    vm.invokeAsync(new SerializableRunnable() {
+  private AsyncInvocation lockTheDLocks(VM vm, final String first, final String second) {
+    return vm.invokeAsync(new SerializableRunnable() {
       
       public void run() {
         getCache();
@@ -154,7 +174,11 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
         } catch (InterruptedException e) {
           e.printStackTrace();
         }
-        dls.lock(second, 10 * 1000, -1);
+        try {
+          dls.lock(second, 10 * 1000, -1);
+        } catch(LockServiceDestroyedException expected) {
+          //this is ok, the test is terminating
+        }
         
       }
     });
@@ -172,7 +196,7 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
   
   private static class TestFunction implements Function {
     
-    private static final int LOCK_WAIT_TIME = 1000;
+    private static final int LOCK_WAIT_TIME = 5;
 
     public boolean hasResult() {
       return true;
@@ -186,6 +210,7 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
       } catch (InterruptedException e) {
         //ingore
       }
+      stuckThreads.remove(Thread.currentThread());
       context.getResultSender().lastResult(null);
     }
 


[23/50] [abbrv] incubator-geode git commit: GEODE-429: Remove HdfsStore Junit and Dunits

Posted by ds...@apache.org.
GEODE-429: Remove HdfsStore Junit and Dunits


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/74c3156a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/74c3156a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/74c3156a

Branch: refs/heads/develop
Commit: 74c3156aaa0d29ccc4ec0b4c9a53659d2c9eb003
Parents: 1b4fd2f
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 14:58:00 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:23 2015 -0700

----------------------------------------------------------------------
 .../ColocatedRegionWithHDFSDUnitTest.java       |  189 ----
 .../hdfs/internal/HDFSEntriesSetJUnitTest.java  |  228 ----
 .../internal/HdfsStoreMutatorJUnitTest.java     |  191 ----
 .../hdfs/internal/RegionWithHDFSTestBase.java   |  715 ------------
 .../internal/hoplog/BaseHoplogTestCase.java     |  389 -------
 .../hoplog/CardinalityEstimatorJUnitTest.java   |  188 ----
 .../hoplog/HDFSCacheLoaderJUnitTest.java        |  106 --
 .../hoplog/HDFSCompactionManagerJUnitTest.java  |  449 --------
 .../hoplog/HDFSRegionDirectorJUnitTest.java     |   97 --
 .../internal/hoplog/HDFSStatsJUnitTest.java     |  250 -----
 .../HDFSUnsortedHoplogOrganizerJUnitTest.java   |  297 -----
 .../HdfsSortedOplogOrganizerJUnitTest.java      | 1045 ------------------
 .../hoplog/HfileSortedOplogJUnitTest.java       |  540 ---------
 .../hoplog/SortedOplogListIterJUnitTest.java    |  178 ---
 .../hoplog/TieredCompactionJUnitTest.java       |  904 ---------------
 .../hoplog/mapreduce/GFKeyJUnitTest.java        |   50 -
 .../mapreduce/HDFSSplitIteratorJUnitTest.java   |  265 -----
 .../hoplog/mapreduce/HoplogUtilJUnitTest.java   |  305 -----
 18 files changed, 6386 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
deleted file mode 100644
index 44206dc..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/ColocatedRegionWithHDFSDUnitTest.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-
-import dunit.AsyncInvocation;
-import dunit.SerializableCallable;
-import dunit.VM;
-
-/**
- * A class for testing the basic HDFS functionality
- * 
- * @author Hemant Bhanawat
- */
-@SuppressWarnings({"serial", "rawtypes", "unchecked", "deprecation"})
-public class ColocatedRegionWithHDFSDUnitTest extends RegionWithHDFSTestBase {
-
-  public ColocatedRegionWithHDFSDUnitTest(String name) {
-    super(name);
-  }
-
-  @Override
-  protected SerializableCallable getCreateRegionCallable(
-      final int totalnumOfBuckets, final int batchSizeMB,
-      final int maximumEntries, final String folderPath,
-      final String uniqueName, final int batchInterval,
-      final boolean queuePersistent, final boolean writeonly,
-      final long timeForRollover, final long maxFileSize) {
-    SerializableCallable createRegion = new SerializableCallable() {
-      public Object call() throws Exception {
-        HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
-        hsf.setBatchSize(batchSizeMB);
-        hsf.setBufferPersistent(queuePersistent);
-        hsf.setMaxMemory(3);
-        hsf.setBatchInterval(batchInterval);
-        hsf.setHomeDir(tmpDir + "/" + folderPath);
-        homeDir = new File(tmpDir + "/" + folderPath).getCanonicalPath();
-        hsf.setHomeDir(homeDir);
-        hsf.create(uniqueName);
-
-        AttributesFactory af = new AttributesFactory();
-        af.setDataPolicy(DataPolicy.PARTITION);
-        PartitionAttributesFactory paf = new PartitionAttributesFactory();
-        paf.setTotalNumBuckets(totalnumOfBuckets);
-        paf.setRedundantCopies(1);
-
-        af.setHDFSStoreName(uniqueName);
-        af.setPartitionAttributes(paf.create());
-        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(
-            maximumEntries, EvictionAction.LOCAL_DESTROY));
-
-        af.setHDFSWriteOnly(writeonly);
-        Region r1 = createRootRegion(uniqueName + "-r1", af.create());
-
-        paf.setColocatedWith(uniqueName + "-r1");
-        af.setPartitionAttributes(paf.create());
-        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(
-            maximumEntries, EvictionAction.LOCAL_DESTROY));
-        Region r2 = createRootRegion(uniqueName + "-r2", af.create());
-
-        ((LocalRegion) r1).setIsTest();
-        ((LocalRegion) r2).setIsTest();
-
-        return 0;
-      }
-    };
-    return createRegion;
-  }
-
-  @Override
-  protected void doPuts(String uniqueName, int start, int end) {
-    Region r1 = getRootRegion(uniqueName + "-r1");
-    Region r2 = getRootRegion(uniqueName + "-r2");
-
-    for (int i = start; i < end; i++) {
-      r1.put("K" + i, "V" + i);
-      r2.put("K" + i, "V" + i);
-    }
-  }
-
-  protected AsyncInvocation doAsyncPuts(VM vm, final String regionName,
-      final int start, final int end, final String suffix) throws Exception {
-    return vm.invokeAsync(new SerializableCallable() {
-      public Object call() throws Exception {
-        Region r1 = getRootRegion(regionName + "-r1");
-        Region r2 = getRootRegion(regionName + "-r2");
-
-        getCache().getLogger().info("Putting entries ");
-        for (int i = start; i < end; i++) {
-          r1.put("K" + i, "V" + i + suffix);
-          r2.put("K" + i, "V" + i + suffix);
-        }
-        return null;
-      }
-
-    });
-  }
-
-  protected void doPutAll(final String uniqueName, Map map) {
-    Region r1 = getRootRegion(uniqueName + "-r1");
-    Region r2 = getRootRegion(uniqueName + "-r2");
-    r1.putAll(map);
-    r2.putAll(map);
-  }
-
-  @Override
-  protected void doDestroys(String uniqueName, int start, int end) {
-    Region r1 = getRootRegion(uniqueName + "-r1");
-    Region r2 = getRootRegion(uniqueName + "-r2");
-
-    for (int i = start; i < end; i++) {
-      r1.destroy("K" + i);
-      r2.destroy("K" + i);
-    }
-  }
-
-  @Override
-  protected void checkWithGet(String uniqueName, int start, int end,
-      boolean expectValue) {
-    Region r1 = getRootRegion(uniqueName + "-r1");
-    Region r2 = getRootRegion(uniqueName + "-r2");
-    for (int i = start; i < end; i++) {
-      String expected = expectValue ? "V" + i : null;
-      assertEquals("Mismatch on key " + i, expected, r1.get("K" + i));
-      assertEquals("Mismatch on key " + i, expected, r2.get("K" + i));
-    }
-  }
-
-  protected void checkWithGetAll(String uniqueName, ArrayList arrayl) {
-    Region r1 = getRootRegion(uniqueName + "-r1");
-    Region r2 = getRootRegion(uniqueName + "-r2");
-    Map map1 = r1.getAll(arrayl);
-    Map map2 = r2.getAll(arrayl);
-    for (Object e : map1.keySet()) {
-      String v = e.toString().replaceFirst("K", "V");
-      assertTrue("Reading entries failed for key " + e + " where value = "
-          + map1.get(e), v.equals(map1.get(e)));
-      assertTrue("Reading entries failed for key " + e + " where value = "
-          + map2.get(e), v.equals(map2.get(e)));
-    }
-  }
-
-  @Override
-  protected void verifyHDFSData(VM vm, String uniqueName) throws Exception {
-    HashMap<String, HashMap<String, String>> filesToEntriesMap = createFilesAndEntriesMap(
-        vm, uniqueName, uniqueName + "-r1");
-    HashMap<String, String> entriesMap = new HashMap<String, String>();
-    for (Map.Entry<String, HashMap<String, String>> e : filesToEntriesMap
-        .entrySet()) {
-      entriesMap.putAll(e.getValue());
-    }
-
-    verifyInEntriesMap(entriesMap, 1, 50, "vm0");
-    verifyInEntriesMap(entriesMap, 40, 100, "vm1");
-    verifyInEntriesMap(entriesMap, 40, 100, "vm2");
-    verifyInEntriesMap(entriesMap, 90, 150, "vm3");
-
-    filesToEntriesMap = createFilesAndEntriesMap(vm, uniqueName, uniqueName
-        + "-r2");
-    entriesMap = new HashMap<String, String>();
-    for (Map.Entry<String, HashMap<String, String>> e : filesToEntriesMap
-        .entrySet()) {
-      entriesMap.putAll(e.getValue());
-    }
-
-    verifyInEntriesMap(entriesMap, 1, 50, "vm0");
-    verifyInEntriesMap(entriesMap, 40, 100, "vm1");
-    verifyInEntriesMap(entriesMap, 40, 100, "vm2");
-    verifyInEntriesMap(entriesMap, 90, 150, "vm3");
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
deleted file mode 100644
index 3085a66..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.asyncqueue.internal.ParallelAsyncEventQueueImpl;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedListForAsyncQueueJUnitTest.KeyValue;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.BucketRegion;
-import com.gemstone.gemfire.internal.cache.CachedDeserializable;
-import com.gemstone.gemfire.internal.cache.EntryEventImpl;
-import com.gemstone.gemfire.internal.cache.EnumListenerEvent;
-import com.gemstone.gemfire.internal.cache.EventID;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.LocalRegion.IteratorType;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.wan.GatewaySenderAttributes;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@SuppressWarnings("rawtypes")
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSEntriesSetJUnitTest extends TestCase {
-  private GemFireCacheImpl cache;
-  private HDFSStoreImpl store;
-  private PartitionedRegion region;
-  private BucketRegion bucket;
-  private HDFSParallelGatewaySenderQueue queue;
-  
-  private HDFSBucketRegionQueue brq;
-  private HoplogOrganizer hdfs;
-  
-  public void setUp() throws Exception {
-    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
-    cache = (GemFireCacheImpl) new CacheFactory()
-        .set("mcast-port", "0")
-        .set("log-level", "info")
-        .create();
-    
-    HDFSStoreFactory hsf = this.cache.createHDFSStoreFactory();
-    hsf.setHomeDir("hoplogs");
-    store = (HDFSStoreImpl) hsf.create("test");
-
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
-    paf.setTotalNumBuckets(1);
-    
-    RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION);
-//    rf.setHDFSStoreName("test");
-    region = (PartitionedRegion) rf.setPartitionAttributes(paf.create()).create("test");
-    
-    // prime the region so buckets get created
-    region.put("test", "test");
-    GatewaySenderAttributes g = new GatewaySenderAttributes();
-    g.isHDFSQueue = true;
-    g.id = "HDFSEntriesSetJUnitTest_Queue";
-    ParallelAsyncEventQueueImpl gatewaySender = new ParallelAsyncEventQueueImpl(cache, g);
-    Set<Region> set = new HashSet<Region>();
-    set.add(region);
-    
-    queue = new HDFSParallelGatewaySenderQueue(gatewaySender, set, 0, 1);
-    brq = (HDFSBucketRegionQueue)((PartitionedRegion) queue.getRegion()).getDataStore().getLocalBucketById(0);
-    bucket = region.getDataStore().getLocalBucketById(0);
-        
-    HdfsRegionManager mgr = HDFSRegionDirector.getInstance().manageRegion(region, "test", null);
-    hdfs =  mgr.<SortedHoplogPersistedEvent>create(0);
-    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
-  }
-  
-  public void tearDown() throws Exception {
-    store.getFileSystem().delete(new Path("hoplogs"), true);
-    hdfs.close();
-    
-    cache.close();
-  }
-  
-  public void testEmptyIterator() throws Exception {
-    checkIteration(Collections.<String>emptyList(), new KeyValue[] { }, new KeyValue[] { });
-  }
-  
-  public void testQueueOnlyIterator() throws Exception {
-    KeyValue[] qvals = new KeyValue[] {
-      new KeyValue("K0", "0"),
-      new KeyValue("K1", "1"),
-      new KeyValue("K2", "2"),
-      new KeyValue("K3", "3"),
-      new KeyValue("K4", "4")
-    };
-    checkIteration(getExpected(), qvals, new KeyValue[] { });
-  }
-  
-  public void testHdfsOnlyIterator() throws Exception {
-    KeyValue[] hvals = new KeyValue[] {
-      new KeyValue("K0", "0"),
-      new KeyValue("K1", "1"),
-      new KeyValue("K2", "2"),
-      new KeyValue("K3", "3"),
-      new KeyValue("K4", "4")
-    };
-    checkIteration(getExpected(), new KeyValue[] { }, hvals);
-  }
-  
-  public void testUnevenIterator() throws Exception {
-    KeyValue[] qvals = new KeyValue[] {
-        new KeyValue("K0", "0"),
-        new KeyValue("K2", "2"),
-      };
-
-    KeyValue[] hvals = new KeyValue[] {
-      new KeyValue("K1", "1"),
-      new KeyValue("K3", "3"),
-      new KeyValue("K4", "4")
-    };
-    
-    checkIteration(getExpected(), qvals, hvals);
-  }
-
-  public void testEitherOrIterator() throws Exception {
-    KeyValue[] qvals = new KeyValue[] {
-        new KeyValue("K0", "0"),
-        new KeyValue("K2", "2"),
-        new KeyValue("K4", "4")
-      };
-
-    KeyValue[] hvals = new KeyValue[] {
-      new KeyValue("K1", "1"),
-      new KeyValue("K3", "3")
-    };
-    
-    checkIteration(getExpected(), qvals, hvals);
-  }
-
-  public void testDuplicateIterator() throws Exception {
-    KeyValue[] qvals = new KeyValue[] {
-        new KeyValue("K0", "0"),
-        new KeyValue("K1", "1"),
-        new KeyValue("K2", "2"),
-        new KeyValue("K3", "3"),
-        new KeyValue("K4", "4"),
-        new KeyValue("K4", "4")
-      };
-
-    KeyValue[] hvals = new KeyValue[] {
-        new KeyValue("K0", "0"),
-        new KeyValue("K1", "1"),
-        new KeyValue("K2", "2"),
-        new KeyValue("K3", "3"),
-        new KeyValue("K4", "4"),
-        new KeyValue("K4", "4")
-    };
-    
-    checkIteration(getExpected(), qvals, hvals);
-  }
-
-  private List<String> getExpected() {
-    List<String> expected = new ArrayList<String>();
-    expected.add("0");
-    expected.add("1");
-    expected.add("2");
-    expected.add("3");
-    expected.add("4");
-    return expected;
-  }
-  
-  private void checkIteration(List<String> expected, KeyValue[] qvals, KeyValue[] hvals) 
-  throws Exception {
-    int seq = 0;
-    List<PersistedEventImpl> evts = new ArrayList<PersistedEventImpl>();
-    for (KeyValue kv : hvals) {
-      evts.add(new SortedHDFSQueuePersistedEvent(getNewEvent(kv.key, kv.value, seq++)));
-    }
-    hdfs.flush(evts.iterator(), evts.size());
-
-    for (KeyValue kv : qvals) {
-      queue.put(getNewEvent(kv.key, kv.value, seq++));
-    }
-
-    List<String> actual = new ArrayList<String>();
-    Iterator vals = new HDFSEntriesSet(bucket, brq, hdfs, IteratorType.VALUES, null).iterator();
-    while (vals.hasNext()) {
-      Object val = vals.next();
-      if(val instanceof CachedDeserializable) {
-        val = ((CachedDeserializable) val).getDeserializedForReading();
-      }
-      actual.add((String) val);
-    }
-    
-    assertEquals(expected, actual);
-  }
-  
-  private HDFSGatewayEventImpl getNewEvent(Object key, Object value, long seq) throws Exception {
-    EntryEventImpl evt = EntryEventImpl.create(region, Operation.CREATE,
-        key, value, null, false, (DistributedMember) cache.getMyId());
-    
-    evt.setEventId(new EventID(cache.getDistributedSystem()));
-    HDFSGatewayEventImpl event = new HDFSGatewayEventImpl(EnumListenerEvent.AFTER_CREATE, evt, null, true, 0);
-    event.setShadowKey(seq);
-    
-    return event;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
deleted file mode 100644
index b8cbb0d..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HdfsStoreMutatorJUnitTest.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.BaseHoplogTestCase;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HdfsStoreMutatorJUnitTest extends BaseHoplogTestCase {
-  public void testMutatorInitialState() {
-    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-    assertEquals(-1, mutator.getWriteOnlyFileRolloverInterval());
-    assertEquals(-1, mutator.getWriteOnlyFileRolloverSize());
-    
-    assertEquals(-1, mutator.getInputFileCountMax());
-    assertEquals(-1, mutator.getInputFileSizeMax());
-    assertEquals(-1, mutator.getInputFileCountMin());
-    assertEquals(-1, mutator.getMinorCompactionThreads());
-    assertNull(mutator.getMinorCompaction());
-    
-    assertEquals(-1, mutator.getMajorCompactionInterval());
-    assertEquals(-1, mutator.getMajorCompactionThreads());
-    assertNull(mutator.getMajorCompaction());
-    
-    assertEquals(-1, mutator.getPurgeInterval());
-    
-    assertEquals(-1, mutator.getBatchSize());
-    assertEquals(-1, mutator.getBatchInterval());
-  }
-  
-  public void testMutatorSetInvalidValue() {
-    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-
-    try {
-      mutator.setWriteOnlyFileRolloverInterval(-3);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-    try {
-      mutator.setWriteOnlyFileRolloverSize(-5);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-    
-    try {
-      mutator.setInputFileCountMin(-1);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-    try {
-      mutator.setInputFileCountMax(-1);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-    try {
-      mutator.setInputFileSizeMax(-1);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-    try {
-      mutator.setMinorCompactionThreads(-9);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-    try {
-      mutator.setMajorCompactionInterval(-6);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-    try {
-      mutator.setMajorCompactionThreads(-1);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-    try {
-      mutator.setPurgeInterval(-4);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-/*    try {
-      qMutator.setBatchSizeMB(-985);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-    try {
-      qMutator.setBatchTimeInterval(-695);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-*/    
-    try {
-      mutator.setInputFileCountMin(10);
-      mutator.setInputFileCountMax(5);
-      hdfsStore.alter(mutator);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected
-    }
-  }
-  
-  public void testMutatorReturnsUpdatedValues() {
-    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-    
-    mutator.setWriteOnlyFileRolloverInterval(121);
-    mutator.setWriteOnlyFileRolloverSize(234);
-    
-    mutator.setInputFileCountMax(87);
-    mutator.setInputFileSizeMax(45);
-    mutator.setInputFileCountMin(34);
-    mutator.setMinorCompactionThreads(843);
-    mutator.setMinorCompaction(false);
-
-    mutator.setMajorCompactionInterval(26);
-    mutator.setMajorCompactionThreads(92);
-    mutator.setMajorCompaction(false);
-    
-    mutator.setPurgeInterval(328);
-    
-    mutator.setBatchSize(985);
-    mutator.setBatchInterval(695);
-    
-    assertEquals(121, mutator.getWriteOnlyFileRolloverInterval());
-    assertEquals(234, mutator.getWriteOnlyFileRolloverSize());
-    
-    assertEquals(87, mutator.getInputFileCountMax());
-    assertEquals(45, mutator.getInputFileSizeMax());
-    assertEquals(34, mutator.getInputFileCountMin());
-    assertEquals(843, mutator.getMinorCompactionThreads());
-    assertFalse(mutator.getMinorCompaction());
-    
-    assertEquals(26, mutator.getMajorCompactionInterval());
-    assertEquals(92, mutator.getMajorCompactionThreads());
-    assertFalse(mutator.getMajorCompaction());
-    
-    assertEquals(328, mutator.getPurgeInterval());
-    
-    assertEquals(985, mutator.getBatchSize());
-    assertEquals(695, mutator.getBatchInterval());
-    
-    // repeat the cycle once more
-    mutator.setWriteOnlyFileRolloverInterval(14);
-    mutator.setWriteOnlyFileRolloverSize(56);
-    
-    mutator.setInputFileCountMax(93);
-    mutator.setInputFileSizeMax(85);
-    mutator.setInputFileCountMin(64);
-    mutator.setMinorCompactionThreads(59);
-    mutator.setMinorCompaction(true);
-    
-    mutator.setMajorCompactionInterval(26);
-    mutator.setMajorCompactionThreads(92);
-    mutator.setMajorCompaction(false);
-    
-    mutator.setPurgeInterval(328);
-    
-    assertEquals(14, mutator.getWriteOnlyFileRolloverInterval());
-    assertEquals(56, mutator.getWriteOnlyFileRolloverSize());
-    
-    assertEquals(93, mutator.getInputFileCountMax());
-    assertEquals(85, mutator.getInputFileSizeMax());
-    assertEquals(64, mutator.getInputFileCountMin());
-    assertEquals(59, mutator.getMinorCompactionThreads());
-    assertTrue(mutator.getMinorCompaction());
-    
-    assertEquals(26, mutator.getMajorCompactionInterval());
-    assertEquals(92, mutator.getMajorCompactionThreads());
-    assertFalse(mutator.getMajorCompaction());
-    
-    assertEquals(328, mutator.getPurgeInterval());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java
deleted file mode 100644
index 3330574..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSTestBase.java
+++ /dev/null
@@ -1,715 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueStats;
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.SequenceFileHoplog;
-import com.gemstone.gemfire.cache30.CacheTestCase;
-import com.gemstone.gemfire.internal.FileUtil;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerHelper;
-
-import dunit.AsyncInvocation;
-import dunit.Host;
-import dunit.SerializableCallable;
-import dunit.SerializableRunnable;
-import dunit.VM;
-
-@SuppressWarnings({"serial", "rawtypes", "unchecked"})
-public abstract class RegionWithHDFSTestBase extends CacheTestCase {
-
-  protected String tmpDir;
-
-  public static String homeDir = null;
-
-  protected abstract void checkWithGetAll(String uniqueName, ArrayList arrayl);
-
-  protected abstract void checkWithGet(String uniqueName, int start,
-      int end, boolean expectValue);
-
-  protected abstract void doDestroys(final String uniqueName, int start, int end);
-
-  protected abstract void doPutAll(final String uniqueName, Map map);
-
-  protected abstract void doPuts(final String uniqueName, int start, int end);
-
-  protected abstract SerializableCallable getCreateRegionCallable(final int totalnumOfBuckets, final int batchSizeMB,
-      final int maximumEntries, final String folderPath, final String uniqueName, final int batchInterval, final boolean queuePersistent, 
-      final boolean writeonly, final long timeForRollover, final long maxFileSize);
-  
-  protected abstract void verifyHDFSData(VM vm, String uniqueName) throws Exception ;
-  
-  protected abstract AsyncInvocation doAsyncPuts(VM vm, final String regionName, 
-      final int start, final int end, final String suffix) throws Exception;
-  
-  public RegionWithHDFSTestBase(String name) {
-    super(name);
-  }
-
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    for (int h = 0; h < Host.getHostCount(); h++) {
-      Host host = Host.getHost(h);
-      SerializableCallable cleanUp = cleanUpStoresAndDisconnect();
-      for (int v = 0; v < host.getVMCount(); v++) {
-        VM vm = host.getVM(v);
-        // This store will be deleted by the first VM itself. Invocations from
-        // subsequent VMs will be no-op.
-        vm.invoke(cleanUp);
-      }
-    }
-  }
-
-  public SerializableCallable cleanUpStoresAndDisconnect() throws Exception {
-    SerializableCallable cleanUp = new SerializableCallable("cleanUpStoresAndDisconnect") {
-      public Object call() throws Exception {
-        disconnectFromDS();
-        File file;
-        if (homeDir != null) {
-          file = new File(homeDir);
-          FileUtil.delete(file);
-          homeDir = null;
-        }
-        file = new File(tmpDir);
-        FileUtil.delete(file);
-        return 0;
-      }
-    };
-    return cleanUp;
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    tmpDir = /*System.getProperty("java.io.tmpdir") + "/" +*/ "RegionWithHDFSBasicDUnitTest_" + System.nanoTime();
-  }
-  
-  int createServerRegion(VM vm, final int totalnumOfBuckets, 
-      final int batchSize, final int maximumEntries, final String folderPath, 
-      final String uniqueName, final int batchInterval) {
-    return createServerRegion(vm, totalnumOfBuckets, 
-        batchSize, maximumEntries, folderPath, 
-        uniqueName, batchInterval, false, false);
-  }
-
-  protected int createServerRegion(VM vm, final int totalnumOfBuckets, 
-      final int batchSizeMB, final int maximumEntries, final String folderPath, 
-      final String uniqueName, final int batchInterval, final boolean writeonly,
-      final boolean queuePersistent) {
-    return createServerRegion(vm, totalnumOfBuckets, 
-        batchSizeMB, maximumEntries, folderPath, 
-        uniqueName, batchInterval, writeonly, queuePersistent, -1, -1);
-  }
-  protected int createServerRegion(VM vm, final int totalnumOfBuckets, 
-      final int batchSizeMB, final int maximumEntries, final String folderPath, 
-      final String uniqueName, final int batchInterval, final boolean writeonly,
-      final boolean queuePersistent, final long timeForRollover, final long maxFileSize) {
-    SerializableCallable createRegion = getCreateRegionCallable(
-        totalnumOfBuckets, batchSizeMB, maximumEntries, folderPath, uniqueName,
-        batchInterval, queuePersistent, writeonly, timeForRollover, maxFileSize);
-
-    return (Integer) vm.invoke(createRegion);
-  }
-  protected AsyncInvocation createServerRegionAsync(VM vm, final int totalnumOfBuckets, 
-      final int batchSizeMB, final int maximumEntries, final String folderPath, 
-      final String uniqueName, final int batchInterval, final boolean writeonly,
-      final boolean queuePersistent) {
-    SerializableCallable createRegion = getCreateRegionCallable(
-        totalnumOfBuckets, batchSizeMB, maximumEntries, folderPath, uniqueName,
-        batchInterval, queuePersistent, writeonly, -1, -1);
-
-    return vm.invokeAsync(createRegion);
-  }
-  protected AsyncInvocation createServerRegionAsync(VM vm, final int totalnumOfBuckets, 
-      final int batchSizeMB, final int maximumEntries, final String folderPath, 
-      final String uniqueName, final int batchInterval, final boolean writeonly,
-      final boolean queuePersistent, final long timeForRollover, final long maxFileSize) {
-    SerializableCallable createRegion = getCreateRegionCallable(
-        totalnumOfBuckets, batchSizeMB, maximumEntries, folderPath, uniqueName,
-        batchInterval, queuePersistent, writeonly, timeForRollover, maxFileSize);
-
-    return vm.invokeAsync(createRegion);
-  }
-  
-  /**
-   * Does puts, gets, destroy and getAll. Since there are many updates 
-   * most of the time the data is not found in memory and queue and 
-   * is fetched from HDFS
-   * @throws Throwable 
-   */
-  public void testGetFromHDFS() throws Throwable {
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    final String uniqueName = getName();
-    final String homeDir = "../../testGetFromHDFS";
-    
-    createServerRegion(vm0, 7, 1, 50, homeDir, uniqueName, 50, false, true);
-    createServerRegion(vm1, 7, 1, 50, homeDir, uniqueName, 50, false, true);
-    
-    // Do some puts
-    vm0.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        doPuts(uniqueName, 0, 40);
-        return null;
-      }
-    });
-    
-    // Do some puts and destroys 
-    // some order manipulation has been done because of an issue: 
-    // " a higher version update on a key can be batched and 
-    // sent to HDFS before a lower version update on the same key 
-    // is batched and sent to HDFS. This will cause the latest 
-    // update on a key in an older file. Hence, a fetch from HDFS 
-    // will return an older update from a newer file."
-    
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        doPuts(uniqueName, 40, 50);
-        doDestroys(uniqueName, 40, 50);
-        doPuts(uniqueName, 50, 100);
-        doPuts(uniqueName, 30, 40);
-        return null;
-      }
-    });
-    
-    // do some more puts and destroy 
-    // some order manipulation has been done because of an issue: 
-    // " a higher version update on a key can be batched and 
-    // sent to HDFS before a lower version update on the same key 
-    // is batched and sent to HDFS. This will cause the latest 
-    // update on a key in an older file. Hence, a fetch from HDFS 
-    // will return an older update from a newer file."
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        doPuts(uniqueName, 80, 90);
-        doDestroys(uniqueName, 80, 90);
-        doPuts(uniqueName, 110, 200);
-        doPuts(uniqueName, 90, 110);
-        return null;
-      }
-      
-    });
-    
-    // get and getall the values and compare them. 
-    SerializableCallable checkData = new SerializableCallable() {
-      public Object call() throws Exception {
-        checkWithGet(uniqueName, 0, 40, true);
-        checkWithGet(uniqueName, 40, 50, false);
-        checkWithGet(uniqueName, 50, 80, true);
-        checkWithGet(uniqueName, 80, 90, false);
-        checkWithGet(uniqueName, 90, 200, true);
-        checkWithGet(uniqueName, 200, 201, false);
-        
-        ArrayList arrayl = new ArrayList();
-        for (int i =0; i< 200; i++) {
-          String k = "K" + i;
-          if ( !((40 <= i && i < 50) ||   (80 <= i && i < 90)))
-            arrayl.add(k);
-        }
-        checkWithGetAll(uniqueName, arrayl);
-        
-        return null;
-      }
-    };
-    vm1.invoke(checkData);
-    
-    //Restart the members and verify that we can still get the data
-    closeCache(vm0);
-    closeCache(vm1);
-    AsyncInvocation async0 = createServerRegionAsync(vm0, 7, 1, 50, homeDir, uniqueName, 50, false, true);
-    AsyncInvocation async1 = createServerRegionAsync(vm1, 7, 1, 50, homeDir, uniqueName, 50, false, true);
-    
-    async0.getResult();
-    async1.getResult();
-    
-    
-    // get and getall the values and compare them.
-    vm1.invoke(checkData);
-  
-    //TODO:HDFS we are just reading the files here. Need to verify 
-    // once the folder structure is finalized. 
-    dumpFiles(vm1, uniqueName);
-    
-  }
-
-  /**
-   * puts a few entries (keys with multiple updates ). Gets them immediately. 
-   * High probability that it gets it from async queue. 
-   */
-  public void testGetForAsyncQueue() {
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    
-    final String uniqueName = getName();
-    final String homeDir = "../../testGetForAsyncQueue";
-    
-    createServerRegion(vm0, 2, 5, 1, homeDir, uniqueName, 10000);
-    createServerRegion(vm1, 2, 5, 1, homeDir, uniqueName, 10000);
-    
-    vm0.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        doPuts(uniqueName, 0, 4);
-        return null;
-      }
-    });
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        doPuts(uniqueName, 0, 2);
-        doDestroys(uniqueName, 2, 3);
-        doPuts(uniqueName, 3, 7);
-        
-        checkWithGet(uniqueName, 0, 2, true);
-        checkWithGet(uniqueName, 2, 3, false);
-        checkWithGet(uniqueName, 3, 7, true);
-        return null;
-      }
-    });
-  }
-
-  /**
-   * puts a few entries (keys with multiple updates ). Calls getAll immediately. 
-   * High probability that it gets it from async queue. 
-   */
-  public void testGetAllForAsyncQueue() {
-    
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    
-    final String uniqueName = getName();
-    createServerRegion(vm0, 2, 5, 2, uniqueName, uniqueName, 10000);
-    createServerRegion(vm1, 2, 5, 2, uniqueName, uniqueName, 10000);
-    
-    vm0.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        doPuts(uniqueName, 0, 4);
-        return null;
-      }
-    });
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        doPuts(uniqueName, 1, 5);
-  
-        ArrayList arrayl = new ArrayList();
-        for (int i =0; i< 5; i++) {
-          String k = "K" + i;
-          arrayl.add(k);
-        }
-        checkWithGetAll(uniqueName, arrayl);
-        return null;
-      }
-    });
-  }
-
-  /**
-   * puts a few entries (keys with multiple updates ). Calls getAll immediately. 
-   * High probability that it gets it from async queue. 
-   */
-  public void testPutAllForAsyncQueue() {
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    
-    final String uniqueName = getName();
-    final String homeDir = "../../testPutAllForAsyncQueue";
-    createServerRegion(vm0, 2, 5, 2, homeDir, uniqueName, 10000);
-    createServerRegion(vm1, 2, 5, 2, homeDir, uniqueName, 10000);
-    
-    vm0.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        HashMap putAllmap = new HashMap();
-        for (int i =0; i< 4; i++)
-          putAllmap.put("K" + i, "V"+ i );
-        doPutAll(uniqueName, putAllmap);
-        return null;
-      }
-    });
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        HashMap putAllmap = new HashMap();
-        for (int i =1; i< 5; i++)
-          putAllmap.put("K" + i, "V"+ i );
-        doPutAll(uniqueName, putAllmap);
-        checkWithGet(uniqueName, 0, 5, true);
-        return null;
-      }
-    });
-  }
-
-  /**
-   * Does putAll and get. Since there are many updates 
-   * most of the time the data is not found in memory and queue and 
-   * is fetched from HDFS
-   */
-  public void _testPutAllAndGetFromHDFS() {
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    
-    final String uniqueName = getName();
-    final String homeDir = "../../testPutAllAndGetFromHDFS";
-    createServerRegion(vm0, 7, 1, 500, homeDir, uniqueName, 500);
-    createServerRegion(vm1, 7, 1, 500, homeDir, uniqueName, 500);
-    
-    // Do some puts
-    vm0.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-          
-        HashMap putAllmap = new HashMap();
-        
-        for (int i =0; i< 500; i++)
-          putAllmap.put("K" + i, "V"+ i );
-        doPutAll(uniqueName, putAllmap);
-        return null;
-      }
-    });
-    
-    // Do putAll and some  destroys 
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        HashMap putAllmap = new HashMap();
-        for (int i = 500; i< 1000; i++)
-          putAllmap.put("K" + i, "V"+ i );
-        doPutAll(uniqueName, putAllmap);
-        return null;
-      }
-    });
-    
-    // do some more puts 
-    // some order manipulation has been done because of an issue: 
-    // " a higher version update on a key can be batched and 
-    // sent to HDFS before a lower version update on the same key 
-    // is batched and sent to HDFS. This will cause the latest 
-    // update on a key in an older file. Hence, a fetch from HDFS 
-    // will return an older update from a newer file."
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        HashMap putAllmap = new HashMap();
-        for (int i =1100; i< 2000; i++)
-          putAllmap.put("K" + i, "V"+ i );
-        doPutAll(uniqueName, putAllmap);
-        putAllmap = new HashMap();
-        for (int i = 900; i< 1100; i++)
-          putAllmap.put("K" + i, "V"+ i );
-        doPutAll(uniqueName, putAllmap);
-        return null;
-      }
-      
-    });
-    
-    // get and getall the values and compare them. 
-    vm1.invoke(new SerializableCallable() {
-      public Object call() throws Exception {
-        checkWithGet(uniqueName, 0, 2000, true);
-        checkWithGet(uniqueName, 2000,  2001, false);
-        
-        ArrayList arrayl = new ArrayList();
-        for (int i =0; i< 2000; i++) {
-          String k = "K" + i;
-          arrayl.add(k);
-        }
-        checkWithGetAll(uniqueName, arrayl);
-        return null;
-      }
-    });
-    
-  }
-
-  public void _testWObasicClose() throws Throwable{
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-    VM vm3 = host.getVM(3);
-    
-    String homeDir = "../../testWObasicClose";
-    final String uniqueName = getName();
-
-    createServerRegion(vm0, 11, 1,  500, homeDir, uniqueName, 500, true, false);
-    createServerRegion(vm1, 11, 1,  500, homeDir, uniqueName, 500, true, false);
-    createServerRegion(vm2, 11, 1,  500, homeDir, uniqueName, 500, true, false);
-    createServerRegion(vm3, 11, 1,  500, homeDir, uniqueName, 500, true, false);
-    
-    AsyncInvocation a1 = doAsyncPuts(vm0, uniqueName, 1, 50, "vm0");
-    AsyncInvocation a2 = doAsyncPuts(vm1, uniqueName, 40, 100, "vm1");
-    AsyncInvocation a3 = doAsyncPuts(vm2, uniqueName, 40, 100, "vm2");
-    AsyncInvocation a4 = doAsyncPuts(vm3, uniqueName, 90, 150, "vm3");
-    
-    a1.join();
-    a2.join();
-    a3.join();
-    a4.join();
-   
-    Thread.sleep(5000); 
-    cacheClose (vm0, false);
-    cacheClose (vm1, false);
-    cacheClose (vm2, false);
-    cacheClose (vm3, false);
-    
-    AsyncInvocation async1 = createServerRegionAsync(vm0, 11, 1,  500, homeDir, uniqueName, 500, true, false);
-    AsyncInvocation async2 = createServerRegionAsync(vm1, 11, 1,  500, homeDir, uniqueName, 500, true, false);
-    AsyncInvocation async3 = createServerRegionAsync(vm2, 11, 1,  500, homeDir, uniqueName, 500, true, false);
-    AsyncInvocation async4 = createServerRegionAsync(vm3, 11, 1,  500, homeDir, uniqueName, 500, true, false);
-    async1.getResult();
-    async2.getResult();
-    async3.getResult();
-    async4.getResult();
-    
-    verifyHDFSData(vm0, uniqueName); 
-    
-    cacheClose (vm0, false);
-    cacheClose (vm1, false);
-    cacheClose (vm2, false);
-    cacheClose (vm3, false);
-  }
-  
-  
-  protected void cacheClose(VM vm, final boolean sleep){
-    vm.invoke( new SerializableCallable() {
-      public Object call() throws Exception {
-        if (sleep)
-          Thread.sleep(2000);
-        getCache().getLogger().info("Cache close in progress "); 
-        getCache().close();
-        getCache().getLogger().info("Cache closed");
-        return null;
-      }
-    });
-    
-  }
-  
-  protected void verifyInEntriesMap (HashMap<String, String> entriesMap, int start, int end, String suffix) {
-    for (int i =start; i< end; i++) {
-      String k = "K" + i;
-      String v = "V"+ i + suffix;
-      Object s = entriesMap.get(v);
-      assertTrue( "The expected key " + k+ " didn't match the received value " + s + ". value: " + v, k.equals(s));
-    }
-  }
-  
-  /**
-   * Reads all the sequence files and returns the list of key value pairs persisted. 
-   * Returns the key value pair as <value, key> tuple as there can be multiple values 
-   * for a key
-   * @throws Exception
-   */
-  protected HashMap<String, HashMap<String, String>>  createFilesAndEntriesMap(VM vm0, final String uniqueName, final String regionName) throws Exception {
-    HashMap<String, HashMap<String, String>> entriesToFileMap = (HashMap<String, HashMap<String, String>>) 
-    vm0.invoke( new SerializableCallable() {
-      public Object call() throws Exception {
-        HashMap<String, HashMap<String, String>> entriesToFileMap = new HashMap<String, HashMap<String, String>>();
-        HDFSStoreImpl hdfsStore = (HDFSStoreImpl) ((GemFireCacheImpl)getCache()).findHDFSStore(uniqueName);
-        FileSystem fs = hdfsStore.getFileSystem();
-        System.err.println("dumping file names in HDFS directory: " + hdfsStore.getHomeDir());
-        try {
-          Path basePath = new Path(hdfsStore.getHomeDir());
-          Path regionPath = new Path(basePath, regionName);
-          RemoteIterator<LocatedFileStatus> files = fs.listFiles(regionPath, true);
-          
-          while(files.hasNext()) {
-            HashMap<String, String> entriesMap = new HashMap<String, String>();
-            LocatedFileStatus next = files.next();
-            /* MergeGemXDHDFSToGFE - Disabled as I am not pulling in DunitEnv */
-            // System.err.println(DUnitEnv.get().getPid() + " - " + next.getPath());
-            System.err.println(" - " + next.getPath());
-            readSequenceFile(fs, next.getPath(), entriesMap);
-            entriesToFileMap.put(next.getPath().getName(), entriesMap);
-          }
-        } catch (FileNotFoundException e) {
-          // TODO Auto-generated catch block
-          e.printStackTrace();
-        } catch (IOException e) {
-          // TODO Auto-generated catch block
-          e.printStackTrace();
-        }
-        
-        return entriesToFileMap;
-      }
-      @SuppressWarnings("deprecation")
-      public void readSequenceFile(FileSystem inputFS, Path sequenceFileName,  
-          HashMap<String, String> entriesMap) throws IOException {
-        SequenceFileHoplog hoplog = new SequenceFileHoplog(inputFS, sequenceFileName, null);
-        HoplogIterator<byte[], byte[]> iter = hoplog.getReader().scan();
-        try {
-          while (iter.hasNext()) {
-            iter.next();
-            PersistedEventImpl te = UnsortedHoplogPersistedEvent.fromBytes(iter.getValue());
-            String stringkey = ((String)CacheServerHelper.deserialize(iter.getKey()));
-            String value = (String) te.getDeserializedValue();
-            entriesMap.put(value, stringkey);
-            if (getCache().getLoggerI18n().fineEnabled())
-              getCache().getLoggerI18n().fine("Key: " + stringkey + " value: " + value  + " path " + sequenceFileName.getName());
-          }
-        } catch (Exception e) {
-          assertTrue(e.toString(), false);
-        }
-        iter.close();
-        hoplog.close();
-     }
-    });
-    return entriesToFileMap;
-  }
- protected SerializableCallable validateEmpty(VM vm0, final int numEntries, final String uniqueName) {
-    SerializableCallable validateEmpty = new SerializableCallable("validateEmpty") {
-      public Object call() throws Exception {
-        Region r = getRootRegion(uniqueName);
-        
-        assertTrue(r.isEmpty());
-        
-        //validate region is empty on peer as well
-        assertFalse(r.entrySet().iterator().hasNext());
-        //Make sure the region is empty
-        for (int i =0; i< numEntries; i++) {
-          assertEquals("failure on key K" + i , null, r.get("K" + i));
-        }
-        
-        return null;
-      }
-    };
-    
-    vm0.invoke(validateEmpty);
-    return validateEmpty;
-  }
-
-  protected void closeCache(VM vm0) {
-    //Restart and validate still empty.
-    SerializableRunnable closeCache = new SerializableRunnable("close cache") {
-      @Override
-      public void run() {
-        getCache().close();
-        disconnectFromDS();
-      }
-    };
-    
-    vm0.invoke(closeCache);
-  }
-
-  protected void verifyDataInHDFS(VM vm0, final String uniqueName, final boolean shouldHaveData,
-      final boolean wait, final boolean waitForQueueToDrain, final int numEntries) {
-        vm0.invoke(new SerializableCallable("check for data in hdfs") {
-          @Override
-          public Object call() throws Exception {
-            
-            HDFSRegionDirector director = HDFSRegionDirector.getInstance();
-            final SortedOplogStatistics stats = director.getHdfsRegionStats("/" + uniqueName);
-            waitForCriterion(new WaitCriterion() {
-              @Override
-              public boolean done() {
-                return stats.getActiveFileCount() > 0 == shouldHaveData;
-              }
-              
-              @Override
-              public String description() {
-                return "Waiting for active file count to be greater than 0: " + stats.getActiveFileCount() + " stats=" + System.identityHashCode(stats);
-              }
-            }, 30000, 100, true);
-            
-            if(waitForQueueToDrain) {
-              PartitionedRegion region = (PartitionedRegion) getCache().getRegion(uniqueName);
-              final AsyncEventQueueStats queueStats = region.getHDFSEventQueueStats();
-              waitForCriterion(new WaitCriterion() {
-                @Override
-                public boolean done() {
-                  return queueStats.getEventQueueSize() <= 0;
-                }
-                
-                @Override
-                public String description() {
-                  return "Waiting for queue stats to reach 0: " + queueStats.getEventQueueSize();
-                }
-              }, 30000, 100, true);
-            }
-            return null;
-          }
-        });
-      }
-
-  protected void doPuts(VM vm0, final String uniqueName, final int numEntries) {
-    // Do some puts
-    vm0.invoke(new SerializableCallable("do puts") {
-      public Object call() throws Exception {
-        Region r = getRootRegion(uniqueName);
-        for (int i =0; i< numEntries; i++)
-          r.put("K" + i, "V"+ i );
-        return null;
-      }
-    });
-  }
-
-  protected void validate(VM vm1, final String uniqueName, final int numEntries) {
-    SerializableCallable validate = new SerializableCallable("validate") {
-      public Object call() throws Exception {
-        Region r = getRootRegion(uniqueName);
-        
-        for (int i =0; i< numEntries; i++) {
-          assertEquals("failure on key K" + i , "V"+ i, r.get("K" + i));
-        }
-        
-        return null;
-      }
-    };
-    vm1.invoke(validate);
-  }
-
-  protected void dumpFiles(VM vm0, final String uniqueName) {
-    vm0.invoke(new SerializableRunnable() {
-  
-      @Override
-      public void run() {
-        HDFSStoreImpl hdfsStore = (HDFSStoreImpl) ((GemFireCacheImpl)getCache()).findHDFSStore(uniqueName);
-        FileSystem fs;
-        try {
-          fs = hdfsStore.getFileSystem();
-        } catch (IOException e1) {
-          throw new HDFSIOException(e1.getMessage(), e1);
-        }
-        System.err.println("dumping file names in HDFS directory: " + hdfsStore.getHomeDir());
-        try {
-          RemoteIterator<LocatedFileStatus> files = fs.listFiles(new Path(hdfsStore.getHomeDir()), true);
-          
-          while(files.hasNext()) {
-            LocatedFileStatus next = files.next();
-            /* MergeGemXDHDFSToGFE - Disabled as I am not pulling in DunitEnv */
-            // System.err.println(DUnitEnv.get().getPid() + " - " + next.getPath());
-            System.err.println(" - " + next.getPath());
-          }
-        } catch (FileNotFoundException e) {
-          // TODO Auto-generated catch block
-          e.printStackTrace();
-        } catch (IOException e) {
-          // TODO Auto-generated catch block
-          e.printStackTrace();
-        }
-        
-      }
-      
-    });
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
deleted file mode 100644
index 07d9f77..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
+++ /dev/null
@@ -1,389 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.net.URI;
-import java.util.HashSet;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeMap;
-
-import junit.framework.TestCase;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.SerializedCacheValue;
-import com.gemstone.gemfire.cache.TransactionId;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHDFSQueuePersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl.FileSystemFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.HFileStoreStatistics;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.internal.cache.versions.DiskVersionTag;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-
-import dunit.DistributedTestCase;
-import dunit.DistributedTestCase.ExpectedException;
-
-public abstract class BaseHoplogTestCase extends TestCase {
-  public static final String HDFS_STORE_NAME = "hdfs";
-  public static final Random rand = new Random(System.currentTimeMillis());
-  protected Path testDataDir;
-  protected Cache cache;
-  
-  protected HDFSRegionDirector director; 
-  protected HdfsRegionManager regionManager;
-  protected HDFSStoreFactory hsf;
-  protected HDFSStoreImpl hdfsStore;
-  protected RegionFactory<Object, Object> regionfactory;
-  protected Region<Object, Object> region;
-  protected SortedOplogStatistics stats;
-  protected HFileStoreStatistics storeStats;
-  protected BlockCache blockCache;
-  
-  Set<ExpectedException> exceptions = new HashSet<ExpectedException>();
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
-    
-    //This is logged by HDFS when it is stopped.
-    exceptions.add(DistributedTestCase.addExpectedException("sleep interrupted"));
-    exceptions.add(DistributedTestCase.addExpectedException("java.io.InterruptedIOException"));
-    
-    testDataDir = new Path("test-case");
-
-    cache = createCache();
-    
-    configureHdfsStoreFactory();
-    hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
-
-    regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
-//    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
-    region = regionfactory.create(getName());
-    
-    // disable compaction by default and clear existing queues
-    HDFSCompactionManager compactionManager = HDFSCompactionManager.getInstance(hdfsStore);
-    compactionManager.reset();
-    
-    director = HDFSRegionDirector.getInstance();
-    director.setCache(cache);
-    regionManager = ((LocalRegion)region).getHdfsRegionManager();
-    stats = director.getHdfsRegionStats("/" + getName());
-    storeStats = hdfsStore.getStats();
-    blockCache = hdfsStore.getBlockCache();
-    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
-  }
-
-  protected void configureHdfsStoreFactory() throws Exception {
-    hsf = this.cache.createHDFSStoreFactory();
-    hsf.setHomeDir(testDataDir.toString());
-    hsf.setMinorCompaction(false);
-    hsf.setMajorCompaction(false);
-  }
-
-  protected Cache createCache() {
-    CacheFactory cf = new CacheFactory().set("mcast-port", "0")
-        .set("log-level", "info")
-        ;
-    cache = cf.create();
-    return cache;
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    if (region != null) {
-      region.destroyRegion();
-    }
-    
-    if (hdfsStore != null) {
-      hdfsStore.getFileSystem().delete(testDataDir, true);
-      hdfsStore.destroy();
-    }
-    
-    if (cache != null) {
-      cache.close();
-    }
-    super.tearDown();
-    for (ExpectedException ex: exceptions) {
-      ex.remove();
-    }
-  }
-
-  /**
-   * creates a hoplog file with numKeys records. Keys follow key-X pattern and values follow value-X
-   * pattern where X=0 to X is = numKeys -1
-   * 
-   * @return the sorted map of inserted KVs
-   */
-  protected TreeMap<String, String> createHoplog(int numKeys, Hoplog oplog) throws IOException {
-    int offset = (numKeys > 10 ? 100000 : 0);
-    
-    HoplogWriter writer = oplog.createWriter(numKeys);
-    TreeMap<String, String> map = new TreeMap<String, String>();
-    for (int i = offset; i < (numKeys + offset); i++) {
-      String key = ("key-" + i);
-      String value = ("value-" + System.nanoTime());
-      writer.append(key.getBytes(), value.getBytes());
-      map.put(key, value);
-    }
-    writer.close();
-    return map;
-  }
-  
-  protected FileStatus[] getBucketHoplogs(String regionAndBucket, final String type)
-      throws IOException {
-    return getBucketHoplogs(hdfsStore.getFileSystem(), regionAndBucket, type);
-  }
-  
-  protected FileStatus[] getBucketHoplogs(FileSystem fs, String regionAndBucket, final String type)
-      throws IOException {
-    FileStatus[] hoplogs = fs.listStatus(
-        new Path(testDataDir, regionAndBucket), new PathFilter() {
-          @Override
-          public boolean accept(Path file) {
-            return file.getName().endsWith(type);
-          }
-        });
-    return hoplogs;
-  }
-
-  protected String getRandomHoplogName() {
-    String hoplogName = "hoplog-" + System.nanoTime() + "-" + rand.nextInt(10000) + ".hop";
-    return hoplogName;
-  }
-  
-//  public static MiniDFSCluster initMiniCluster(int port, int numDN) throws Exception {
-//    HashMap<String, String> map = new HashMap<String, String>();
-//    map.put(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
-//    return initMiniCluster(port, numDN, map);
-//  }
-//
-//  public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
-//    System.setProperty("test.build.data", "hdfs-test-cluster");
-//    Configuration hconf = new HdfsConfiguration();
-//    for (Entry<String, String> entry : map.entrySet()) {
-//      hconf.set(entry.getKey(), entry.getValue());
-//    }
-//
-//    hconf.set("dfs.namenode.fs-limits.min-block-size", "1024");
-//    
-//    Builder builder = new MiniDFSCluster.Builder(hconf);
-//    builder.numDataNodes(numDN);
-//    builder.nameNodePort(port);
-//    MiniDFSCluster cluster = builder.build();
-//    return cluster;
-//  }
-
-  public static void setConfigFile(HDFSStoreFactory factory, File configFile, String config)
-      throws Exception {
-    BufferedWriter bw = new BufferedWriter(new FileWriter(configFile));
-    bw.write(config);
-    bw.close();
-    factory.setHDFSClientConfigFile(configFile.getName());
-  }
-  
-  public static void alterMajorCompaction(HDFSStoreImpl store, boolean enable) {
-    HDFSStoreMutator mutator = store.createHdfsStoreMutator();
-    mutator.setMajorCompaction(enable);
-    store.alter(mutator);
-  }
-  
-  public static void alterMinorCompaction(HDFSStoreImpl store, boolean enable) {
-    HDFSStoreMutator mutator = store.createHdfsStoreMutator();
-    mutator.setMinorCompaction(enable);
-    store.alter(mutator);
-  }
-  
-  public void deleteMiniClusterDir() throws Exception {
-    File clusterDir = new File("hdfs-test-cluster");
-    if (clusterDir.exists()) {
-      FileUtils.deleteDirectory(clusterDir);
-    }
-  }
-  
-  public static class TestEvent extends SortedHDFSQueuePersistedEvent {
-    Object key;
-    
-    public TestEvent(String k, String v) throws Exception {
-      this(k, v, Operation.PUT_IF_ABSENT);
-    }
-
-    public TestEvent(String k, String v, Operation op) throws Exception {
-      super(v, op, (byte) 0x02, false, new DiskVersionTag(), BlobHelper.serializeToBlob(k), 0);
-      this.key = k; 
-    }
-
-    public Object getKey() {
-      return key;
-      
-    }
-
-    public Object getNewValue() {
-      return valueObject;
-    }
-
-    public Operation getOperation() {
-      return op;
-    }
-    
-    public Region<Object, Object> getRegion() {
-      return null;
-    }
-
-    public Object getCallbackArgument() {
-      return null;
-    }
-
-    public boolean isCallbackArgumentAvailable() {
-      return false;
-    }
-
-    public boolean isOriginRemote() {
-      return false;
-    }
-
-    public DistributedMember getDistributedMember() {
-      return null;
-    }
-
-    public boolean isExpiration() {
-      return false;
-    }
-
-    public boolean isDistributed() {
-      return false;
-    }
-
-    public Object getOldValue() {
-      return null;
-    }
-
-    public SerializedCacheValue<Object> getSerializedOldValue() {
-      return null;
-    }
-
-    public SerializedCacheValue<Object> getSerializedNewValue() {
-      return null;
-    }
-
-    public boolean isLocalLoad() {
-      return false;
-    }
-
-    public boolean isNetLoad() {
-      return false;
-    }
-
-    public boolean isLoad() {
-      return false;
-    }
-
-    public boolean isNetSearch() {
-      return false;
-    }
-
-    public TransactionId getTransactionId() {
-      return null;
-    }
-
-    public boolean isBridgeEvent() {
-      return false;
-    }
-
-    public boolean hasClientOrigin() {
-      return false;
-    }
-
-    public boolean isOldValueAvailable() {
-      return false;
-    }
-  }
-  
-  public abstract class AbstractCompactor implements Compactor {
-    @Override
-    public HDFSStore getHdfsStore() {
-      return hdfsStore;
-    }
-
-    public void suspend() {
-    }
-
-    public void resume() {
-    }
-
-    public boolean isBusy(boolean isMajor) {
-      return false;
-    }
-  }
-  
-  public HDFSStoreFactoryImpl getCloseableLocalHdfsStoreFactory() {
-    final FileSystemFactory fsFactory = new FileSystemFactory() {
-      // by default local FS instance is not disabled by close. Hence this
-      // customization
-      class CustomFileSystem extends LocalFileSystem {
-        boolean isClosed = false;
-
-        public void close() throws IOException {
-          isClosed = true;
-          super.close();
-        }
-
-        public FileStatus getFileStatus(Path f) throws IOException {
-          if (isClosed) {
-            throw new IOException();
-          }
-          return super.getFileStatus(f);
-        }
-      }
-
-      public FileSystem create(URI namenode, Configuration conf, boolean forceNew) throws IOException {
-        CustomFileSystem fs = new CustomFileSystem();
-        fs.initialize(namenode, conf);
-        return fs;
-      }
-    };
-
-    HDFSStoreFactoryImpl storeFactory = new HDFSStoreFactoryImpl(cache) {
-      public HDFSStore create(String name) {
-        return new HDFSStoreImpl(name, this.configHolder) {
-          public FileSystemFactory getFileSystemFactory() {
-            return fsFactory;
-          }
-        };
-      }
-    };
-    return storeFactory;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java
deleted file mode 100644
index db050b3..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/CardinalityEstimatorJUnitTest.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Operation;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class CardinalityEstimatorJUnitTest extends BaseHoplogTestCase {
-
-  public void testSingleHoplogCardinality() throws Exception {
-    int count = 10;
-    int bucketId = (int) System.nanoTime();
-    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
-    // flush and create hoplog
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < count; i++) {
-      items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
-    }
-    // assert that size is 0 before flush begins
-    assertEquals(0, organizer.sizeEstimate());
-    organizer.flush(items.iterator(), count);
-
-    assertEquals(count, organizer.sizeEstimate());
-    assertEquals(0, stats.getActiveReaderCount());
-    
-    organizer.close();
-    organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-    assertEquals(count, organizer.sizeEstimate());
-    assertEquals(1, stats.getActiveReaderCount());
-  }
-
-  public void testSingleHoplogCardinalityWithDuplicates() throws Exception {
-    int bucketId = (int) System.nanoTime();
-    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
-    List<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent("key-0", "value-0"));
-    items.add(new TestEvent("key-0", "value-0"));
-    items.add(new TestEvent("key-1", "value-1"));
-    items.add(new TestEvent("key-2", "value-2"));
-    items.add(new TestEvent("key-3", "value-3"));
-    items.add(new TestEvent("key-3", "value-3"));
-    items.add(new TestEvent("key-4", "value-4"));
-
-    organizer.flush(items.iterator(), 7);
-    assertEquals(5, organizer.sizeEstimate());
-  }
-
-  public void testMultipleHoplogCardinality() throws Exception {
-    int bucketId = (int) System.nanoTime();
-    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
-    List<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent("key-0", "value-0"));
-    items.add(new TestEvent("key-1", "value-1"));
-    items.add(new TestEvent("key-2", "value-2"));
-    items.add(new TestEvent("key-3", "value-3"));
-    items.add(new TestEvent("key-4", "value-4"));
-
-    organizer.flush(items.iterator(), 5);
-    assertEquals(5, organizer.sizeEstimate());
-
-    items.clear();
-    items.add(new TestEvent("key-1", "value-0"));
-    items.add(new TestEvent("key-5", "value-5"));
-    items.add(new TestEvent("key-6", "value-6"));
-    items.add(new TestEvent("key-7", "value-7"));
-    items.add(new TestEvent("key-8", "value-8"));
-    items.add(new TestEvent("key-9", "value-9"));
-
-    organizer.flush(items.iterator(), 6);
-    assertEquals(10, organizer.sizeEstimate());
-  }
-
-  public void testCardinalityAfterRestart() throws Exception {
-    int bucketId = (int) System.nanoTime();
-    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
-    List<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent("key-0", "value-0"));
-    items.add(new TestEvent("key-1", "value-1"));
-    items.add(new TestEvent("key-2", "value-2"));
-    items.add(new TestEvent("key-3", "value-3"));
-    items.add(new TestEvent("key-4", "value-4"));
-
-    assertEquals(0, organizer.sizeEstimate());
-    organizer.flush(items.iterator(), 5);
-    assertEquals(5, organizer.sizeEstimate());
-
-    // restart
-    organizer.close();
-    organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-    assertEquals(5, organizer.sizeEstimate());
-    
-    items.clear();
-    items.add(new TestEvent("key-1", "value-0"));
-    items.add(new TestEvent("key-5", "value-5"));
-    items.add(new TestEvent("key-6", "value-6"));
-    items.add(new TestEvent("key-7", "value-7"));
-    items.add(new TestEvent("key-8", "value-8"));
-    items.add(new TestEvent("key-9", "value-9"));
-
-    organizer.flush(items.iterator(), 6);
-    assertEquals(10, organizer.sizeEstimate());
-
-    // restart - make sure that HLL from the youngest file is read
-    organizer.close();
-    organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-    assertEquals(10, organizer.sizeEstimate());
-    
-    items.clear();
-    items.add(new TestEvent("key-1", "value-1"));
-    items.add(new TestEvent("key-5", "value-5"));
-    items.add(new TestEvent("key-10", "value-10"));
-    items.add(new TestEvent("key-11", "value-11"));
-    items.add(new TestEvent("key-12", "value-12"));
-    items.add(new TestEvent("key-13", "value-13"));
-    items.add(new TestEvent("key-14", "value-14"));
-
-    organizer.flush(items.iterator(), 7);
-    assertEquals(15, organizer.sizeEstimate());
-  }
-
-  public void testCardinalityAfterMajorCompaction() throws Exception {
-    doCardinalityAfterCompactionWork(true);
-  }
-
-  public void testCardinalityAfterMinorCompaction() throws Exception {
-    doCardinalityAfterCompactionWork(false);
-  }
-
-  private void doCardinalityAfterCompactionWork(boolean isMajor) throws Exception {
-    int bucketId = (int) System.nanoTime();
-    HoplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
-    List<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent("key-0", "value-0"));
-    items.add(new TestEvent("key-1", "value-1"));
-    items.add(new TestEvent("key-2", "value-2"));
-    items.add(new TestEvent("key-3", "value-3"));
-    items.add(new TestEvent("key-4", "value-4"));
-
-    organizer.flush(items.iterator(), 5);
-    assertEquals(5, organizer.sizeEstimate());
-
-    items.clear();
-    items.add(new TestEvent("key-0", "value-0"));
-    items.add(new TestEvent("key-1", "value-5", Operation.DESTROY));
-    items.add(new TestEvent("key-2", "value-6", Operation.INVALIDATE));
-    items.add(new TestEvent("key-5", "value-5"));
-
-    organizer.flush(items.iterator(), 4);
-    assertEquals(6, organizer.sizeEstimate());
-
-    items.clear();
-    items.add(new TestEvent("key-3", "value-5", Operation.DESTROY));
-    items.add(new TestEvent("key-4", "value-6", Operation.INVALIDATE));
-    items.add(new TestEvent("key-5", "value-0"));
-    items.add(new TestEvent("key-6", "value-5"));
-
-    organizer.flush(items.iterator(), 4);
-    
-    items.add(new TestEvent("key-5", "value-0"));
-    items.add(new TestEvent("key-6", "value-5"));
-    
-    items.clear();
-    organizer.flush(items.iterator(), items.size());
-    assertEquals(7, organizer.sizeEstimate());
-
-    organizer.getCompactor().compact(isMajor, false);
-    assertEquals(3, organizer.sizeEstimate());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java
deleted file mode 100644
index 67dcddf..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HDFSCacheLoaderJUnitTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.util.List;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.AttributesMutator;
-import com.gemstone.gemfire.cache.CacheLoader;
-import com.gemstone.gemfire.cache.CacheLoaderException;
-import com.gemstone.gemfire.cache.LoaderHelper;
-import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueStats;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-/**
- * Tests that entries loaded from a cache loader are inserted in the HDFS queue 
- * 
- * @author hemantb
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSCacheLoaderJUnitTest extends BaseHoplogTestCase {
-
-  private static int totalEventsReceived = 0;
-  protected void configureHdfsStoreFactory() throws Exception {
-    hsf = this.cache.createHDFSStoreFactory();
-    hsf.setHomeDir(testDataDir.toString());
-    hsf.setBatchInterval(100000000);
-    hsf.setBatchSize(10000);
-  }
-
-  /**
-   * Tests that entries loaded from a cache loader are inserted in the HDFS queue 
-   * but are not inserted in async queues. 
-   * @throws Exception
-   */
-  public void testCacheLoaderForAsyncQAndHDFS() throws Exception {
-    
-    final AsyncEventQueueStats hdfsQueuestatistics = ((AsyncEventQueueImpl)cache.
-        getAsyncEventQueues().toArray()[0]).getStatistics();
-    
-    AttributesMutator am = this.region.getAttributesMutator();
-    am.setCacheLoader(new CacheLoader() {
-      private int i = 0;
-      public Object load(LoaderHelper helper)
-      throws CacheLoaderException {
-        return new Integer(i++);
-      }
-      
-      public void close() { }
-    });
-    
-    
-    
-    String asyncQueueName = "myQueue";
-    new AsyncEventQueueFactoryImpl(cache).setBatchTimeInterval(1).
-    create(asyncQueueName, new AsyncEventListener() {
-      
-      @Override
-      public void close() {
-        // TODO Auto-generated method stub
-        
-      }
-
-      @Override
-      public boolean processEvents(List events) {
-        totalEventsReceived += events.size();
-        return true;
-      }
-    });
-    am.addAsyncEventQueueId(asyncQueueName);
-    
-    region.put(1, new Integer(100));
-    region.destroy(1);
-    region.get(1);
-    region.destroy(1);
-    
-    assertTrue("HDFS queue should have received four events. But it received " + 
-        hdfsQueuestatistics.getEventQueueSize(), 4 == hdfsQueuestatistics.getEventQueueSize());
-    assertTrue("HDFS queue should have received four events. But it received " + 
-        hdfsQueuestatistics.getEventsReceived(), 4 == hdfsQueuestatistics.getEventsReceived());
-    
-    region.get(1);
-    Thread.sleep(2000);
-    
-    assertTrue("Async queue should have received only 5 events. But it received " + 
-        totalEventsReceived, totalEventsReceived == 5);
-    assertTrue("HDFS queue should have received 5 events. But it received " + 
-        hdfsQueuestatistics.getEventQueueSize(), 5 == hdfsQueuestatistics.getEventQueueSize());
-    assertTrue("HDFS queue should have received 5 events. But it received " + 
-        hdfsQueuestatistics.getEventsReceived(), 5 == hdfsQueuestatistics.getEventsReceived());
-    
-    
-  }
-  
-}


[05/50] [abbrv] incubator-geode git commit: [GEODE-440] RangeIndexAPIJUnitTest.testQueryMethod_2 Added a flag to ignore the update in progress calculation for querying

Posted by ds...@apache.org.
[GEODE-440] RangeIndexAPIJUnitTest.testQueryMethod_2
Added a flag to ignore the update in progress calculation for querying


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f8935b3b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f8935b3b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f8935b3b

Branch: refs/heads/develop
Commit: f8935b3b81e3abc9a5e0a0a9d91f89a449aca251
Parents: 17d0061
Author: Jason Huynh <jh...@pivotal.io>
Authored: Tue Oct 20 13:33:12 2015 -0700
Committer: Jason Huynh <jh...@pivotal.io>
Committed: Tue Oct 20 13:33:12 2015 -0700

----------------------------------------------------------------------
 .../gemstone/gemfire/cache/query/internal/index/IndexManager.java | 3 ++-
 .../cache/query/internal/index/RangeIndexAPIJUnitTest.java        | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f8935b3b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/IndexManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/IndexManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/IndexManager.java
index 0a9b166..2c597a8 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/IndexManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/IndexManager.java
@@ -134,6 +134,7 @@ public class IndexManager  {
   public static final int INDEX_ELEMARRAY_THRESHOLD = Integer.parseInt(System.getProperty(INDEX_ELEMARRAY_THRESHOLD_PROP,"100"));
   public static final int INDEX_ELEMARRAY_SIZE = Integer.parseInt(System.getProperty(INDEX_ELEMARRAY_SIZE_PROP,"5"));
   public final static AtomicLong SAFE_QUERY_TIME = new AtomicLong(0);
+  public static boolean ENABLE_UPDATE_IN_PROGRESS_INDEX_CALCULATION = true;
   /** The NULL constant */
   public static final Object NULL = new NullToken();
 
@@ -205,7 +206,7 @@ public class IndexManager  {
    * @param lastModifiedTime
    */
   public static boolean needsRecalculation(long queryStartTime, long lastModifiedTime) {
-    return queryStartTime <= SAFE_QUERY_TIME.get() - queryStartTime + lastModifiedTime;
+    return ENABLE_UPDATE_IN_PROGRESS_INDEX_CALCULATION && queryStartTime <= SAFE_QUERY_TIME.get() - queryStartTime + lastModifiedTime;
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f8935b3b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/RangeIndexAPIJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/RangeIndexAPIJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/RangeIndexAPIJUnitTest.java
index b9f6ff8..430728c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/RangeIndexAPIJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/RangeIndexAPIJUnitTest.java
@@ -56,6 +56,7 @@ public class RangeIndexAPIJUnitTest {
   @Before
   public void setUp() throws java.lang.Exception {
     CacheUtils.startCache();
+    IndexManager.ENABLE_UPDATE_IN_PROGRESS_INDEX_CALCULATION = false;
     region = CacheUtils.createRegion("portfolios", Portfolio.class);
     for (int i = 0; i < 12; i++) {
       //CacheUtils.log(new Portfolio(i));
@@ -71,6 +72,7 @@ public class RangeIndexAPIJUnitTest {
 
   @After
   public void tearDown() throws java.lang.Exception {
+    IndexManager.ENABLE_UPDATE_IN_PROGRESS_INDEX_CALCULATION = true;
     CacheUtils.closeCache();
   }
 


[40/50] [abbrv] incubator-geode git commit: fixed GEODE-412.

Posted by ds...@apache.org.
fixed GEODE-412.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/791a4184
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/791a4184
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/791a4184

Branch: refs/heads/develop
Commit: 791a4184bc1b97d02a1412e618fdb11c1e22dabf
Parents: ded1454
Author: Hitesh Khamesra <hk...@pivotal.io>
Authored: Fri Oct 23 09:43:06 2015 -0700
Committer: Hitesh Khamesra <hk...@pivotal.io>
Committed: Fri Oct 23 10:09:45 2015 -0700

----------------------------------------------------------------------
 .../OnGroupsFunctionExecutionDUnitTest.java     | 30 ++++++++++++++------
 1 file changed, 22 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/791a4184/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/OnGroupsFunctionExecutionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/OnGroupsFunctionExecutionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/OnGroupsFunctionExecutionDUnitTest.java
index 9ea26f3..b2e0219 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/OnGroupsFunctionExecutionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/OnGroupsFunctionExecutionDUnitTest.java
@@ -77,7 +77,9 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
     public void execute(FunctionContext context) {
       getLogWriter().fine("SWAP:1:executing OnGroupsFunction:"+invocationCount);
       InternalDistributedSystem ds = InternalDistributedSystem.getConnectedInstance();
-      invocationCount++;
+      synchronized (OnGroupsFunction.class) {
+    	  invocationCount++;
+      }
       ArrayList<String> l = (ArrayList<String>) context.getArguments();
       if (l != null) {
         assertFalse(Collections.disjoint(l, ds.getDistributedMember().getGroups()));
@@ -136,9 +138,12 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
       @Override
       public Object call() throws Exception {
         OnGroupsFunction f = (OnGroupsFunction) FunctionService.getFunction(OnGroupsFunction.Id);
-        assertEquals(count, f.invocationCount);
+        
         // assert succeeded, reset count
-        f.invocationCount = 0;
+        synchronized (OnGroupsFunction.class) {
+        	assertEquals(count, f.invocationCount);
+        	f.invocationCount = 0;
+        }
         return null;
       }
     });
@@ -149,8 +154,11 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
       @Override
       public Object call() throws Exception {
         OnGroupsFunction f = (OnGroupsFunction) FunctionService.getFunction(OnGroupsFunction.Id);
-        int count = f.invocationCount;
-        f.invocationCount = 0;
+        int count  = 0 ;
+        synchronized (OnGroupsFunction.class) {
+        	count = f.invocationCount;
+        	f.invocationCount = 0;
+        }
         return count;
       }
     });
@@ -161,8 +169,12 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
       @Override
       public Object call() throws Exception {
         OnGroupsFunction f = (OnGroupsFunction) FunctionService.getFunction(OnGroupsFunction.Id);
-        int count = f.invocationCount;
-        f.invocationCount = 0;
+        int count = 0;
+        synchronized (OnGroupsFunction.class) {
+        	count = f.invocationCount;
+            f.invocationCount = 0;
+		}
+        
         return count;
       }
     });
@@ -173,7 +185,9 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
       @Override
       public Object call() throws Exception {
         OnGroupsFunction f = (OnGroupsFunction) FunctionService.getFunction(OnGroupsFunction.Id);
-        f.invocationCount = 0;
+        synchronized (OnGroupsFunction.class) {
+        	f.invocationCount = 0;
+		}
         return null;
       }
     });


[21/50] [abbrv] incubator-geode git commit: GEODE-429: Remove HdfsStore Junit and Dunits

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
deleted file mode 100644
index e6a1229..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HdfsSortedOplogOrganizerJUnitTest.java
+++ /dev/null
@@ -1,1045 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.TreeSet;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.regex.Matcher;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer.HoplogComparator;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogOrganizer.Compactor;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.TieredCompactionJUnitTest.TestHoplog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-import dunit.DistributedTestCase;
-import dunit.DistributedTestCase.ExpectedException;
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HdfsSortedOplogOrganizerJUnitTest extends BaseHoplogTestCase {
-  /**
-   * Tests flush operation
-   */
-  public void testFlush() throws Exception {
-    int count = 10;
-    int bucketId = (int) System.nanoTime();
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
-    // flush and create hoplog
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < count; i++) {
-      items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
-    }
-    organizer.flush(items.iterator(), count);
-
-    // check file existence in bucket directory
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId, 
-                      HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
-    // only one hoplog should exists
-    assertEquals(1, hoplogs.length);
-    
-    assertEquals(count, organizer.sizeEstimate());
-    assertEquals(0, stats.getActiveReaderCount());
-  }
-
-  /**
-   * Tests reads from a set of hoplogs containing both valid and stale KVs
-   */
-  public void testReopen() throws Exception {
-    int bucketId = (int) System.nanoTime();
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-    
-    // flush and create hoplog
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < 100; i++) {
-      items.add(new TestEvent("" + i, ("1-1")));
-    }
-    organizer.flush(items.iterator(), items.size());
-    
-    Hoplog hoplog = organizer.getSortedOplogs().iterator().next().get();
-    byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
-    hoplog.close();
-    
-    for (int i = 0; i < 10; i++) {
-      Path path = new Path(testDataDir, getName() + "/" + bucketId + "/" + hoplog.getFileName());
-      HFileSortedOplog oplog = new HFileSortedOplog(hdfsStore, path, blockCache, stats, storeStats);
-      oplog.getReader().read(keyBytes1);
-      oplog.close(false);
-    }
-  }
-  
-  /**
-   * Tests reads from a set of hoplogs containing both valid and stale KVs
-   */
-  public void testRead() throws Exception {
-    doRead(regionManager);
-  }
-  
-//  public void testNewReaderWithNameNodeHA() throws Exception {
-//    deleteMiniClusterDir();
-//    int nn1port = AvailablePortHelper.getRandomAvailableTCPPort();
-//    int nn2port = AvailablePortHelper.getRandomAvailableTCPPort();
-//    
-//    MiniDFSCluster cluster = initMiniHACluster(nn1port, nn2port);
-//    initClientHAConf(nn1port, nn2port);
-//    
-//    HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
-//    regionfactory.setHDFSStoreName(store1.getName());
-//    Region<Object, Object> region1 = regionfactory.create("region-1");
-//    HdfsRegionManager regionManager1 = ((LocalRegion)region1).getHdfsRegionManager();
-//    
-//    HoplogOrganizer<SortedHoplogPersistedEvent> organizer = doRead(regionManager1);
-//    organizer.close();
-//    
-//    dunit.DistributedTestCase.ExpectedException ex = DistributedTestCase.addExpectedException("java.io.EOFException");
-//    NameNode nnode2 = cluster.getNameNode(1);
-//    assertTrue(nnode2.isStandbyState());
-//    cluster.shutdownNameNode(0);
-//    cluster.transitionToActive(1);
-//    assertFalse(nnode2.isStandbyState());
-//    
-//    organizer = new HdfsSortedOplogOrganizer(regionManager1, 0);
-//    byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
-//    byte[] keyBytes3 = BlobHelper.serializeToBlob("3");
-//    byte[] keyBytes4 = BlobHelper.serializeToBlob("4");
-//    assertEquals("2-1", organizer.read(keyBytes1).getValue());
-//    assertEquals("3-3", organizer.read(keyBytes3).getValue());
-//    assertEquals("1-4", organizer.read(keyBytes4).getValue());
-//    ex.remove();
-//
-//    region1.destroyRegion();
-//    store1.destroy();
-//    cluster.shutdown();
-//    FileUtils.deleteDirectory(new File("hdfs-test-cluster"));
-//  }
-  
-//  public void testActiveReaderWithNameNodeHA() throws Exception {
-//    deleteMiniClusterDir();
-//    int nn1port = AvailablePortHelper.getRandomAvailableTCPPort();
-//    int nn2port = AvailablePortHelper.getRandomAvailableTCPPort();
-//    
-//    MiniDFSCluster cluster = initMiniHACluster(nn1port, nn2port);
-//    initClientHAConf(nn1port, nn2port);
-//    
-//    HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
-//    regionfactory.setHDFSStoreName(store1.getName());
-//    Region<Object, Object> region1 = regionfactory.create("region-1");
-//    HdfsRegionManager regionManager1 = ((LocalRegion)region1).getHdfsRegionManager();
-//    
-//    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager1, 0);
-//    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-//    for (int i = 100000; i < 101000; i++) {
-//      items.add(new TestEvent(("" + i), (i + " some string " + i)));
-//    }
-//    organizer.flush(items.iterator(), items.size());
-//    organizer.getSortedOplogs().get(0).get().getReader();
-//    
-//    dunit.DistributedTestCase.ExpectedException ex = DistributedTestCase.addExpectedException("java.io.EOFException");
-//    NameNode nnode2 = cluster.getNameNode(1);
-//    assertTrue(nnode2.isStandbyState());
-//    cluster.shutdownNameNode(0);
-//    cluster.transitionToActive(1);
-//    assertFalse(nnode2.isStandbyState());
-//    
-//    for (int i = 100000; i < 100500; i++) {
-//      byte[] keyBytes1 = BlobHelper.serializeToBlob("" + i);
-//      assertEquals(i + " some string " + i, organizer.read(keyBytes1).getValue());
-//    }
-//    ex.remove();
-//    region1.destroyRegion();
-//    store1.destroy();
-//    cluster.shutdown();
-//    FileUtils.deleteDirectory(new File("hdfs-test-cluster"));
-//  }
-  
-//  public void testFlushWithNameNodeHA() throws Exception {
-//    deleteMiniClusterDir();
-//    int nn1port = AvailablePortHelper.getRandomAvailableTCPPort();
-//    int nn2port = AvailablePortHelper.getRandomAvailableTCPPort();
-//    
-//    MiniDFSCluster cluster = initMiniHACluster(nn1port, nn2port);
-//    
-//    initClientHAConf(nn1port, nn2port);
-//    HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
-//    
-//    regionfactory.setHDFSStoreName(store1.getName());
-//    Region<Object, Object> region1 = regionfactory.create("region-1");
-//    HdfsRegionManager regionManager1 = ((LocalRegion)region1).getHdfsRegionManager();
-//    
-//    HoplogOrganizer<SortedHoplogPersistedEvent> organizer = new HdfsSortedOplogOrganizer(regionManager1, 0);
-//    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-//    items.add(new TestEvent(("1"), ("1-1")));
-//    organizer.flush(items.iterator(), items.size());
-//
-//    dunit.DistributedTestCase.ExpectedException ex = DistributedTestCase.addExpectedException("java.io.EOFException");
-//    NameNode nnode2 = cluster.getNameNode(1);
-//    assertTrue(nnode2.isStandbyState());
-//    cluster.shutdownNameNode(0);
-//    cluster.transitionToActive(1);
-//    assertFalse(nnode2.isStandbyState());
-//    
-//    items.add(new TestEvent(("4"), ("1-4")));
-//    organizer.flush(items.iterator(), items.size());
-//    byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
-//    byte[] keyBytes4 = BlobHelper.serializeToBlob("4");
-//    assertEquals("1-1", organizer.read(keyBytes1).getValue());
-//    assertEquals("1-4", organizer.read(keyBytes4).getValue());
-//    ex.remove();
-//    
-//    region1.destroyRegion();
-//    store1.destroy();
-//    cluster.shutdown();
-//    FileUtils.deleteDirectory(new File("hdfs-test-cluster"));
-//  }
-
-  public HoplogOrganizer<SortedHoplogPersistedEvent> doRead(HdfsRegionManager rm) throws Exception {
-    HoplogOrganizer<SortedHoplogPersistedEvent> organizer = new HdfsSortedOplogOrganizer(rm, 0);
-
-    // flush and create hoplog
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("1"), ("1-1")));
-    items.add(new TestEvent(("4"), ("1-4")));
-    organizer.flush(items.iterator(), items.size());
-
-    items.clear();
-    items.add(new TestEvent(("1"), ("2-1")));
-    items.add(new TestEvent(("3"), ("2-3")));
-    organizer.flush(items.iterator(), items.size());
-
-    items.clear();
-    items.add(new TestEvent(("3"), ("3-3")));
-    items.add(new TestEvent(("5"), ("3-5")));
-    organizer.flush(items.iterator(), items.size());
-
-    // check file existence in bucket directory
-    FileStatus[] hoplogs = getBucketHoplogs(rm.getStore().getFileSystem(),
-        rm.getRegionFolder() + "/" + 0,
-        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
-    // expect 3 files are 3 flushes
-    assertEquals(3, hoplogs.length);
-    byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
-    byte[] keyBytes3 = BlobHelper.serializeToBlob("3");
-    byte[] keyBytes4 = BlobHelper.serializeToBlob("4");
-    // expect key 1 from hoplog 2
-    assertEquals("2-1", organizer.read(keyBytes1).getValue());
-    // expect key 3 from hoplog 3
-    assertEquals("3-3", organizer.read(keyBytes3).getValue());
-    // expect key 4 from hoplog 1
-    assertEquals("1-4", organizer.read(keyBytes4).getValue());
-    return organizer;
-  }
-
-  /**
-   * Tests bucket organizer initialization during startup. Existing hoplogs should identified and
-   * returned
-   */
-  public void testHoplogIdentification() throws Exception {
-    // create one empty file and one directories in bucket directory
-    Path bucketPath = new Path(testDataDir, getName() + "/0");
-    FileSystem fs = hdfsStore.getFileSystem();
-    fs.createNewFile(new Path(bucketPath, "temp_file"));
-    fs.mkdirs(new Path(bucketPath, "temp_dir"));
-
-    // create 2 hoplogs files each of type flush, minor and major hoplog
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    String[] extensions = { HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
-        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
-        HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION,
-        HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION,
-        HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION,
-        HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION};
-    for (String string : extensions) {
-      Hoplog oplog = organizer.getTmpSortedOplog(null, string);
-      createHoplog(0, oplog);
-      organizer.makeLegitimate(oplog);
-    }
-
-    // create a temp hoplog
-    Hoplog oplog = organizer.getTmpSortedOplog(null, HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    createHoplog(0, oplog);
-
-    // bucket directory should have 6 hoplogs, 1 temp log, 1 misc file and 1 directory
-    FileStatus[] results = fs.listStatus(bucketPath);
-    assertEquals(9, results.length);
-
-    // only two are hoplogs
-    List<Hoplog> list = organizer.identifyAndLoadSortedOplogs(true);
-    assertEquals(6, list.size());
-  }
-
-  public void testExpiryMarkerIdentification() throws Exception {
-    // epxired hoplogs from the list below should be deleted
-    String[] files = {
-        "0-1-1231" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
-        "0-2-1232" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
-        "0-3-1233" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
-        "0-4-1234" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
-        "0-5-1235" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION };
-    
-    Path bucketPath = new Path(testDataDir, getName() + "/0");
-    FileSystem fs = hdfsStore.getFileSystem();
-    for (String file : files) {
-      Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
-          blockCache, stats, storeStats);
-      createHoplog(10, oplog);
-    }
-
-    String marker1 = "0-4-1234"
-        + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
-        + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
-    fs.createNewFile(new Path(bucketPath, marker1));
-    String marker2 = "0-5-1235"
-        + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
-        + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION;
-    fs.createNewFile(new Path(bucketPath, marker2));    
-    
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
-    assertEquals(7, hoplogs.length);
-    
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
-        regionManager, 0);
-    
-    FileStatus[] markers = organizer.getExpiryMarkers();
-    // one hoplog and one exp marker will be deletion targets
-    assertEquals(2, markers.length);
-    for (FileStatus marker : markers) {
-      String name = marker.getPath().getName();
-      assertTrue(name.equals(marker1) || name.equals(marker2));
-    }
-    organizer.close();
-  }
-  
-  public void testExpiredHoplogCleanup() throws Exception {
-    // epxired hoplogs from the list below should be deleted
-    String[] files = {
-        "0-1-0000" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
-        "0-1-1111" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
-        "0-1-1111" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION
-        + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
-        
-        "0-2-0000" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
-        "0-2-2222" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
-        
-        "0-3-0000" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
-        "0-3-3333" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
-        "0-3-3333" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
-            + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
-        
-        "0-4-4444" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION };
-    
-    Path bucketPath = new Path(testDataDir, getName() + "/0");
-    FileSystem fs = hdfsStore.getFileSystem();
-    for (String file : files) {
-      if (file.endsWith(AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION)) {
-        fs.createNewFile(new Path(bucketPath, file));
-        continue;
-      }
-      Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
-          blockCache, stats, storeStats);
-      createHoplog(10, oplog);
-    }
-    
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
-    assertEquals(9, hoplogs.length);
-
-    long target = System.currentTimeMillis();
-    TimeUnit.SECONDS.sleep(1);
-    
-    // all but minor compacted files from below this will not be deleted as it
-    // is after target delete time
-    files = new String[] { 
-        "0-4-4444" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION
-            + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
-            
-        "0-5-5555" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
-            + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION,
-        "0-5-5555" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
-        
-        "0-6-6666" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
-    };
-    for (String file : files) {
-      if (file.endsWith(AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION)) {
-        fs.createNewFile(new Path(bucketPath, file));
-        continue;
-      }
-      Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
-          blockCache, stats, storeStats);
-      createHoplog(10, oplog);
-    }
-    
-    hoplogs = getBucketHoplogs(getName() + "/0", "");
-    assertEquals(13, hoplogs.length);
-    int hopSize = 0;
-    for (FileStatus file : hoplogs) {
-      if(file.getLen() > hopSize) {
-        hopSize = (int) file.getLen();
-      }
-    }
-
-    final AtomicInteger behavior = new AtomicInteger(0);
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
-      @Override
-      protected FileStatus[] getExpiryMarkers() throws IOException {
-        if (behavior.get() == 1) {
-          ArrayList<FileStatus> markers = new ArrayList<FileStatus>();
-          for (FileStatus marker : super.getExpiryMarkers()) {
-            markers.add(marker);
-          }
-          // inject a dummy old expiry marker for major compacted file
-          long age = 2 * HDFSStore.DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS * 60 * 1000;
-          String markerName = "0-2-2222" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION + EXPIRED_HOPLOG_EXTENSION;
-          FileStatus marker = new FileStatus(0, false, 1, 1024, System.currentTimeMillis() - age, new Path(bucketPath, markerName));
-          markers.add(marker);
-          return markers.toArray(new FileStatus[markers.size()]);
-        }
-        return super.getExpiryMarkers();
-      }
-    };
-
-    List<FileStatus> list = organizer.getOptimizationTargets(target);
-    assertEquals(6, list.size());
-
-    behavior.set(1);
-    list = organizer.getOptimizationTargets(target);
-    assertEquals(8, list.size());
-    
-    assertEquals(9 * hopSize, stats.getStoreUsageBytes());
-    int count = organizer.deleteExpiredFiles(list);
-    assertEquals(8, count);
-    assertEquals(5 * hopSize, stats.getStoreUsageBytes());
-    
-    List<FileStatus> tmp = new ArrayList<FileStatus>(Arrays.asList(hoplogs));
-    for (Iterator<FileStatus> iter = tmp.iterator(); iter.hasNext();) {
-      hoplogs = getBucketHoplogs(getName() + "/0", "");
-      FileStatus file = iter.next();
-      for (FileStatus hoplog : hoplogs) {
-        if(hoplog.getPath().getName().startsWith("0-5-5555")) {
-          fail("this file should have been deleted" + hoplog.getPath().getName());
-        }
-
-        if (hoplog.getPath().getName().equals(file.getPath().getName())) {
-          iter.remove();
-          break;
-        }
-      }
-    }
-
-    assertEquals(7, tmp.size());
-    organizer.close();
-  }
-  
-  public void testAlterPurgeInterval() throws Exception {
-    // epxired hoplogs from the list below should be deleted
-    String[] files = {
-        "0-1-0000" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
-        "0-1-1111" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
-        "0-2-2222" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
-        "0-4-4444" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION };
-    
-    Path bucketPath = new Path(testDataDir, getName() + "/0");
-    hdfsStore.getFileSystem();
-    for (String file : files) {
-      Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
-          blockCache, stats, storeStats);
-      createHoplog(10, oplog);
-    }
-    
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
-    int hopSize = 0;
-    for (FileStatus file : hoplogs) {
-      if(file.getLen() > hopSize) {
-        hopSize = (int) file.getLen();
-      }
-    }
-
-    final AtomicInteger behavior = new AtomicInteger(0);
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
-      @Override
-      protected FileStatus[] getExpiryMarkers() throws IOException {
-        if (behavior.get() == 1) {
-          ArrayList<FileStatus> markers = new ArrayList<FileStatus>();
-          // inject dummy old expiry markers
-          long age = 120 * 1000; // 120 seconds old
-          String markerName = "0-2-2222" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION + EXPIRED_HOPLOG_EXTENSION;
-          FileStatus marker = new FileStatus(0, false, 1, 1024, System.currentTimeMillis() - age, new Path(bucketPath, markerName));
-          markers.add(marker);
-          markerName = "0-4-4444" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION + EXPIRED_HOPLOG_EXTENSION;
-          marker = new FileStatus(0, false, 1, 1024, System.currentTimeMillis() - age, new Path(bucketPath, markerName));
-          markers.add(marker);
-          return markers.toArray(new FileStatus[markers.size()]);
-        }
-        return super.getExpiryMarkers();
-      }
-    };
-
-    behavior.set(1);
-    int count = organizer.initiateCleanup();
-    assertEquals(0, count);
-    
-    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-    mutator.setPurgeInterval(1);
-    hdfsStore.alter(mutator);
-    count = organizer.initiateCleanup();
-    assertEquals(4, count);
-  }
-  
-  public void testInUseExpiredHoplogCleanup() throws Exception {
-    Path bucketPath = new Path(testDataDir, getName() + "/0");
-    FileSystem fs = hdfsStore.getFileSystem();
-    
-    String[] files = new String[] {
-        "0-1-1231" + AbstractHoplogOrganizer.FLUSH_HOPLOG_EXTENSION,
-        "0-2-1232" + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION,
-        "0-3-1233" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
-        "0-4-1234" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION,
-        "0-5-1235" + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION };
-    
-    for (String file : files) {
-      Hoplog oplog = new HFileSortedOplog(hdfsStore, new Path(bucketPath, file),
-          blockCache, stats, storeStats);
-      createHoplog(10, oplog);
-    }
-    
-    final HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(
-        regionManager, 0);
-    List<TrackedReference<Hoplog>> hopRefs = organizer.getSortedOplogs();
-    assertEquals(files.length, hopRefs.size());
-    
-    // this is expiry marker for one of the files that will be compacted below.
-    // While compaction is going on file deletion should not happen
-    files = new String[] { "0-5-1235"
-        + AbstractHoplogOrganizer.MINOR_HOPLOG_EXTENSION
-        + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION };
-    
-    for (String file : files) {
-      fs.createNewFile(new Path(bucketPath, file));
-    }
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/0", "");
-    assertEquals(hopRefs.size() + files.length, hoplogs.length);
-    
-    TimeUnit.MILLISECONDS.sleep(200);
-    long target = System.currentTimeMillis();
-    List<FileStatus> list = organizer.getOptimizationTargets(target);
-    assertEquals(2, list.size());
-    
-    for (TrackedReference<Hoplog> ref : hopRefs) {
-      ref.increment("test");
-    }
-
-    fs.delete(new Path(bucketPath, files[0]), false);
-    
-    TimeUnit.MILLISECONDS.sleep(50);
-    organizer.markSortedOplogForDeletion(hopRefs, false);
-    
-    list = organizer.getOptimizationTargets(target);
-    assertEquals(0, list.size());
-    organizer.close();
-  }
-  
-  /**
-   * Tests max sequence initialization when file already exists and server starts
-   */
-  public void testSeqInitialization() throws Exception {
-    // create many hoplogs files
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    String[] extensions = { HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
-        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION,
-        HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION,
-        HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION,
-        HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION};
-    for (String string : extensions) {
-      Hoplog oplog = organizer.getTmpSortedOplog(null, string);
-      createHoplog(1, oplog);
-      organizer.makeLegitimate(oplog);
-    }
-
-    // a organizer should start creating files starting at 6 as five files already existed
-    organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    Hoplog oplog = organizer.getTmpSortedOplog(null, HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    createHoplog(1, oplog);
-    organizer.makeLegitimate(oplog);
-    assertEquals(6, HdfsSortedOplogOrganizer.getSequenceNumber(oplog));
-    organizer.close();
-  }
-
-  /**
-   * Tests temp file creation and making file legitimate
-   */
-  public void testMakeLegitimate() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
-    // create empty tmp hoplog
-    Hoplog oplog = organizer.getTmpSortedOplog(null, HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    createHoplog(0, oplog);
-
-    Path hoplogPath = new Path(testDataDir, getName() + "/0/" + oplog.getFileName());
-    FileSystem fs = hdfsStore.getFileSystem();
-    FileStatus hoplogStatus = fs.getFileStatus(hoplogPath);
-    assertNotNull(hoplogStatus);
-
-    organizer.makeLegitimate(oplog);
-
-    try {
-      hoplogStatus = fs.getFileStatus(hoplogPath);
-      assertNull(hoplogStatus);
-    } catch (FileNotFoundException e) {
-      // tmp file is renamed hence should not exist, exception expected
-    }
-
-    assertTrue(oplog.getFileName().endsWith(HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION));
-    hoplogPath = new Path(testDataDir, getName() + "/0/" + oplog.getFileName());
-    hoplogStatus = fs.getFileStatus(hoplogPath);
-    assertNotNull(hoplogStatus);
-  }
-
-  /**
-   * Tests hoplog file name comparator
-   */
-  public void testHoplogFileComparator() throws IOException {
-    String name1 = "bucket1-10-3.hop";
-    String name2 = "bucket1-1-20.hop";
-    String name3 = "bucket1-30-201.hop";
-    String name4 = "bucket1-100-201.hop";
-
-    TreeSet<TrackedReference<Hoplog>> list = new TreeSet<TrackedReference<Hoplog>>(new HoplogComparator());
-    // insert soplog is the list out of expected order
-    hdfsStore.getFileSystem();
-    list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name2), blockCache, stats, storeStats)));
-    list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name4), blockCache, stats, storeStats)));
-    list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name1), blockCache, stats, storeStats)));
-    list.add(new TrackedReference<Hoplog>(new HFileSortedOplog(hdfsStore, new Path(testDataDir, name3), blockCache, stats, storeStats)));
-
-    Iterator<TrackedReference<Hoplog>> iter = list.iterator();
-    assertEquals(name4, iter.next().get().getFileName());
-    assertEquals(name3, iter.next().get().getFileName());
-    assertEquals(name2, iter.next().get().getFileName());
-    assertEquals(name1, iter.next().get().getFileName());
-  }
-  
-  /**
-   * Tests clear on a set of hoplogs.
-   */
-  public void testClear() throws Exception {
-    int bucketId = (int) System.nanoTime();
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, bucketId);
-
-    // flush and create hoplog
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("1"), ("1-1")));
-    items.add(new TestEvent(("4"), ("1-4")));
-    organizer.flush(items.iterator(), items.size());
-
-    items.clear();
-    items.add(new TestEvent(("1"), ("2-1")));
-    items.add(new TestEvent(("3"), ("2-3")));
-    organizer.flush(items.iterator(), items.size());
-
-    items.clear();
-    items.add(new TestEvent(("3"), ("3-3")));
-    items.add(new TestEvent(("5"), ("3-5")));
-    organizer.flush(items.iterator(), items.size());
-
-    // check file existence in bucket directory
-    FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + bucketId, HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-
-    // expect 3 files are 3 flushes
-    assertEquals(3, hoplogs.length);
-    
-    organizer.clear();
-    
-    // check that all files are now expired
-    hoplogs = getBucketHoplogs(getName() + "/" + bucketId, HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    FileStatus[] exs = getBucketHoplogs(getName() + "/" + bucketId, HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    FileStatus[] valids = HdfsSortedOplogOrganizer.filterValidHoplogs(hoplogs, exs);
-    assertEquals(Collections.EMPTY_LIST, Arrays.asList(valids));
-    
-    assertEquals(0, stats.getActiveFileCount());
-    assertEquals(0, stats.getInactiveFileCount());
-  }
-  
-  public void testFixedIntervalMajorCompaction() throws Exception {
-    final AtomicInteger majorCReqCount = new AtomicInteger(0);
-    
-    final Compactor compactor = new AbstractCompactor() {
-      @Override
-      public boolean compact(boolean isMajor, boolean isForced) throws IOException {
-        majorCReqCount.incrementAndGet();
-        return true;
-      }
-    };
-    
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0) {
-      @Override
-      public synchronized Compactor getCompactor() {
-        return compactor;
-      }
-    };
-    
-    regionManager.addOrganizer(0, organizer);
-    
-    System.setProperty(HoplogConfig.JANITOR_INTERVAL_SECS, "1");
-    HDFSRegionDirector.resetJanitor();
-    
-    alterMajorCompaction(hdfsStore, true);
-    
-    // create hoplog in the past, 90 seconds before current time
-    organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, 100, System.currentTimeMillis() - 90000));
-    TimeUnit.MILLISECONDS.sleep(50);
-    organizer.hoplogCreated(getName(), 0, new TestHoplog(hdfsStore, 100, System.currentTimeMillis() - 90000));
-    
-    List<TrackedReference<Hoplog>> hoplogs = organizer.getSortedOplogs();
-    assertEquals(2, hoplogs.size());
-    
-    for (int i = 0; i < 3; i++) {
-      TimeUnit.SECONDS.sleep(1);
-      assertEquals(0, majorCReqCount.get());
-    }
-    HDFSStoreMutator mutator = hdfsStore.createHdfsStoreMutator();
-    mutator.setMajorCompactionInterval(1);
-    hdfsStore.alter(mutator);
-    TimeUnit.SECONDS.sleep(5);
-    assertTrue(3 < majorCReqCount.get());
-  }
-  
- 
-  public void testCorruptHfileBucketFail() throws Exception {
-    // create a corrupt file
-    FileSystem fs = hdfsStore.getFileSystem();
-    for (int i = 0; i < 113; i++) {
-      FSDataOutputStream opStream = fs.create(new Path(testDataDir.getName() + "/region-1/" + i + "/1-1-1.hop"));
-      opStream.writeBytes("Some random corrupt file");
-      opStream.close();
-    }
-      
-    // create region with store
-//    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
-    Region<Object, Object> region1 = regionfactory.create("region-1");
-    ExpectedException ex = DistributedTestCase.addExpectedException("CorruptHFileException");
-    try {
-      region1.get("key");
-      fail("get should have failed with corrupt file error");
-    } catch (HDFSIOException e) {
-      // expected
-    } finally {
-      ex.remove();
-    }
-    
-    region1.destroyRegion();
-  }
-
-  public void testMaxOpenReaders() throws Exception {
-    System.setProperty("hoplog.bucket.max.open.files", "5");
-    HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < 10; i++) {
-      items.clear();
-      items.add(new TestEvent("" + i, "" + i));
-      organizer.flush(items.iterator(), items.size());
-    }
-    
-    HdfsSortedOplogOrganizer bucket = (HdfsSortedOplogOrganizer) organizer;
-    List<TrackedReference<Hoplog>> hoplogs = bucket.getSortedOplogs();
-    int closedCount = 0 ;
-    for (TrackedReference<Hoplog> hoplog : hoplogs) {
-      HFileSortedOplog hfile = (HFileSortedOplog) hoplog.get();
-      if (hfile.isClosed()) { 
-        closedCount++;
-      }
-    }
-    assertEquals(10, closedCount);
-    assertEquals(10, stats.getActiveFileCount());
-    assertEquals(0, stats.getActiveReaderCount());
-    
-    byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
-    organizer.read(keyBytes1).getValue();
-    
-    closedCount = 0 ;
-    for (TrackedReference<Hoplog> hoplog : hoplogs) {
-      HFileSortedOplog hfile = (HFileSortedOplog) hoplog.get();
-      if (hfile.isClosed()) { 
-        closedCount++;
-      }
-    }
-    assertEquals(5, closedCount);
-    assertEquals(10, stats.getActiveFileCount());
-    assertEquals(0, stats.getInactiveFileCount());
-    assertEquals(5, stats.getActiveReaderCount());
-    
-    organizer.getCompactor().compact(false, false);
-    assertEquals(1, stats.getActiveFileCount());
-    assertEquals(0, stats.getActiveReaderCount());
-    assertEquals(0, stats.getInactiveFileCount());
-  }
-
-  public void testConcurrentReadInactiveClose() throws Exception {
-    final HoplogOrganizer<? extends PersistedEventImpl> organizer = regionManager.create(0);
-    alterMinorCompaction(hdfsStore, true);
-
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    for (int i = 0; i < 4; i++) {
-      items.clear();
-      items.add(new TestEvent("" + i, "" + i));
-      organizer.flush(items.iterator(), items.size());
-    }
-    
-    final byte[] keyBytes1 = BlobHelper.serializeToBlob("1");
-    class ReadTask implements Runnable {
-      public void run() {
-        try {
-          organizer.read(keyBytes1);
-        } catch (IOException e) {
-          e.printStackTrace();
-        }
-      }
-    }
-    ScheduledExecutorService[] readers = new ScheduledExecutorService[10];
-    for (int i = 0; i < readers.length; i++) {
-      readers[i] = Executors.newSingleThreadScheduledExecutor();
-      readers[i].scheduleWithFixedDelay(new ReadTask(), 0, 1, TimeUnit.MILLISECONDS);
-    }
-    
-    for (int i = 0; i < 100; i++) {
-      items.clear();
-      items.add(new TestEvent("" + i, "" + i));
-      organizer.flush(items.iterator(), items.size());
-    }
-    
-    for (int i = 0; i < readers.length; i++) {
-      readers[i].shutdown();
-      readers[i].awaitTermination(1, TimeUnit.SECONDS);
-      TimeUnit.MILLISECONDS.sleep(50);
-    }
-    
-    for (int i = 0; i < 20; i++) {
-      if (stats.getActiveFileCount() < 4) {
-        break;
-      }
-      organizer.getCompactor().compact(false, false);
-    }
-
-    organizer.performMaintenance();
-    TimeUnit.SECONDS.sleep(1);
-    
-    assertTrue("" + stats.getActiveFileCount(), stats.getActiveFileCount() <= 4);
-    assertEquals(stats.getActiveReaderCount(), stats.getActiveReaderCount());
-    assertEquals(0, stats.getInactiveFileCount());
-  }
-  
-  public void testEmptyBucketCleanup() throws Exception {
-    HdfsSortedOplogOrganizer o = new HdfsSortedOplogOrganizer(regionManager, 0);
-    long target = System.currentTimeMillis();
-    o.getOptimizationTargets(target);
-    // making sure empty bucket is not causing IO errors. no assertion needed
-    // for this test case.
-  }
-  
-  public void testExpiredFilterAtStartup() throws Exception {
-    HdfsSortedOplogOrganizer bucket = new HdfsSortedOplogOrganizer(regionManager, 0);
-    
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("1"), ("1-1")));
-    items.add(new TestEvent(("4"), ("1-4")));
-    bucket.flush(items.iterator(), items.size());
-    
-    items.clear();
-    items.add(new TestEvent(("1"), ("2-1")));
-    items.add(new TestEvent(("3"), ("2-3")));
-    bucket.flush(items.iterator(), items.size());
-    
-    FileStatus[] files = getBucketHoplogs(getName() + "/" + 0,
-        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-    
-    files = getBucketHoplogs(getName() + "/" + 0,
-        HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(0, files.length);
-    
-    HdfsSortedOplogOrganizer bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
-    List<TrackedReference<Hoplog>> hoplogs = bucket2.getSortedOplogs();
-    assertEquals(2, hoplogs.size());
-    
-    bucket.clear();
-    
-    files = getBucketHoplogs(getName() + "/" + 0,
-        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-    
-    files = getBucketHoplogs(getName() + "/" + 0,
-        HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-    
-    bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
-    hoplogs = bucket2.getSortedOplogs();
-    assertEquals(0, hoplogs.size());
-    
-    items.clear();
-    items.add(new TestEvent(("1"), ("2-1")));
-    items.add(new TestEvent(("3"), ("2-3")));
-    bucket.flush(items.iterator(), items.size());
-    
-    bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
-    hoplogs = bucket2.getSortedOplogs();
-    assertEquals(1, hoplogs.size());
-    bucket.close();
-    bucket2.close();
-  }
-
-  public void testExpireFilterRetartAfterClear() throws Exception {
-    HdfsSortedOplogOrganizer bucket = new HdfsSortedOplogOrganizer(regionManager, 0);
-    
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("1"), ("1-1")));
-    items.add(new TestEvent(("4"), ("1-4")));
-    bucket.flush(items.iterator(), items.size());
-
-    items.clear();
-    items.add(new TestEvent(("1"), ("2-1")));
-    items.add(new TestEvent(("3"), ("2-3")));
-    bucket.flush(items.iterator(), items.size());
-    
-    FileStatus[] files = getBucketHoplogs(getName() + "/" + 0,
-        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-    
-    files = getBucketHoplogs(getName() + "/" + 0,
-        HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(0, files.length);
-    
-    HdfsSortedOplogOrganizer bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
-    List<TrackedReference<Hoplog>> hoplogs = bucket2.getSortedOplogs();
-    assertEquals(2, hoplogs.size());
-    
-    bucket.clear();
-    
-    files = getBucketHoplogs(getName() + "/" + 0,
-        HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-    
-    files = getBucketHoplogs(getName() + "/" + 0,
-        HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(2, files.length);
-    
-    bucket2 = new HdfsSortedOplogOrganizer(regionManager, 0);
-    hoplogs = bucket2.getSortedOplogs();
-    assertEquals(0, hoplogs.size());
-    bucket.close();
-    bucket2.close();
-  }
-  
-  /**
-   * tests maintenance does not fail even if there are no hoplogs
-   */
-  public void testNoFileJanitor() throws Exception {
-    HoplogOrganizer<? extends PersistedEventImpl> organizer;
-    organizer = regionManager.create(0);
-    organizer.performMaintenance();
-  }
-  
-  public void testValidHoplogRegex() {
-    String[] valid = {"1-1-1.hop", "1-1-1.ihop", "1-1-1.chop"};
-    String[] invalid = {"1-1-1.khop", "1-1-1.hop.tmphop", "1-1-1.hop.ehop", "1-1-.hop", "-1-1.hop"};
-    
-    for (String string : valid) {
-      Matcher matcher = HdfsSortedOplogOrganizer.SORTED_HOPLOG_PATTERN.matcher(string);
-      assertTrue(matcher.matches());
-    }
-    
-    for (String string : invalid) {
-      Matcher matcher = HdfsSortedOplogOrganizer.SORTED_HOPLOG_PATTERN.matcher(string);
-      assertFalse(matcher.matches());
-    }
-  }
-  
-  public void testOneHoplogMajorCompaction() throws Exception {
-    HoplogOrganizer<? extends PersistedEventImpl> organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    alterMajorCompaction(hdfsStore, true);
-    
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("1"), ("1-1")));
-    organizer.flush(items.iterator(),items.size());    
-    
-    
-    FileStatus[] files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.FLUSH_HOPLOG_EXTENSION);
-    assertEquals(1, files.length);    
-    
-    //Minor compaction will not perform on 1 .hop file
-    organizer.getCompactor().compact(false, false);
-    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
-    assertEquals(0, files.length);
-    
-    //Major compaction will perform on 1 .hop file
-    organizer.getCompactor().compact(true, false);
-    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);     
-    assertEquals(1, files.length);
-    String hoplogName =files[0].getPath().getName();    
-    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
-    assertEquals(0, files.length);
-    
-    organizer.getCompactor().compact(true, false);
-    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    assertEquals(1, files.length);
-    assertEquals(hoplogName, files[0].getPath().getName());
-    
-    //Minor compaction does not convert major compacted file
-    organizer.getCompactor().compact(false, false);
-    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MINOR_HOPLOG_EXTENSION);
-    assertEquals(0, files.length);
-    
-    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.MAJOR_HOPLOG_EXTENSION);
-    assertEquals(1, files.length);
-    assertEquals(hoplogName, files[0].getPath().getName());
-    
-    files = getBucketHoplogs(getName() + "/0", HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION);
-    assertEquals(1, files.length);
-    assertNotSame(hoplogName + HdfsSortedOplogOrganizer.EXPIRED_HOPLOG_EXTENSION, files[0].getPath().getName() );
-  }
-
-  public void testExposeCleanupInterval() throws Exception {
-    FileSystem fs = hdfsStore.getFileSystem();
-    Path cleanUpIntervalPath = new Path(hdfsStore.getHomeDir(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
-    assertTrue(fs.exists(cleanUpIntervalPath));
-    long interval = HDFSStore.DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS
-        *60 * 1000;
-    assertEquals(interval, HoplogUtil.readCleanUpIntervalMillis(fs,cleanUpIntervalPath));
-  }
-  
-  @Override
-  protected void setUp() throws Exception {
-    System.setProperty(HoplogConfig.JANITOR_INTERVAL_SECS, "" + HoplogConfig.JANITOR_INTERVAL_SECS_DEFAULT);
-    super.setUp();
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java
deleted file mode 100644
index 7420437..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/HfileSortedOplogJUnitTest.java
+++ /dev/null
@@ -1,540 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.NoSuchElementException;
-import java.util.TreeMap;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogReader;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog.HoplogWriter;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogSetReader.HoplogIterator;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-import org.junit.experimental.categories.Category;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HfileSortedOplogJUnitTest extends BaseHoplogTestCase {
-	ArrayList<Object> toBeCleaned = new ArrayList<>();
-  
-  /**
-   * Tests hoplog creation using a writer. If this test fails, all the tests wills fail as hoplog
-   * creation is the first step
-   */
-  public void testHoplogWriter() throws Exception {
-    String hoplogName = getRandomHoplogName();
-    createHoplog(hoplogName, 1);
-    FileStatus hoplogStatus = hdfsStore.getFileSystem().getFileStatus(new Path(testDataDir, hoplogName));
-    assertNotNull(hoplogStatus);
-  }
-
-  /**
-   * Tests hoplog deletion.
-   */
-  public void testDeletion() throws Exception {
-    String hoplogName = getRandomHoplogName();
-    createHoplog(hoplogName, 1);
-    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-
-    testHoplog.delete();
-
-    try {
-      FileStatus hoplogStatus = hdfsStore.getFileSystem().getFileStatus(new Path(testDataDir, hoplogName));
-      // hoplog should not exists. fail if it does
-      assertNull("File deletion failed", hoplogStatus);
-    } catch (FileNotFoundException e) {
-      // exception expected after deletion
-    }
-  }
-
-  /**
-   * Tests hoplog reader creation and key based gets
-   */
-  public void testHoplogReader() throws Exception {
-    String hop1 = getRandomHoplogName();
-    Map<String, String> map = createHoplog(hop1, 10);
-
-    HFileSortedOplog testHoplog1 = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hop1), blockCache, stats, storeStats);
-    HoplogReader reader = testHoplog1.getReader();
-    // verify that each entry put in the hoplog is returned by reader
-    for (Entry<String, String> entry : map.entrySet()) {
-      byte[] value = reader.read(entry.getKey().getBytes());
-      assertNotNull(value);
-    }
-  }
-
-  /**
-   * Tests full iteration on a hoplog. Ensures all inserted keys are returned and no key is missing
-   */
-  public void testIterator() throws IOException {
-    int count = 10;
-    ByteArrayComparator bac = new ByteArrayComparator();
-
-    String hoplogName = getRandomHoplogName();
-    TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);
-
-    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-    HoplogReader reader = testHoplog.getReader();
-
-    Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
-    HoplogIterator<byte[], byte[]> iter = reader.scan();
-    for (; iter.hasNext();) {
-      byte[] key = iter.next();
-      Entry<String, String> entry = mapIter.next();
-      assertEquals(0, bac.compare(key, iter.getKey()));
-      assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
-      assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
-      count--;
-    }
-    assertEquals(0, count);
-  }
-
-  /**
-   * Tests hoplog iterator. after returning first key, has next should return false and all
-   * subsequent next calls should return null
-   */
-  public void testSingleKVIterator() throws Exception {
-    String hoplogName = getRandomHoplogName();
-    TreeMap<String, String> map = createHoplog(hoplogName, 1);
-    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-    HoplogReader reader = testHoplog.getReader();
-
-    HoplogIterator<byte[], byte[]> iter = reader.scan();
-    assertNull(iter.getKey());
-    assertNull(iter.getValue());
-    assertTrue(iter.hasNext());
-    assertNull(iter.getKey());
-    assertNull(iter.getValue());
-
-    Entry<String, String> entry = map.firstEntry();
-    iter.next();
-    assertNotNull(iter.getKey());
-    assertEquals(entry.getKey(), new String(iter.getKey()));
-    assertNotNull(iter.getValue());
-    assertEquals(entry.getValue(), new String(iter.getValue()));
-
-    assertFalse(iter.hasNext());
-    try {
-      iter.next();
-      fail();
-    } catch (NoSuchElementException e) {
-    }
-  }
-
-  /**
-   * Tests iteration on a hoplog with no keys, using a scanner. Scanner should not return any value
-   * and hasNext should return false everytime
-   */
-  public void testEmptyFileIterator() throws Exception {
-    String hoplogName = getRandomHoplogName();
-    createHoplog(hoplogName, 0);
-    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-    HoplogReader reader = testHoplog.getReader();
-    HoplogIterator<byte[], byte[]> iter = reader.scan();
-    assertNull(iter.getKey());
-    assertNull(iter.getValue());
-    assertFalse(iter.hasNext());
-    assertNull(iter.getKey());
-    assertNull(iter.getValue());
-    try {
-      iter.next();
-      fail();
-    } catch (NoSuchElementException e) {
-    }
-  }
-
-  /**
-   * Tests from exclusive iterator
-   */
-  public void testFromExclusiveIterator() throws Exception {
-    fromIterator(false);
-  }
-
-  /**
-   * Tests from inclusive iterator
-   */
-  public void testFromInclusiveIterator() throws Exception {
-    fromIterator(true);
-  }
-
-  /**
-   * Tests from condition based iteration. creates hoplog with 10 KVs. Creates a scanner starting at
-   * a middle key and verifies the count of KVs iterated on
-   */
-  public void fromIterator(boolean includeFrom) throws Exception {
-    int count = 10;
-    ByteArrayComparator bac = new ByteArrayComparator();
-
-    String hoplogName = getRandomHoplogName();
-    // sorted map contains the keys inserted in the hoplog for testing
-    TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);
-
-    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-    HoplogReader reader = testHoplog.getReader();
-
-    int middleKey = 4;
-    // remove top keys from the sorted map as the hoplog scanner should not
-    // return those
-    Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
-    for (int i = 0; i < middleKey; i++) {
-      mapIter.next();
-      count--;
-    }
-    if (!includeFrom) {
-      mapIter.next();
-      count--;
-    }
-
-    // keys are like Key-X, for X=0 till X=9. Start iterator at fifth key,
-    // key-4. if excluding from key, start at sixth key, key-5.
-    HoplogIterator<byte[], byte[]> iter = reader.scan(("key-" + middleKey).getBytes(), includeFrom,
-        null, true);
-
-    for (; iter.hasNext();) {
-      byte[] key = iter.next();
-      Entry<String, String> entry = mapIter.next();
-      // make sure the KV returned by iterator match the inserted KV
-      assertEquals(0, bac.compare(key, iter.getKey()));
-      assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
-      assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
-      count--;
-    }
-    assertEquals(0, count);
-  }
-
-  /**
-   * Tests to exclusive iterator
-   */
-  public void testToExclusiveIterator() throws Exception {
-    toIterator(false);
-  }
-
-  /**
-   * Tests to inclusive iterator
-   */
-  public void testToInclusiveIterator() throws Exception {
-    toIterator(true);
-  }
-
-  /**
-   * Tests to condition based iteration. creates hoplog with 10 KVs. Creates a scanner ending at
-   * a middle key and verifies the count of KVs iterated on
-   */
-  public void toIterator(boolean includeTo) throws Exception {
-    int count = 10;
-    ByteArrayComparator bac = new ByteArrayComparator();
-    
-    String hoplogName = getRandomHoplogName();
-    // sorted map contains the keys inserted in the hoplog for testing
-    TreeMap<String, String> sortedMap = createHoplog(hoplogName, count);
-    Iterator<Entry<String, String>> mapIter = sortedMap.entrySet().iterator();
-    
-    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-    HoplogReader reader = testHoplog.getReader();
-    
-    int middleKey = 4;
-    // keys are like Key-X, for X=0 till X=9. End iterator at fifth key,
-    // key-4. if excluding to key, end at fourth key, key-3.
-    HoplogIterator<byte[], byte[]> iter = reader.scan(null, true, ("key-" + middleKey).getBytes(), includeTo);
-    
-    for (; iter.hasNext();) {
-      byte[] key = iter.next();
-      Entry<String, String> entry = mapIter.next();
-      // make sure the KV returned by iterator match the inserted KV
-      assertEquals(0, bac.compare(key, iter.getKey()));
-      assertEquals(0, bac.compare(key, entry.getKey().getBytes()));
-      assertEquals(0, bac.compare(iter.getValue(), entry.getValue().getBytes()));
-      
-      count --;
-    }
-    
-    if (includeTo) {
-      count++;
-    }
-
-    assertEquals(10, count + middleKey);
-  }
-  
-  /**
-   * Tests whether sortedoplog supports duplicate keys, required when conflation is disabled
-   */
-  public void testFromToIterator() throws IOException {
-    ByteArrayComparator bac = new ByteArrayComparator();
-    String hoplogName = getRandomHoplogName();
-    HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-    
-    int count = 5;
-    HoplogWriter writer = hoplog.createWriter(5);
-    for (int i = 0; i < count; i++) {
-      String value = "value-" + (i * 2);
-      // even keys key-[0 2 4 6 8]
-      writer.append(("key-" + (i * 2)).getBytes(), value.getBytes());
-    }
-    writer.close();
-    
-    HoplogReader reader = hoplog.getReader();
-    HoplogIterator<byte[], byte[]> iter = reader.scan("key-1".getBytes(), true, "key-7".getBytes(), true);
-
-    for (int i = 2; i < 7; i += 2) {
-      assertTrue(iter.hasNext());
-      iter.next();
-      assertEquals(0, bac.compare(("key-" + i).getBytes(), iter.getKey()));
-      assertEquals(0, bac.compare(("value-" + i).getBytes(), iter.getValue()));
-      System.out.println(new String(iter.getKey()));
-    }
-    assertFalse(iter.hasNext());
-  }
-  
-  /**
-   * Tests whether sortedoplog supports duplicate keys, required when conflation is disabled
-   */
-  public void testDuplicateKeys() throws IOException {
-    String hoplogName = getRandomHoplogName();
-    HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-
-    // write duplicate keys
-    int count = 2;
-    HoplogWriter writer = hoplog.createWriter(2);
-    List<String> values = new ArrayList<String>();
-    for(int i = 1; i <= count; i++) {
-      String value = "value" + i;
-      writer.append("key-1".getBytes(), value.getBytes());
-      values.add(value);
-    }
-    writer.close();
-
-    HoplogReader reader = hoplog.getReader();
-    HoplogIterator<byte[], byte[]> scanner = reader.scan();
-    for (byte[] key = null; scanner.hasNext();) {
-      key = scanner.next();
-      count--;
-      assertEquals(0, Bytes.compareTo(key, "key-1".getBytes()));
-      values.remove(new String(scanner.getValue()));
-    }
-    assertEquals(0, count);
-    assertEquals(0, values.size());
-  }
-  
-  public void testOffsetBasedScan() throws Exception {
-    // Each record is 43 bytes. each block is 256 bytes. each block will have 6
-    // records
-     
-    int blocksize = 1 << 8;
-    System.setProperty(HoplogConfig.HFILE_BLOCK_SIZE_CONF,
-        String.valueOf(blocksize));
-
-    int count = 50;
-    String hoplogName = getRandomHoplogName();
-    createHoplog(hoplogName, count);
-
-    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(
-        testDataDir, hoplogName), blockCache, stats, storeStats);
-
-    HoplogReader reader = testHoplog.getReader();
-    
-    HoplogIterator<byte[], byte[]> scanner = reader.scan(blocksize * 1, blocksize * 2);
-    int range1Count = 0;
-    String range1EndKey = null;
-    for (byte[] key = null; scanner.hasNext();) {
-      key = scanner.next();
-      range1Count++;
-      range1EndKey = new String(key);
-    }
-    int range1EndKeyNum = Integer.valueOf(range1EndKey.substring("Key-".length()));
-
-    scanner = reader.scan(blocksize * 2, blocksize * 1);
-    int range2Count = 0;
-    String range2EndKey = null;
-    for (byte[] key = null; scanner.hasNext();) {
-      key = scanner.next();
-      range2Count++;
-      range2EndKey = new String(key);
-    }
-    
-    assertEquals(range2EndKey, range1EndKey);
-    assertEquals(2, range1Count/range2Count);
-    
-    scanner = reader.scan(blocksize * 3, blocksize * 1);
-    String range3FirstKey = new String(scanner.next());
-    
-    int range3FirstKeyNum = Integer.valueOf(range3FirstKey.substring("Key-"
-        .length()));
-    
-    // range 3 starts at the end of range 1. so the two keys must be consecutive
-    assertEquals(range1EndKeyNum + 1, range3FirstKeyNum);
-    
-    testHoplog.close();
-  }
-  
-  public void testOffsetScanBeyondFileSize() throws Exception {
-    // Each record is 43 bytes. each block is 256 bytes. each block will have 6
-    // records
-    
-    int blocksize = 1 << 8;
-    System.setProperty(HoplogConfig.HFILE_BLOCK_SIZE_CONF,
-        String.valueOf(blocksize));
-    
-    int count = 20;
-    String hoplogName = getRandomHoplogName();
-    createHoplog(hoplogName, count);
-    
-    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(
-        testDataDir, hoplogName), blockCache, stats, storeStats);
-    
-    HoplogReader reader = testHoplog.getReader();
-    
-    HoplogIterator<byte[], byte[]> scanner = reader.scan(blocksize * 5, blocksize * 2);
-    assertFalse(scanner.hasNext());
-    
-    testHoplog.close();
-  }
-  
-  public void testZeroValueOffsetScan() throws Exception {
-    // Each record is 43 bytes. each block is 256 bytes. each block will have 6
-    // records
-    
-    int blocksize = 1 << 8;
-    System.setProperty(HoplogConfig.HFILE_BLOCK_SIZE_CONF,
-        String.valueOf(blocksize));
-    
-    int count = 20;
-    String hoplogName = getRandomHoplogName();
-    createHoplog(hoplogName, count);
-    
-    HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(
-        testDataDir, hoplogName), blockCache, stats, storeStats);
-    
-    HoplogReader reader = testHoplog.getReader();
-    
-    HoplogIterator<byte[], byte[]> scanner = reader.scan(0, blocksize * 2);
-    assertTrue(scanner.hasNext());
-    int keyNum = Integer.valueOf(new String(scanner.next()).substring("Key-"
-        .length()));
-    assertEquals(100000, keyNum);
-
-    testHoplog.close();
-  }
-  
-  /*
-   * Tests reader succeeds to read data even if FS client is recycled without
-   * this reader knowing
-   */
-  public void testReaderDetectAndUseRecycledFs() throws Exception {
-    HDFSStoreFactoryImpl storeFactory = getCloseableLocalHdfsStoreFactory();
-    HDFSStoreImpl store = (HDFSStoreImpl) storeFactory.create("Store-1");
-    toBeCleaned.add(store);
-
-    HFileSortedOplog hop = new HFileSortedOplog(store, new Path(getName() + "-1-1.hop"), blockCache, stats, storeStats);
-    toBeCleaned.add(hop);
-    TreeMap<String, String> map = createHoplog(10, hop);
-
-    HoplogReader reader = hop.getReader();
-    // verify that each entry put in the hoplog is returned by reader
-    for (Entry<String, String> entry : map.entrySet()) {
-      byte[] value = reader.read(entry.getKey().getBytes());
-      assertNotNull(value);
-    }
-
-    cache.getLogger().info("<ExpectedException action=add>java.io.IOException</ExpectedException>");
-    try {
-      store.getFileSystem().close();
-      store.checkAndClearFileSystem();
-      
-      for (Entry<String, String> entry : map.entrySet()) {
-        reader = hop.getReader();
-        byte[] value = reader.read(entry.getKey().getBytes());
-        assertNotNull(value);
-      }
-    } finally {
-      cache.getLogger().info("<ExpectedException action=remove>java.io.IOException</ExpectedException>");
-    }
-  }
-
-  public void testNewScannerDetechAndUseRecycledFs() throws Exception {
-    HDFSStoreFactoryImpl storeFactory = getCloseableLocalHdfsStoreFactory();
-    HDFSStoreImpl store = (HDFSStoreImpl) storeFactory.create("Store-1");
-    toBeCleaned.add(store);
-
-    HFileSortedOplog hop = new HFileSortedOplog(store, new Path(getName() + "-1-1.hop"), blockCache, stats, storeStats);
-    createHoplog(10, hop);
-
-    HoplogIterator<byte[], byte[]> scanner = hop.getReader().scan();
-    // verify that each entry put in the hoplog is returned by reader
-    int i = 0;
-    while (scanner.hasNext()) {
-      byte[] key = scanner.next();
-      assertNotNull(key);
-      i++;
-    }
-    assertEquals(10, i);
-    // flush block cache
-    hop.close(true);
-    hop.delete();
-    
-    hop = new HFileSortedOplog(store, new Path(getName()+"-1-1.hop"), blockCache, stats, storeStats);
-		createHoplog(10, hop);
-  	toBeCleaned.add(hop);
-    hop.getReader();
-    
-    cache.getLogger().info("<ExpectedException action=add>java.io.IOException</ExpectedException>");
-    try {
-      store.getFileSystem().close();
-      store.checkAndClearFileSystem();
-      
-      scanner = hop.getReader().scan();
-      // verify that each entry put in the hoplog is returned by reader
-      i = 0;
-      while (scanner.hasNext()) {
-        byte[] key = scanner.next();
-        assertNotNull(key);
-        i++;
-      }
-      assertEquals(10, i);
-    } finally {
-      cache.getLogger().info("<ExpectedException action=remove>java.io.IOException</ExpectedException>");
-    }
-  }
-  
-  @Override
-  protected void tearDown() throws Exception {
-    for (Object obj : toBeCleaned) {
-      try {
-        if (HDFSStoreImpl.class.isInstance(obj)) {
-          ((HDFSStoreImpl) obj).clearFolder();
-        } else if (AbstractHoplog.class.isInstance(obj)) {
-          ((AbstractHoplog) obj).close();
-          ((AbstractHoplog) obj).delete();
-        }
-      } catch (Exception e) {
-        System.out.println(e);
-      }
-    }
-    super.tearDown();
-  }
-    
-  private TreeMap<String, String> createHoplog(String hoplogName, int numKeys) throws IOException {
-    HFileSortedOplog hoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
-    TreeMap<String, String> map = createHoplog(numKeys, hoplog);
-    return map;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/74c3156a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java
deleted file mode 100644
index 13aa6a9..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/SortedOplogListIterJUnitTest.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal.hoplog;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.hdfs.internal.PersistedEventImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.SortedHoplogPersistedEvent;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.TrackedReference;
-import com.gemstone.gemfire.internal.util.BlobHelper;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class SortedOplogListIterJUnitTest extends BaseHoplogTestCase {
-  public void testOneIterOneKey() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("0"), ("0")));
-    organizer.flush(items.iterator(), items.size());
-
-    List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
-    HoplogSetIterator iter = new HoplogSetIterator(oplogs);
-    assertTrue(iter.hasNext());
-    int count = 0;
-    for (ByteBuffer keyBB = null; iter.hasNext();) {
-      keyBB = iter.next();
-      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
-      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
-      count++;
-    }
-    assertEquals(1, count);
-    organizer.close();
-  }
-  
-  public void testOneIterDuplicateKey() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("0"), ("V2")));
-    items.add(new TestEvent(("0"), ("V1")));
-    items.add(new TestEvent(("1"), ("V2")));
-    items.add(new TestEvent(("1"), ("V1")));
-    organizer.flush(items.iterator(), items.size());
-    
-    List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
-    HoplogSetIterator iter = new HoplogSetIterator(oplogs);
-    assertTrue(iter.hasNext());
-    int count = 0;
-    for (ByteBuffer keyBB = null; iter.hasNext();) {
-      keyBB = iter.next();
-      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
-      byte[] value = HFileSortedOplog.byteBufferToArray(iter.getValue());
-      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
-      assertEquals("V2", ((PersistedEventImpl) SortedHoplogPersistedEvent.fromBytes(value)).getValue());
-      count++;
-    }
-    assertEquals(2, count);
-    organizer.close();
-  }
-  
-  public void testTwoIterSameKey() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("0"), ("V1")));
-    organizer.flush(items.iterator(), items.size());
-    items.clear();
-    items.add(new TestEvent(("0"), ("V2")));
-    organizer.flush(items.iterator(), items.size());
-    
-    List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
-    HoplogSetIterator iter = new HoplogSetIterator(oplogs);
-    assertTrue(iter.hasNext());
-    int count = 0;
-    for (ByteBuffer keyBB = null; iter.hasNext();) {
-      keyBB = iter.next();
-      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
-      byte[] value = HFileSortedOplog.byteBufferToArray(iter.getValue());
-      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
-      assertEquals("V2", ((PersistedEventImpl) SortedHoplogPersistedEvent.fromBytes(value)).getValue());
-      count++;
-    }
-    assertEquals(1, count);
-    organizer.close();
-  }
-  
-  public void testTwoIterDiffKey() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-    
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("0"), ("V1")));
-    organizer.flush(items.iterator(), items.size());
-    items.clear();
-    items.add(new TestEvent(("1"), ("V1")));
-    organizer.flush(items.iterator(), items.size());
-    
-    List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
-    HoplogSetIterator iter = new HoplogSetIterator(oplogs);
-    assertTrue(iter.hasNext());
-    int count = 0;
-    for (ByteBuffer keyBB = null; iter.hasNext();) {
-      keyBB = iter.next();
-      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
-      byte[] value = HFileSortedOplog.byteBufferToArray(iter.getValue());
-      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
-      assertEquals("V1", ((PersistedEventImpl) SortedHoplogPersistedEvent.fromBytes(value)).getValue());
-      count++;
-    }
-    assertEquals(2, count);
-    organizer.close();
-  }
-  
-  public void testMergedIterator() throws Exception {
-    HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);
-
-    // #1
-    ArrayList<TestEvent> items = new ArrayList<TestEvent>();
-    items.add(new TestEvent(("1"), ("1")));
-    items.add(new TestEvent(("2"), ("1")));
-    items.add(new TestEvent(("3"), ("1")));
-    items.add(new TestEvent(("4"), ("1")));
-    organizer.flush(items.iterator(), items.size());
-
-    // #2
-    items.clear();
-    items.add(new TestEvent(("2"), ("1")));
-    items.add(new TestEvent(("4"), ("1")));
-    items.add(new TestEvent(("6"), ("1")));
-    items.add(new TestEvent(("8"), ("1")));
-    organizer.flush(items.iterator(), items.size());
-
-    // #3
-    items.clear();
-    items.add(new TestEvent(("1"), ("1")));
-    items.add(new TestEvent(("3"), ("1")));
-    items.add(new TestEvent(("5"), ("1")));
-    items.add(new TestEvent(("7"), ("1")));
-    items.add(new TestEvent(("9"), ("1")));
-    organizer.flush(items.iterator(), items.size());
-
-    // #4
-    items.clear();
-    items.add(new TestEvent(("0"), ("1")));
-    items.add(new TestEvent(("1"), ("1")));
-    items.add(new TestEvent(("4"), ("1")));
-    items.add(new TestEvent(("5"), ("1")));
-    organizer.flush(items.iterator(), items.size());
-
-    List<TrackedReference<Hoplog>> oplogs = organizer.getSortedOplogs();
-    HoplogSetIterator iter = new HoplogSetIterator(oplogs);
-    // the iteration pattern for this test should be 0-9:
-    // 0 1 4 5 oplog #4
-    // 1 3 5 7 9 oplog #3
-    // 2 4 6 8 oplog #2
-    // 1 2 3 4 oplog #1
-    int count = 0;
-    for (ByteBuffer keyBB = null; iter.hasNext();) {
-      keyBB = iter.next();
-      byte[] key = HFileSortedOplog.byteBufferToArray(keyBB);
-      assertEquals(String.valueOf(count), BlobHelper.deserializeBlob(key));
-      count++;
-    }
-    assertEquals(10, count);
-    organizer.close();
-  }
-}


[12/50] [abbrv] incubator-geode git commit: GEODE-429: Remove hdfsStore gfsh commands

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java
deleted file mode 100644
index c182edd..0000000
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/HDFSStoreCommandsController.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.management.internal.web.controllers;
-
-import org.springframework.shell.core.annotation.CliOption;
-import org.springframework.stereotype.Controller;
-import org.springframework.web.bind.annotation.PathVariable;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.RequestMethod;
-import org.springframework.web.bind.annotation.RequestParam;
-import org.springframework.web.bind.annotation.ResponseBody;
-
-import com.gemstone.gemfire.internal.lang.StringUtils;
-import com.gemstone.gemfire.management.cli.CliMetaData;
-import com.gemstone.gemfire.management.cli.ConverterHint;
-import com.gemstone.gemfire.management.cli.Result;
-import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
-import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
-
-/**
- * The HDFSStoreCommandsController class implements GemFire Management REST API web service endpoints for the
- * Gfsh Hdfs Store Commands.
- * <p/>
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommands
- * @see com.gemstone.gemfire.management.internal.web.controllers.AbstractCommandsController
- * @see org.springframework.stereotype.Controller
- * @see org.springframework.web.bind.annotation.PathVariable
- * @see org.springframework.web.bind.annotation.RequestMapping
- * @see org.springframework.web.bind.annotation.RequestMethod
- * @see org.springframework.web.bind.annotation.RequestParam
- * @see org.springframework.web.bind.annotation.ResponseBody
- * @since 9.0
- */
-@Controller("hdfsStoreController")
-@RequestMapping(AbstractCommandsController.REST_API_VERSION)
-@SuppressWarnings("unused")
-public class HDFSStoreCommandsController extends AbstractCommandsController {
-  @RequestMapping(method = RequestMethod.GET, value = "/hdfsstores")
-  @ResponseBody
-  public String listHDFSStores() {
-    String my= processCommand(CliStrings.LIST_HDFS_STORE);
-    return my;
-  }
-  
-  @RequestMapping(method = RequestMethod.POST, value = "/hdfsstores")
-  @ResponseBody
-  public String createHdfsStore(
-		  @RequestParam(CliStrings.CREATE_HDFS_STORE__NAME) final String storeName,		  
-		  @RequestParam(value = CliStrings.CREATE_HDFS_STORE__NAMENODE, required=false) final String  namenode,
-		  @RequestParam(value = CliStrings.CREATE_HDFS_STORE__HOMEDIR, required=false) final String  homedir,
-		  @RequestParam(value = CliStrings.CREATE_HDFS_STORE__BATCHSIZE,required=false) final Integer batchSize,                    
-		  @RequestParam(value = CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL, required=false) final Integer batchInterval,          
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__READCACHESIZE, required=false) final Float readCachesize,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS, required=false) final Integer dispatcherThreads,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAXMEMORY, required=false) final Integer maxMemory,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT, required=false) final Boolean persistence,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE, required=false) final Boolean  synchronousDiskWrite,                    
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__DISKSTORENAME, required=false) final String diskStoreName,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MINORCOMPACT, required=false) final Boolean minorCompaction,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS, required=false) final Integer minorCompactionThreads,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT, required=false) final Boolean majorCompact,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL, required=false) final Integer majorCompactionInterval,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS, required=false) final Integer majorCompactionThreads,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL, required=false) final Integer purgeInterval,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE, required=false) final Integer writeOnlyFileSize,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL, required=false) final Integer fileRolloverInterval,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE, required=false) final String clientConfigFile,
-          @RequestParam(value = CliStrings.CREATE_HDFS_STORE__GROUP, required = false) final String[] groups)
-  {
-		CommandStringBuilder command = new CommandStringBuilder(CliStrings.CREATE_HDFS_STORE);
-
-		command.addOption(CliStrings.CREATE_HDFS_STORE__NAME, storeName);
-		
-		if (hasValue(namenode))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__NAMENODE, namenode);
-		
-		if (hasValue(homedir))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__HOMEDIR, homedir);
-		
-		if (hasValue(batchSize))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__BATCHSIZE, String.valueOf(batchSize));
-		
-		if (hasValue(batchInterval))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__BATCHINTERVAL, String.valueOf(batchInterval));
-		
-		if (hasValue(readCachesize))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__READCACHESIZE, String.valueOf(readCachesize));
-		
-		if (hasValue(dispatcherThreads))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__DISPATCHERTHREADS, String.valueOf(dispatcherThreads));
-		
-		if (hasValue(maxMemory))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__MAXMEMORY,String.valueOf(maxMemory));
-		
-		if (hasValue(persistence))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__BUFFERPERSISTENT,String.valueOf(Boolean.TRUE.equals(persistence)));
-		
-		if (hasValue(synchronousDiskWrite))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__SYNCDISKWRITE,String.valueOf(Boolean.TRUE.equals(synchronousDiskWrite)));
-		
-		if (hasValue(diskStoreName))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__DISKSTORENAME,String.valueOf(diskStoreName));
-		
-		if (hasValue(minorCompaction))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__MINORCOMPACT,String.valueOf(Boolean.TRUE.equals(minorCompaction)));
-		
-		if (hasValue(minorCompactionThreads))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__MINORCOMPACTIONTHREADS,String.valueOf(minorCompactionThreads));
-		
-		if (hasValue(majorCompact))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__MAJORCOMPACT,String.valueOf(Boolean.TRUE.equals(majorCompact)));
-		
-		if (hasValue(majorCompactionInterval))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTINTERVAL,String.valueOf(majorCompactionInterval));
-		
-		if (hasValue(majorCompactionThreads))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__MAJORCOMPACTIONTHREADS,String.valueOf(majorCompactionThreads));
-		
-		if (hasValue(purgeInterval))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__PURGEINTERVAL,String.valueOf(purgeInterval));
-		
-		if (hasValue(writeOnlyFileSize))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__WRITEONLYFILESIZE,String.valueOf(writeOnlyFileSize));
-		
-		if (hasValue(fileRolloverInterval))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__FILEROLLOVERINTERVAL,String.valueOf(fileRolloverInterval));
-		
-		if (hasValue(clientConfigFile))
-			command.addOption(CliStrings.CREATE_HDFS_STORE__CLIENTCONFIGFILE,String.valueOf(clientConfigFile));		
-
-		if (hasValue(groups)) {
-			command.addOption(CliStrings.CREATE_HDFS_STORE__GROUP,StringUtils.concat(groups, StringUtils.COMMA_DELIMITER));
-		}
-
-		return processCommand(command.toString());
-  }  
-  
-  @RequestMapping(method = RequestMethod.GET, value = "/hdfsstores/{name}")
-  @ResponseBody
-  public String describeHDFSStore(
-		  @PathVariable("name") final String hdfsStoreName,
-          @RequestParam(CliStrings.DESCRIBE_HDFS_STORE__MEMBER) final String memberNameId)
-  {	  
-    CommandStringBuilder command = new CommandStringBuilder(CliStrings.DESCRIBE_HDFS_STORE);
-    command.addOption(CliStrings.DESCRIBE_HDFS_STORE__NAME, decode(hdfsStoreName));
-    command.addOption(CliStrings.DESCRIBE_HDFS_STORE__MEMBER, memberNameId);    
-    return processCommand(command.toString());
-  }  
-  
-  @RequestMapping(method = RequestMethod.PUT, value = "/hdfsstores/{name}")
-  @ResponseBody
-  public String alterHdfsStore(
-		  @PathVariable("name") final String hdfsStoreName,	  								
-		  @RequestParam(value = CliStrings.ALTER_HDFS_STORE__BATCHSIZE, required=false) final Integer batchSize,                                    				                                
-		  @RequestParam(value = CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL, required=false) final Integer batchInterval,
-          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MINORCOMPACT, required=false) final Boolean minorCompaction,
-          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS, required=false) final Integer minorCompactionThreads,
-          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT, required=false) final Boolean majorCompact,
-          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL, required=false) final Integer majorCompactionInterval,
-          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS, required=false) final Integer majorCompactionThreads,
-          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL, required=false) final Integer purgeInterval,
-          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE, required=false) final Integer writeOnlyFileSize,
-          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL, required=false) final Integer fileRolloverInterval,
-          @RequestParam(value = CliStrings.ALTER_HDFS_STORE__GROUP, required = false) final String[] groups)
-  {
-	  CommandStringBuilder command = new CommandStringBuilder(CliStrings.ALTER_HDFS_STORE);
-
-		command.addOption(CliStrings.ALTER_HDFS_STORE__NAME, hdfsStoreName);
-		
-		
-		if (hasValue(batchSize))
-			command.addOption(CliStrings.ALTER_HDFS_STORE__BATCHSIZE, String.valueOf(batchSize));
-		
-		if (hasValue(batchInterval))
-			command.addOption(CliStrings.ALTER_HDFS_STORE__BATCHINTERVAL, String.valueOf(batchInterval));	
-		
-		if (hasValue(minorCompaction))
-			command.addOption(CliStrings.ALTER_HDFS_STORE__MINORCOMPACT,String.valueOf(Boolean.TRUE.equals(minorCompaction)));
-		
-		if (hasValue(minorCompactionThreads))
-			command.addOption(CliStrings.ALTER_HDFS_STORE__MINORCOMPACTIONTHREADS,String.valueOf(minorCompactionThreads));
-		
-		if (hasValue(majorCompact))
-			command.addOption(CliStrings.ALTER_HDFS_STORE__MAJORCOMPACT,String.valueOf(Boolean.TRUE.equals(majorCompact)));
-		
-		if (hasValue(majorCompactionInterval))
-			command.addOption(CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTINTERVAL,String.valueOf(majorCompactionInterval));
-		
-		if (hasValue(majorCompactionThreads))
-			command.addOption(CliStrings.ALTER_HDFS_STORE__MAJORCOMPACTIONTHREADS,String.valueOf(majorCompactionThreads));
-		
-		if (hasValue(purgeInterval))
-			command.addOption(CliStrings.ALTER_HDFS_STORE__PURGEINTERVAL,String.valueOf(purgeInterval));
-		
-		if (hasValue(writeOnlyFileSize))
-			command.addOption(CliStrings.ALTER_HDFS_STORE__WRITEONLYFILESIZE,String.valueOf(writeOnlyFileSize));
-		
-		if (hasValue(fileRolloverInterval))
-			command.addOption(CliStrings.ALTER_HDFS_STORE__FILEROLLOVERINTERVAL,String.valueOf(fileRolloverInterval));
-		
-		if (hasValue(groups)) {
-			command.addOption(CliStrings.ALTER_HDFS_STORE__GROUP,StringUtils.concat(groups, StringUtils.COMMA_DELIMITER));
-		}
-
-		return processCommand(command.toString());
-  }
-  
-  @RequestMapping(method = RequestMethod.DELETE, value = "/hdfsstores/{name}")
-  @ResponseBody
-  public String destroyHDFSStore(
-		  @PathVariable("name") final String hdfsStoreName,
-          @RequestParam(value = CliStrings.DESTROY_HDFS_STORE__GROUP, required = false) final String[] groups)
-  {
-    CommandStringBuilder command = new CommandStringBuilder(CliStrings.DESTROY_HDFS_STORE);
-    command.addOption(CliStrings.DESTROY_HDFS_STORE__NAME, decode(hdfsStoreName));
-
-    if (hasValue(groups)) {
-      command.addOption(CliStrings.DESTROY_HDFS_STORE__GROUP, StringUtils.concat(groups, StringUtils.COMMA_DELIMITER));
-    }
-    return processCommand(command.toString());
-    
-  }  
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
index ef4c49f..46ed1a6 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
@@ -9,21 +9,12 @@ package com.gemstone.gemfire.management.internal.web.controllers;
 
 import java.io.IOException;
 import java.util.Set;
+
 import javax.management.AttributeNotFoundException;
 import javax.management.InstanceNotFoundException;
 import javax.management.MalformedObjectNameException;
 import javax.management.ObjectName;
 
-import com.gemstone.gemfire.internal.GemFireVersion;
-import com.gemstone.gemfire.internal.lang.ObjectUtils;
-import com.gemstone.gemfire.internal.lang.StringUtils;
-import com.gemstone.gemfire.internal.util.IOUtils;
-import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
-import com.gemstone.gemfire.management.internal.web.domain.Link;
-import com.gemstone.gemfire.management.internal.web.domain.LinkIndex;
-import com.gemstone.gemfire.management.internal.web.domain.QueryParameterSource;
-import com.gemstone.gemfire.management.internal.web.http.HttpMethod;
-
 import org.springframework.http.HttpStatus;
 import org.springframework.http.MediaType;
 import org.springframework.http.ResponseEntity;
@@ -34,6 +25,16 @@ import org.springframework.web.bind.annotation.RequestMethod;
 import org.springframework.web.bind.annotation.RequestParam;
 import org.springframework.web.bind.annotation.ResponseBody;
 
+import com.gemstone.gemfire.internal.GemFireVersion;
+import com.gemstone.gemfire.internal.lang.ObjectUtils;
+import com.gemstone.gemfire.internal.lang.StringUtils;
+import com.gemstone.gemfire.internal.util.IOUtils;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.web.domain.Link;
+import com.gemstone.gemfire.management.internal.web.domain.LinkIndex;
+import com.gemstone.gemfire.management.internal.web.domain.QueryParameterSource;
+import com.gemstone.gemfire.management.internal.web.http.HttpMethod;
+
 /**
  * The ShellCommandsController class implements GemFire REST API calls for Gfsh Shell Commands.
  * 
@@ -249,12 +250,7 @@ public class ShellCommandsController extends AbstractCommandsController {
       .add(new Link(CliStrings.STATUS_GATEWAYSENDER, toUri("/gateways/senders/{id}")))
       .add(new Link(CliStrings.STOP_GATEWAYRECEIVER, toUri("/gateways/receivers?op=stop"), HttpMethod.POST))
       .add(new Link(CliStrings.STOP_GATEWAYSENDER, toUri("/gateways/senders/{id}?op=stop"), HttpMethod.POST))
-       // HDFS Store Commands
-       .add(new Link(CliStrings.LIST_HDFS_STORE, toUri("/hdfsstores"), HttpMethod.GET))
-       .add(new Link(CliStrings.DESCRIBE_HDFS_STORE, toUri("/hdfsstores/{name}"), HttpMethod.GET))
-       .add(new Link(CliStrings.CREATE_HDFS_STORE, toUri("/hdfsstores"), HttpMethod.POST))
-       .add(new Link(CliStrings.DESTROY_HDFS_STORE, toUri("/hdfsstores/{name}"), HttpMethod.DELETE))
-       .add(new Link(CliStrings.ALTER_HDFS_STORE,   toUri("/hdfsstores/{name}"), HttpMethod.PUT));
+      ;
   }
 
   @RequestMapping(method = { RequestMethod.GET, RequestMethod.HEAD }, value = "/ping")

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
deleted file mode 100644
index af47138..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/HDFSStoreCommandsJUnitTest.java
+++ /dev/null
@@ -1,838 +0,0 @@
-/*
- * =========================================================================
- *  Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- *  This product is protected by U.S. and international copyright
- *  and intellectual property laws. Pivotal products are covered by
- *  more patents listed at http://www.pivotal.io/patents.
- * ========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.commands;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.json.JSONArray;
-import org.json.JSONException;
-import org.json.JSONObject;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.execute.Execution;
-import com.gemstone.gemfire.cache.execute.FunctionInvocationTargetException;
-import com.gemstone.gemfire.cache.execute.ResultCollector;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.execute.AbstractExecution;
-import com.gemstone.gemfire.management.cli.Result;
-import com.gemstone.gemfire.management.cli.Result.Status;
-import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.CliFunctionResult;
-import com.gemstone.gemfire.management.internal.cli.functions.CreateHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.DestroyHDFSStoreFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction;
-import com.gemstone.gemfire.management.internal.cli.functions.ListHDFSStoresFunction.HdfsStoreDetails;
-import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
-import com.gemstone.gemfire.management.internal.cli.json.GfJsonObject;
-import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
-import com.gemstone.gemfire.management.internal.cli.result.InfoResultData;
-import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
-import com.gemstone.gemfire.management.internal.cli.util.HDFSStoreNotFoundException;
-import com.gemstone.gemfire.management.internal.cli.util.MemberNotFoundException;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * The HDFSStoreCommandsJUnitTest class is a test suite of test cases testing
- * the contract and functionality of the HDFSStoreCommands class implementing
- * commands in the GemFire shell (gfsh) that access and modify hdfs stores in
- * GemFire. </p>
- * 
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommands
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.DescribeHDFSStoreFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.jmock.lib.legacy.ClassImposteriser
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSStoreCommandsJUnitTest {
-
-  private Mockery mockContext;
-
-  @Before
-  public void setUp() {
-    mockContext = new Mockery() {
-      {
-        setImposteriser(ClassImposteriser.INSTANCE);
-      }
-    };
-  }
-
-  @After
-  public void tearDown() {
-    mockContext.assertIsSatisfied();
-    mockContext = null;
-  }
-
-  @Test
-  public void testGetHDFSStoreDescription() {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-
-    final HDFSStoreConfigHolder expectedHdfsStoreConfigHolder = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName",
-        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
-        40, 40, 800);
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockMember).getName();
-        will(returnValue(null));
-        oneOf(mockMember).getId();
-        will(returnValue(memberId));
-        oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(Arrays.asList(expectedHdfsStoreConfigHolder)));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    final HDFSStoreConfigHolder actualHdfsStoreConfigHolder = commands.getHDFSStoreDescription(memberId, hdfsStoreName);
-
-    assertNotNull(actualHdfsStoreConfigHolder);
-    assertEquals(expectedHdfsStoreConfigHolder, actualHdfsStoreConfigHolder);
-  }
-
-  @Test(expected = MemberNotFoundException.class)
-  public void testGetHDFSStoreDescriptionThrowsMemberNotFoundException() {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockMember).getName();
-        will(returnValue(null));
-        oneOf(mockMember).getId();
-        will(returnValue("testMember"));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, null);
-
-    try {
-      commands.getHDFSStoreDescription(memberId, hdfsStoreName);
-    } catch (MemberNotFoundException expected) {
-      assertEquals(CliStrings.format(CliStrings.MEMBER_NOT_FOUND_ERROR_MESSAGE, memberId), expected.getMessage());
-      throw expected;
-    }
-  }
-
-  @Test(expected = HDFSStoreNotFoundException.class)
-  public void testGetHDFSStoreDescriptionThrowsResourceNotFoundException() {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockMember).getName();
-        will(returnValue(null));
-        oneOf(mockMember).getId();
-        will(returnValue(memberId));
-        oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
-        will(throwException(new HDFSStoreNotFoundException("expected")));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    try {
-      commands.getHDFSStoreDescription(memberId, hdfsStoreName);
-    } catch (HDFSStoreNotFoundException expected) {
-      assertEquals("expected", expected.getMessage());
-      throw expected;
-    }
-  }
-
-  @Test(expected = RuntimeException.class)
-  public void testGetHDFSStoreDescriptionThrowsRuntimeException() {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockMember).getName();
-        will(returnValue(null));
-        oneOf(mockMember).getId();
-        will(returnValue(memberId));
-        oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
-        will(throwException(new RuntimeException("expected")));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    try {
-      commands.getHDFSStoreDescription(memberId, hdfsStoreName);
-    } catch (RuntimeException expected) {
-      assertEquals("expected", expected.getMessage());
-      throw expected;
-    }
-  }
-
-  @Test(expected = RuntimeException.class)
-  public void testGetHDFSStoreDescriptionWithInvalidFunctionResultReturnType() {
-    final String hdfsStoreName = "mockHDFSStore";
-    final String memberId = "mockMember";
-
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockMember).getName();
-        will(returnValue(null));
-        oneOf(mockMember).getId();
-        will(returnValue(memberId));
-        oneOf(mockFunctionExecutor).withArgs(with(equal(hdfsStoreName)));
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(DescribeHDFSStoreFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(Arrays.asList(new Object())));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    try {
-      commands.getHDFSStoreDescription(memberId, hdfsStoreName);
-    } catch (RuntimeException expected) {
-      assertEquals(CliStrings.format(CliStrings.UNEXPECTED_RETURN_TYPE_EXECUTING_COMMAND_ERROR_MESSAGE, Object.class
-          .getName(), CliStrings.DESCRIBE_HDFS_STORE), expected.getMessage());
-      assertNull(expected.getCause());
-      throw expected;
-    }
-  }
-
-  @Test
-  public void testGetHDFSStoreListing() {
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-
-    final DistributedMember mockDistributedMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-
-    final AbstractExecution mockFunctionExecutor = mockContext.mock(AbstractExecution.class, "Function Executor");
-
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-
-    final HDFSStoreConfigHolder expectedHdfsStoreConfigHolderOne = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName1",
-        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
-        40, 40, 800);
-    final HDFSStoreConfigHolder expectedHdfsStoreConfigHolderTwo = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName2",
-        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
-        40, 40, 800);
-    final HDFSStoreConfigHolder expectedHdfsStoreConfigHolderThree = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName3",
-        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
-        40, 40, 800);
- 
-    
-    HdfsStoreDetails d1=new HdfsStoreDetails(expectedHdfsStoreConfigHolderOne.getName(), "member1", "member1");
-    HdfsStoreDetails d2=new HdfsStoreDetails(expectedHdfsStoreConfigHolderTwo.getName(), "member2", "member2");
-    HdfsStoreDetails d3=new HdfsStoreDetails(expectedHdfsStoreConfigHolderThree.getName(), "member3", "member3");
-    
-    final Set<HdfsStoreDetails> expectedHdfsStores = new HashSet<HdfsStoreDetails>();
-    expectedHdfsStores.add( d1);
-    expectedHdfsStores.add(d2 );    
-    expectedHdfsStores.add(d3);
-
-    final List<Object> results = new ArrayList<Object>();
-    results.add(expectedHdfsStores);
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).setIgnoreDepartedMembers(with(equal(true)));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(ListHDFSStoresFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(results));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockDistributedMember, mockFunctionExecutor);
-
-    final List<?> actualHdfsStores = commands.getHdfsStoreListing(commands.getNormalMembers(mockCache));
-
-    Assert.assertNotNull(actualHdfsStores);   
-    Assert.assertTrue(actualHdfsStores.contains(d1));
-    Assert.assertTrue(actualHdfsStores.contains(d2));
-    Assert.assertTrue(actualHdfsStores.contains(d3));
-  }
-
-  @Test(expected = RuntimeException.class)
-  public void testGetHDFSStoreListThrowsRuntimeException() {
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockDistributedMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(ListHDFSStoresFunction.class)));
-        will(throwException(new RuntimeException("expected")));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockDistributedMember, mockFunctionExecutor);
-
-    try {
-      commands.getHdfsStoreListing(commands.getNormalMembers(mockCache));
-    } catch (RuntimeException expected) {
-      assertEquals("expected", expected.getMessage());
-      throw expected;
-    }
-  }
-
-  @Test
-  public void testGetHDFSStoreListReturnsFunctionInvocationTargetExceptionInResults() {
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockDistributedMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final AbstractExecution mockFunctionExecutor = mockContext.mock(AbstractExecution.class, "Function Executor");
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-
-    final HDFSStoreConfigHolder expectedHdfsStoreConfigHolder = createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName",
-        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
-        40, 40, 800);
-
-    final List<HdfsStoreDetails> expectedHdfsStores = Arrays.asList(new HdfsStoreDetails(
-        expectedHdfsStoreConfigHolder.getName(), "member1", "member1"));
-
-    final List<Object> results = new ArrayList<Object>();
-
-    results.add(expectedHdfsStores);
-    results.add(new FunctionInvocationTargetException("expected"));
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).setIgnoreDepartedMembers(with(equal(true)));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(ListHDFSStoresFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(results));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockDistributedMember, mockFunctionExecutor);
-
-    final List<HdfsStoreDetails> actualHdfsStores = commands.getHdfsStoreListing(commands
-        .getNormalMembers(mockCache));
-
-  }
-
-  @Test
-  public void testGetCreatedHDFSStore() throws JSONException {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-    XmlEntity xml = null;
-    final CliFunctionResult cliResult = new CliFunctionResult(memberId, xml, "Success");
-    // Need to fix the return value of this function
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(CreateHDFSStoreFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(Arrays.asList(cliResult)));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    final Result result = commands.getCreatedHdfsStore(null, hdfsStoreName, "hdfs://localhost:9000", "test", null, 20,
-        20, true, true, 100, 10000, "testStore", true, 10, true, .23F, 10, 10, 10, 10, 10);
-
-    assertNotNull(result);
-    assertEquals(Status.OK, result.getStatus());
-    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
-    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
-    assertNotNull(jsonObject.get("Member"));
-    assertNotNull(jsonObject.get("Result"));
-
-    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
-    assertEquals("Success", (((JSONArray)jsonObject.get("Result")).get(0)));
-  }
-
-  @Test
-  public void testGetCreatedHDFSStoreWithThrowable() throws JSONException {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-    RuntimeException exception = new RuntimeException("Test Exception");
-
-    final CliFunctionResult cliResult = new CliFunctionResult(memberId, exception, null);
-    // Need to fix the return value of this function
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(CreateHDFSStoreFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(Arrays.asList(cliResult)));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    final Result result = commands.getCreatedHdfsStore(null, hdfsStoreName, "hdfs://localhost:9000", "test", null, 20,
-        20, true, true, 100, 10000, "testStore", true, 10, true, .23F, 10, 10, 10, 10, 10);
-
-    assertNotNull(result);
-    assertEquals(Status.ERROR, result.getStatus());
-
-    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
-    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
-    assertNotNull(jsonObject.get("Member"));
-    assertNotNull(jsonObject.get("Result"));
-    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
-    assertEquals("ERROR: " + exception.getClass().getName() + ": " + exception.getMessage(), (((JSONArray)jsonObject
-        .get("Result")).get(0)));
-  }
-
-  @Test
-  public void testGetCreatedHDFSStoreWithCacheClosedException() throws JSONException {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-
-    final CliFunctionResult cliResult = new CliFunctionResult(memberId, false, null);
-    // Need to fix the return value of this function
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(CreateHDFSStoreFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(Arrays.asList(cliResult)));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    final Result result = commands.getCreatedHdfsStore(null, hdfsStoreName, "hdfs://localhost:9000", "test", null, 20,
-        20, true, true, 100, 10000, "testStore", true, 10, true, .23F, 10, 10, 10, 10, 10);
-
-    assertNotNull(result);
-    InfoResultData resultData = (InfoResultData)((CommandResult)result).getResultData();
-    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
-    assertNotNull(jsonObject.get("message"));
-
-    assertEquals("Unable to create hdfs store:" + hdfsStoreName, (((JSONArray)jsonObject.get("message")).get(0)));
-  }
-
-  @Test
-  public void testGetAlteredHDFSStore() throws JSONException {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-    XmlEntity xml = null;
-    final CliFunctionResult cliResult = new CliFunctionResult(memberId, xml, "Success");
-    // Need to fix the return value of this function
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(AlterHDFSStoreFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(Arrays.asList(cliResult)));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    final Result result = commands.getAlteredHDFSStore(null, hdfsStoreName, 100, 100, true, 100, true, 100, 100, 100,
-        100, 100);
-
-    assertNotNull(result);
-    assertEquals(Status.OK, result.getStatus());
-    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
-    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
-    assertNotNull(jsonObject.get("Member"));
-    assertNotNull(jsonObject.get("Result"));
-
-    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
-    assertEquals("Success", (((JSONArray)jsonObject.get("Result")).get(0)));
-  }
-
-  @Test
-  public void testGetAlteredHDFSStoreWithThrowable() throws JSONException {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-    RuntimeException exception = new RuntimeException("Test Exception");
-    final CliFunctionResult cliResult = new CliFunctionResult(memberId, exception, "Success");
-    // Need to fix the return value of this function
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(AlterHDFSStoreFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(Arrays.asList(cliResult)));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    final Result result = commands.getAlteredHDFSStore(null, hdfsStoreName, 100, 100, true, 100, true, 100, 100, 100,
-        100, 100);
-
-    assertNotNull(result);
-    assertEquals(Status.ERROR, result.getStatus());
-    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
-    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
-    assertNotNull(jsonObject.get("Member"));
-    assertNotNull(jsonObject.get("Result"));
-
-    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
-    assertEquals("ERROR: " + exception.getClass().getName() + ": " + exception.getMessage(), (((JSONArray)jsonObject
-        .get("Result")).get(0)));
-  }
-
-  @Test
-  public void testGetAlteredHDFSStoreWithCacheClosedException() throws JSONException {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-    final CliFunctionResult cliResult = new CliFunctionResult(memberId, false, null);
-    // Need to fix the return value of this function
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).withArgs(with(aNonNull(HDFSStoreConfigHolder.class)));
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(AlterHDFSStoreFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(Arrays.asList(cliResult)));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    final Result result = commands.getAlteredHDFSStore(null, hdfsStoreName, 100, 100, true, 100, true, 100, 100, 100,
-        100, 100);
-
-    assertNotNull(result);
-    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
-    JSONObject jsonObject = (JSONObject)resultData.getGfJsonObject().get("content");
-    assertEquals(0, jsonObject.length());
-  }
-
-  @Test
-  public void testDestroyStore() throws JSONException {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-    XmlEntity xml = null;
-    final CliFunctionResult cliResult = new CliFunctionResult(memberId, xml, "Success");
-    // Need to fix the return value of this function
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).withArgs(hdfsStoreName);
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(DestroyHDFSStoreFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(Arrays.asList(cliResult)));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    final Result result = commands.destroyStore(hdfsStoreName, null);
-
-    assertNotNull(result);
-    assertEquals(Status.OK, result.getStatus());
-    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
-    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
-    assertNotNull(jsonObject.get("Member"));
-    assertNotNull(jsonObject.get("Result"));
-
-    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
-    assertEquals("Success", (((JSONArray)jsonObject.get("Result")).get(0)));
-  }
-
-  @Test
-  public void testDestroyStoreWithThrowable() throws JSONException {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-    RuntimeException exception = new RuntimeException("Test Exception");
-    final CliFunctionResult cliResult = new CliFunctionResult(memberId, exception, "Success");
-    // Need to fix the return value of this function
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).withArgs(hdfsStoreName);
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(DestroyHDFSStoreFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(Arrays.asList(cliResult)));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    final Result result = commands.destroyHdfstore(hdfsStoreName, null);
-
-    assertNotNull(result);
-    assertEquals(Status.ERROR, result.getStatus());
-    TabularResultData resultData = (TabularResultData)((CommandResult)result).getResultData();
-    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
-    assertNotNull(jsonObject.get("Member"));
-    assertNotNull(jsonObject.get("Result"));
-
-    assertEquals(memberId, (((JSONArray)jsonObject.get("Member")).get(0)));
-    assertEquals("ERROR: " + exception.getClass().getName() + ": " + exception.getMessage(), (((JSONArray)jsonObject
-        .get("Result")).get(0)));
-  }
-
-  @Test
-  public void testDestroyStoreWithCacheClosedException() throws JSONException {
-    final String hdfsStoreName = "mockHdfsStore";
-    final String memberId = "mockMember";
-    final Cache mockCache = mockContext.mock(Cache.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final Execution mockFunctionExecutor = mockContext.mock(Execution.class, "Function Executor");
-    final ResultCollector mockResultCollector = mockContext.mock(ResultCollector.class, "ResultCollector");
-    final CliFunctionResult cliResult = new CliFunctionResult(memberId, false, null);
-    // Need to fix the return value of this function
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionExecutor).withArgs(hdfsStoreName);
-        will(returnValue(mockFunctionExecutor));
-        oneOf(mockFunctionExecutor).execute(with(aNonNull(DestroyHDFSStoreFunction.class)));
-        will(returnValue(mockResultCollector));
-        oneOf(mockResultCollector).getResult();
-        will(returnValue(Arrays.asList(cliResult)));
-      }
-    });
-
-    final HDFSStoreCommands commands = new TestHDFSStoreCommands(mockCache, mockMember, mockFunctionExecutor);
-
-    final Result result = commands.destroyHdfstore(hdfsStoreName, null);
-
-    assertNotNull(result);
-
-    assertNotNull(result);
-    InfoResultData resultData = (InfoResultData)((CommandResult)result).getResultData();
-    GfJsonObject jsonObject = resultData.getGfJsonObject().getJSONObject("content");
-    assertNotNull(jsonObject.get("message"));
-
-    assertEquals("No matching hdfs stores found.", (((JSONArray)jsonObject.get("message")).get(0)));
-  }
-
-  public static HDFSStoreConfigHolder createMockHDFSStoreConfigHolder(Mockery mockContext, final String storeName, final String namenode,
-      final String homeDir, final int maxFileSize, final int fileRolloverInterval, final float blockCachesize,
-      final String clientConfigFile, final int batchSize, final int batchInterval, final String diskStoreName,
-      final boolean syncDiskwrite, final int dispatcherThreads, final int maxMemory, final boolean bufferPersistent,
-      final boolean minorCompact, final boolean majorCompact, final int majorCompactionInterval,
-      final int majorCompactionThreads, final int minorCompactionThreads, final int purgeInterval) {
-
-    HDFSStoreConfigHolder mockHdfsStore = mockContext.mock(HDFSStoreConfigHolder.class, "HDFSStoreConfigHolder_"
-        + storeName);
-
-    createMockStore(mockContext, mockHdfsStore, storeName, namenode, homeDir, maxFileSize, fileRolloverInterval,
-        minorCompact, minorCompactionThreads, majorCompact, majorCompactionThreads, majorCompactionInterval,
-        purgeInterval, blockCachesize, clientConfigFile, batchSize,
-        batchInterval, diskStoreName, syncDiskwrite, dispatcherThreads, maxMemory, bufferPersistent);
-    return mockHdfsStore;
-
-  }
-
-  public static void createMockStore(Mockery mockContext, final HDFSStore mockStore, final String storeName,
-      final String namenode, final String homeDir, final int maxFileSize, final int fileRolloverInterval,
-      final boolean minorCompact, final int minorCompactionThreads, final boolean majorCompact,
-      final int majorCompactionThreads, final int majorCompactionInterval, final int purgeInterval,
-      final float blockCachesize, final String clientConfigFile, final int batchSize, final int batchInterval,
-      final String diskStoreName, final boolean syncDiskwrite, final int dispatcherThreads, final int maxMemory,
-      final boolean bufferPersistent) {
-
-    mockContext.checking(new Expectations() {
-      {
-        allowing(mockStore).getName();
-        will(returnValue(storeName));
-        allowing(mockStore).getNameNodeURL();
-        will(returnValue(namenode));
-        allowing(mockStore).getHomeDir();
-        will(returnValue(homeDir));
-        allowing(mockStore).getWriteOnlyFileRolloverSize();
-        will(returnValue(maxFileSize));
-        allowing(mockStore).getWriteOnlyFileRolloverInterval();
-        will(returnValue(fileRolloverInterval));
-        allowing(mockStore).getMinorCompaction();
-        will(returnValue(minorCompact));
-        allowing(mockStore).getMajorCompaction();
-        will(returnValue(majorCompact));
-        allowing(mockStore).getMajorCompactionInterval();
-        will(returnValue(majorCompactionInterval));
-        allowing(mockStore).getMajorCompactionThreads();
-        will(returnValue(majorCompactionThreads));
-        allowing(mockStore).getMinorCompactionThreads();
-        will(returnValue(minorCompactionThreads));
-        allowing(mockStore).getPurgeInterval();
-        will(returnValue(purgeInterval));
-        allowing(mockStore).getInputFileCountMax();
-        will(returnValue(10));
-        allowing(mockStore).getInputFileSizeMax();
-        will(returnValue(1024));
-        allowing(mockStore).getInputFileCountMin();
-        will(returnValue(2));
-        allowing(mockStore).getBlockCacheSize();
-        will(returnValue(blockCachesize));
-        allowing(mockStore).getHDFSClientConfigFile();
-        will(returnValue(clientConfigFile));
-
-        allowing(mockStore).getBatchSize();
-        will(returnValue(batchSize));
-        allowing(mockStore).getBatchInterval();
-        will(returnValue(batchInterval));
-        allowing(mockStore).getDiskStoreName();
-        will(returnValue(diskStoreName));
-        allowing(mockStore).getSynchronousDiskWrite();
-        will(returnValue(syncDiskwrite));
-        allowing(mockStore).getBufferPersistent();
-        will(returnValue(bufferPersistent));
-        allowing(mockStore).getDispatcherThreads();
-        will(returnValue(dispatcherThreads));
-        allowing(mockStore).getMaxMemory();
-        will(returnValue(maxMemory));
-      }
-    });
-  }
-
-  protected static class TestHDFSStoreCommands extends HDFSStoreCommands {
-
-    private final Cache cache;
-
-    private final DistributedMember distributedMember;
-
-    private final Execution functionExecutor;
-
-    public TestHDFSStoreCommands(final Cache cache, final DistributedMember distributedMember,
-        final Execution functionExecutor) {
-      assert cache != null: "The Cache cannot be null!";
-      this.cache = cache;
-      this.distributedMember = distributedMember;
-      this.functionExecutor = functionExecutor;
-    }
-
-    @Override
-    protected Cache getCache() {
-      return this.cache;
-    }
-
-    @Override
-    protected Set<DistributedMember> getMembers(final Cache cache) {
-      assertSame(getCache(), cache);
-      return Collections.singleton(this.distributedMember);
-    }
-
-    @Override
-    protected Execution getMembersFunctionExecutor(final Set<DistributedMember> members) {
-      Assert.assertNotNull(members);
-      return this.functionExecutor;
-    }
-
-    @Override
-    protected Set<DistributedMember> getNormalMembers(final Cache cache) {
-      assertSame(getCache(), cache);
-      return Collections.singleton(this.distributedMember);
-    }
-
-    @Override
-    protected Set<DistributedMember> getGroupMembers(String[] groups) {
-      Set<DistributedMember> dm = new HashSet<DistributedMember>();
-      dm.add(distributedMember);
-      return dm;
-
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java
deleted file mode 100644
index 4a93e30..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/AlterHDFSStoreFunctionJUnitTest.java
+++ /dev/null
@@ -1,324 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.logging.log4j.Logger;
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.ResultSender;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommandsJUnitTest;
-import com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction.AlterHDFSStoreAttributes;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * The AlterHDFSStoreFunctionJUnitTest test suite class tests the contract and
- * functionality of the AlterHDFSStoreFunction class. </p>
- * 
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-@SuppressWarnings( { "unused" })
-@Category({IntegrationTest.class, HoplogTest.class})
-public class AlterHDFSStoreFunctionJUnitTest {
-
-  private static final Logger logger = LogService.getLogger();
-
-  private Mockery mockContext;
-
-  @Before
-  public void setup() {
-    mockContext = new Mockery() {
-      {
-        setImposteriser(ClassImposteriser.INSTANCE);
-      }
-    };
-  }
-
-  @After
-  public void tearDown() {
-    mockContext.assertIsSatisfied();
-    mockContext = null;
-  }
-
-  @Test
-  public void testExecute() throws Throwable {
-
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-
-    final AlterHDFSStoreFunction function = createAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity);
-    final TestResultSender testResultSender = new TestResultSender();
-    final HDFSStoreImpl mockHdfsStore = CreateHDFSStoreFunctionJUnitTest.createMockHDFSStoreImpl(mockContext,
-        "hdfsStoreName", "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false,
-        false, true, 20, 20, 10, 100);
-	final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
-				"mockStore", 100, 100, false, false, 100, 100, 100, 100, 100,
-				100);
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockMember).getId();
-        will(returnValue(memberId));
-        exactly(2).of(mockMember).getName();
-        will(returnValue(memberName));        
-        oneOf(mockFunctionContext).getArguments();
-        will(returnValue(alterHDFSStoreAttributes));
-        oneOf(mockCache).findHDFSStore(alterHDFSStoreAttributes.getHdfsUniqueName());
-        will(returnValue(mockHdfsStore));
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    function.execute(mockFunctionContext);
-
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final CliFunctionResult result = (CliFunctionResult)results.get(0);
-    assertEquals(memberName, result.getMemberIdOrName());
-    assertEquals("Success", result.getMessage());
-
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testExecuteOnMemberHavingNoHDFSStore() throws Throwable {
-
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-
-    final TestResultSender testResultSender = new TestResultSender();
-    final AlterHDFSStoreFunction function = createAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity);
-	final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
-				"mockStore", 100, 100, false, false, 100, 100, 100, 100, 100,
-				100);
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCache).findHDFSStore(alterHDFSStoreAttributes.getHdfsUniqueName());
-        will(returnValue(null));       
-        oneOf(mockMember).getId();
-        will(returnValue(memberId));
-        exactly(2).of(mockMember).getName();
-        will(returnValue(memberName));
-        oneOf(mockFunctionContext).getArguments();
-        will(returnValue(alterHDFSStoreAttributes));
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    function.execute(mockFunctionContext);
-
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final CliFunctionResult result = (CliFunctionResult)results.get(0);
-    assertEquals(memberName, result.getMemberIdOrName());
-    assertEquals("Hdfs store not found on this member", result.getMessage());
-  }
-
-  @Test
-  public void testExecuteOnMemberWithNoCache() throws Throwable {
-
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final InternalCache mockCache = mockContext.mock(InternalCache.class, "Cache");
-    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
-    final TestResultSender testResultSender = new TestResultSender();
-	final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
-				"mockStore", 100, 100, false, false, 100, 100, 100, 100, 100,
-				100);
-
-    final AlterHDFSStoreFunction function = new TestAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity) {
-      @Override
-      protected Cache getCache() {
-        throw new CacheClosedException("Expected");
-      }
-    };
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionContext).getArguments();
-        will(returnValue(alterHDFSStoreAttributes));
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    function.execute(mockFunctionContext);
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final CliFunctionResult result = (CliFunctionResult)results.get(0);
-    assertEquals("", result.getMemberIdOrName());
-    assertNull(result.getMessage());
-  }
-
-  @Test
-  public void testExecuteHandleRuntimeException() throws Throwable {
-
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-    final TestResultSender testResultSender = new TestResultSender();
-    final AlterHDFSStoreFunction function = createAlterHDFSStoreFunction(mockCache, mockMember, xmlEntity);
-
-    final AlterHDFSStoreAttributes alterHDFSStoreAttributes = new AlterHDFSStoreAttributes(
-				"mockStore", 100, 100, false, false, 100, 100, 100, 100, 100,
-				100);
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockMember).getId();
-        will(returnValue(memberId));
-        exactly(2).of(mockMember).getName();
-        will(returnValue(memberName));
-        oneOf(mockFunctionContext).getArguments();
-        will(returnValue(alterHDFSStoreAttributes));
-        oneOf(mockCache).findHDFSStore(alterHDFSStoreAttributes.getHdfsUniqueName());
-        will(throwException(new RuntimeException("expected")));
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    function.execute(mockFunctionContext);
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final CliFunctionResult result = (CliFunctionResult)results.get(0);
-    assertEquals(memberName, result.getMemberIdOrName());
-    assertEquals("expected", result.getThrowable().getMessage());
-
-  }
-
-  protected TestAlterHDFSStoreFunction createAlterHDFSStoreFunction(final Cache cache, DistributedMember member,
-      XmlEntity xml) {
-    return new TestAlterHDFSStoreFunction(cache, member, xml);
-  }
-
-  protected static class TestAlterHDFSStoreFunction extends AlterHDFSStoreFunction {
-    private static final long serialVersionUID = 1L;
-
-    private final Cache cache;
-
-    private final DistributedMember member;
-
-    private final XmlEntity xml;
-
-    public TestAlterHDFSStoreFunction(final Cache cache, DistributedMember member, XmlEntity xml) {
-      this.cache = cache;
-      this.member = member;
-      this.xml = xml;
-    }
-
-    @Override
-    protected Cache getCache() {
-      return this.cache;
-    }
-
-    @Override
-    protected DistributedMember getDistributedMember(Cache cache) {
-      return member;
-    }
-
-    @Override
-    protected XmlEntity getXMLEntity(String storeName) {
-      return xml;
-    }
-
-    @Override
-    protected HDFSStore alterHdfsStore(HDFSStore hdfsStore, AlterHDFSStoreAttributes alterAttributes) {
-      return hdfsStore;
-    }
-  }
-
-  protected static class TestResultSender implements ResultSender {
-
-    private final List<Object> results = new LinkedList<Object>();
-
-    private Throwable t;
-
-    protected List<Object> getResults() throws Throwable {
-      if (t != null) {
-        throw t;
-      }
-      return Collections.unmodifiableList(results);
-    }
-
-    public void lastResult(final Object lastResult) {
-      results.add(lastResult);
-    }
-
-    public void sendResult(final Object oneResult) {
-      results.add(oneResult);
-    }
-
-    public void sendException(final Throwable t) {
-      this.t = t;
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7f251978/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java
deleted file mode 100644
index 8a012b4..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/functions/CreateHDFSStoreFunctionJUnitTest.java
+++ /dev/null
@@ -1,307 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-
-package com.gemstone.gemfire.management.internal.cli.functions;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.logging.log4j.Logger;
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheClosedException;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.ResultSender;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.management.internal.cli.commands.HDFSStoreCommandsJUnitTest;
-import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-/**
- * The AlterHDFSStoreFunctionJUnitTest test suite class tests the contract and
- * functionality of the AlterHDFSStoreFunction class. </p>
- * 
- * @author Namrata Thanvi
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl
- * @see com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreConfigHolder
- * @see com.gemstone.gemfire.management.internal.cli.functions.AlterHDFSStoreFunction
- * @see org.jmock.Expectations
- * @see org.jmock.Mockery
- * @see org.junit.Assert
- * @see org.junit.Test
- */
-@SuppressWarnings( { "unused" })
-@Category({IntegrationTest.class, HoplogTest.class})
-public class CreateHDFSStoreFunctionJUnitTest {
-
-  private static final Logger logger = LogService.getLogger();
-
-  private Mockery mockContext;
-
-  private static Properties props = new Properties();
-  
-  @Before
-  public void setup() {
-    
-    mockContext = new Mockery() {
-      {
-        setImposteriser(ClassImposteriser.INSTANCE);
-      }
-    };
-  }
-
-  @After
-  public void tearDown() {
-    mockContext.assertIsSatisfied();
-    mockContext = null;
-  }
-
-  @Test
-  public void testExecute() throws Throwable {
-
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "FunctionContext");
-    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-    
-    final TestResultSender testResultSender = new TestResultSender();
-    
-    final HDFSStoreImpl mockHdfsStore = createMockHDFSStoreImpl(mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir",
-        1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
-    
-    final HDFSStoreConfigHolder mockHdfsStoreConfigHolder = HDFSStoreCommandsJUnitTest.createMockHDFSStoreConfigHolder(
-        mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0,
-        2048, true, true, true, 40, 40, 40, 800);
-    
-    final CreateHDFSStoreFunction function = new TestCreateHDFSStoreFunction(mockCache, mockMember, xmlEntity , mockHdfsStore);
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockMember).getId();
-        will(returnValue(memberId));
-        exactly(2).of(mockMember).getName();
-        will(returnValue(memberName));
-        oneOf(mockFunctionContext).getArguments();
-        will(returnValue(mockHdfsStoreConfigHolder));
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    function.execute(mockFunctionContext);
-
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final CliFunctionResult result = (CliFunctionResult)results.get(0);
-    assertEquals(memberName, result.getMemberIdOrName());
-    assertEquals("Success", result.getMessage());
-
-  }
-
-  
-  
-  @Test
-  public void testExecuteOnMemberWithNoCache() throws Throwable {
-
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-    
-    final TestResultSender testResultSender = new TestResultSender();
-    final HDFSStoreImpl mockHdfsStore = createMockHDFSStoreImpl(mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir",
-        1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
-    
-    final HDFSStoreConfigHolder mockHdfsStoreConfigHolder = HDFSStoreCommandsJUnitTest.createMockHDFSStoreConfigHolder(mockContext, "hdfsStoreName",
-        "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0, 2048, true, true, true, 40,
-        40, 40, 800);
-    
-    final CreateHDFSStoreFunction function = new TestCreateHDFSStoreFunction(mockCache, mockMember, xmlEntity , mockHdfsStore) {
-      @Override
-      protected Cache getCache() {
-        throw new CacheClosedException("Expected");
-      }
-    };
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-    function.execute(mockFunctionContext);
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final CliFunctionResult result = (CliFunctionResult)results.get(0);
-    assertEquals("", result.getMemberIdOrName());
-    assertNull(result.getMessage());
-  }
-
-  
-  @Test
-  public void testExecuteHandleRuntimeException() throws Throwable {
-
-    final FunctionContext mockFunctionContext = mockContext.mock(FunctionContext.class, "MockFunctionContext");
-    final DistributedMember mockMember = mockContext.mock(DistributedMember.class, "DistributedMember");
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class, "Cache");
-    final XmlEntity xmlEntity = mockContext.mock(XmlEntity.class, "XmlEntity");
-
-    final String memberId = "mockMemberId";
-    final String memberName = "mockMemberName";
-    
-    final TestResultSender testResultSender = new TestResultSender();
-    final HDFSStoreImpl mockHdfsStore = createMockHDFSStoreImpl(mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir",
-        1024, 20, .25f, null, 20, 20, null, false, 0, 1024, false, false, true, 20, 20, 10, 100);
-    
-    final HDFSStoreConfigHolder mockHdfsStoreConfigHolder = HDFSStoreCommandsJUnitTest.createMockHDFSStoreConfigHolder(
-        mockContext, "hdfsStoreName", "hdfs://localhost:9000", "testDir", 1024, 20, .25f, null, 40, 40, null, false, 0,
-        2048, true, true, true, 40, 40, 40, 800);
-    
-    final CreateHDFSStoreFunction function = new TestCreateHDFSStoreFunction(mockCache, mockMember, xmlEntity , mockHdfsStore) {
-      @Override
-      protected Cache getCache() {
-        throw new RuntimeException("expected");
-      }
-    };
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockFunctionContext).getResultSender();
-        will(returnValue(testResultSender));
-      }
-    });
-
-
-    function.execute(mockFunctionContext);
-    final List<?> results = testResultSender.getResults();
-
-    assertNotNull(results);
-    assertEquals(1, results.size());
-
-    final CliFunctionResult result = (CliFunctionResult)results.get(0);
-    assertEquals("", result.getMemberIdOrName());
-    assertEquals("expected", result.getThrowable().getMessage());
-
-  }
-
-  public static HDFSStoreImpl createMockHDFSStoreImpl(Mockery mockContext, final String storeName, final String namenode, final String homeDir,
-      final int maxFileSize, final int fileRolloverInterval, final float blockCachesize, final String clientConfigFile,
-      final int batchSize, final int batchInterval, final String diskStoreName, final boolean syncDiskwrite,
-      final int dispatcherThreads, final int maxMemory, final boolean bufferPersistent, final boolean minorCompact,
-      final boolean majorCompact, final int majorCompactionInterval, final int majorCompactionThreads,
-      final int minorCompactionThreads, final int purgeInterval) {
-
-    HDFSStoreImpl mockHdfsStore = mockContext.mock(HDFSStoreImpl.class, "HDFSStoreImpl");
-
-    HDFSStoreCommandsJUnitTest.createMockStore(mockContext, mockHdfsStore, storeName, namenode, homeDir, maxFileSize,
-        fileRolloverInterval, minorCompact, minorCompactionThreads, majorCompact, majorCompactionThreads,
-        majorCompactionInterval, purgeInterval, blockCachesize, clientConfigFile, batchSize, batchInterval,
-        diskStoreName, syncDiskwrite, dispatcherThreads, maxMemory, bufferPersistent);
-    
-    return mockHdfsStore;
-  }
-
-  protected static class TestCreateHDFSStoreFunction extends CreateHDFSStoreFunction {
-    private static final long serialVersionUID = 1L;
-
-    private final Cache cache;
-
-    private final DistributedMember member;
-
-    private final XmlEntity xml;
-    
-    private final HDFSStoreImpl hdfsStore;
-
-    public TestCreateHDFSStoreFunction(Cache cache, DistributedMember member, XmlEntity xml , HDFSStoreImpl hdfsStore) {
-      this.cache = cache;
-      this.member = member;
-      this.xml = xml;
-      this.hdfsStore = hdfsStore;
-    }
-
-    @Override
-    protected Cache getCache() {
-      return this.cache;
-    }
-
-    @Override
-    protected DistributedMember getDistributedMember(Cache cache) {
-      return member;
-    }
-
-    @Override
-    protected XmlEntity getXMLEntity(String storeName) {
-      return xml;
-    }
-    
-    @Override
-    protected HDFSStoreImpl createHdfsStore(Cache cache, HDFSStoreConfigHolder configHolder){
-      return hdfsStore;
-    }
-  }
-
-  protected static class TestResultSender implements ResultSender {
-
-    private final List<Object> results = new LinkedList<Object>();
-
-    private Throwable t;
-
-    protected List<Object> getResults() throws Throwable {
-      if (t != null) {
-        throw t;
-      }
-      return Collections.unmodifiableList(results);
-    }
-
-    public void lastResult(final Object lastResult) {
-      results.add(lastResult);
-    }
-
-    public void sendResult(final Object oneResult) {
-      results.add(oneResult);
-    }
-
-    public void sendException(final Throwable t) {
-      this.t = t;
-    }
-  }
-
-}


[46/50] [abbrv] incubator-geode git commit: GEODE-96: fix race in unit test

Posted by ds...@apache.org.
GEODE-96: fix race in unit test

It looks like the problem might have been that the
test only waited 5 seconds for an event.
It now waits 30 seconds. It also now can sense more
of what is happening on the OffHeapMemoryMonitor
using the new testHook OffHeapMemoryMonitorObserver.
If this problem reproduces then the test can add
additional validation to its observer to narrow down
what the cause of the missing event is.
This also fixes GEODE-348.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/6aadbf81
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/6aadbf81
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/6aadbf81

Branch: refs/heads/develop
Commit: 6aadbf81c8d8549cd82ae73994997f3a9e83c060
Parents: f437106
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Fri Oct 23 13:40:22 2015 -0700
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Fri Oct 23 13:43:04 2015 -0700

----------------------------------------------------------------------
 .../cache/control/OffHeapMemoryMonitor.java     |  61 ++++++++-
 .../MemoryThresholdsOffHeapDUnitTest.java       | 132 +++++++++++++++++--
 2 files changed, 182 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6aadbf81/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/control/OffHeapMemoryMonitor.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/control/OffHeapMemoryMonitor.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/control/OffHeapMemoryMonitor.java
index 0678c01..27780cc 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/control/OffHeapMemoryMonitor.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/control/OffHeapMemoryMonitor.java
@@ -117,15 +117,48 @@ public class OffHeapMemoryMonitor implements ResourceMonitor, MemoryUsageListene
     }
   }
 
+  public volatile OffHeapMemoryMonitorObserver testHook;
+  
+  /**
+   * Used by unit tests to be notified when OffHeapMemoryMonitor
+   * does something.
+   */
+  public static interface OffHeapMemoryMonitorObserver {
+    /**
+     * Called at the beginning of updateMemoryUsed.
+     * @param bytesUsed the number of bytes of off-heap memory currently used
+     * @param willSendEvent true if an event will be sent to the OffHeapMemoryUsageListener. 
+     */
+    public void beginUpdateMemoryUsed(long bytesUsed, boolean willSendEvent);
+    public void afterNotifyUpdateMemoryUsed(long bytesUsed);
+    /**
+     * Called at the beginning of updateStateAndSendEvent.
+     * @param bytesUsed the number of bytes of off-heap memory currently used
+     * @param willSendEvent true if an event will be sent to the OffHeapMemoryUsageListener. 
+     */
+    public void beginUpdateStateAndSendEvent(long bytesUsed, boolean willSendEvent);
+    public void updateStateAndSendEventBeforeProcess(long bytesUsed, MemoryEvent event);
+    public void updateStateAndSendEventBeforeAbnormalProcess(long bytesUsed, MemoryEvent event);
+    public void updateStateAndSendEventIgnore(long bytesUsed, MemoryState oldState, MemoryState newState, long mostRecentBytesUsed,
+        boolean deliverNextAbnormalEvent);
+  }
   @Override
   public void updateMemoryUsed(final long bytesUsed) {
-    if (!mightSendEvent(bytesUsed)) {
+    final boolean willSendEvent = mightSendEvent(bytesUsed);
+    final OffHeapMemoryMonitorObserver _testHook = this.testHook;
+    if (_testHook != null) {
+      _testHook.beginUpdateMemoryUsed(bytesUsed, willSendEvent);
+    }
+    if (!willSendEvent) {
       return;
     }
     synchronized (this.offHeapMemoryUsageListener) {
       this.offHeapMemoryUsageListener.offHeapMemoryUsed = bytesUsed;
       this.offHeapMemoryUsageListener.notifyAll();
     }
+    if (_testHook != null) {
+      _testHook.afterNotifyUpdateMemoryUsed(bytesUsed);
+    }
   }
   
   void setCriticalThreshold(final float criticalThreshold) {
@@ -242,11 +275,15 @@ public class OffHeapMemoryMonitor implements ResourceMonitor, MemoryUsageListene
       final MemoryEvent mre = this.mostRecentEvent;
       final MemoryState oldState = mre.getState();
       final MemoryThresholds thresholds = this.thresholds;
+      final OffHeapMemoryMonitorObserver _testHook = this.testHook;
       MemoryState newState = thresholds.computeNextState(oldState, bytesUsed);
       if (oldState != newState) {
         this.currentState = newState;
         
         MemoryEvent event = new MemoryEvent(ResourceType.OFFHEAP_MEMORY, oldState, newState, this.cache.getMyId(), bytesUsed, true, thresholds);
+        if (_testHook != null) {
+          _testHook.updateStateAndSendEventBeforeProcess(bytesUsed, event);
+        }
         this.upcomingEvent.set(event);
 
         processLocalEvent(event);
@@ -257,8 +294,16 @@ public class OffHeapMemoryMonitor implements ResourceMonitor, MemoryUsageListene
           && this.deliverNextAbnormalEvent) {
         this.deliverNextAbnormalEvent = false;
         MemoryEvent event = new MemoryEvent(ResourceType.OFFHEAP_MEMORY, oldState, newState, this.cache.getMyId(), bytesUsed, true, thresholds);
+        if (_testHook != null) {
+          _testHook.updateStateAndSendEventBeforeAbnormalProcess(bytesUsed, event);
+        }
         this.upcomingEvent.set(event);
         processLocalEvent(event);
+      } else {
+        if (_testHook != null) {
+          _testHook.updateStateAndSendEventIgnore(bytesUsed, oldState, newState, mre.getBytesUsed(), this.deliverNextAbnormalEvent);
+        }
+        
       }
     }
   }
@@ -452,6 +497,20 @@ public class OffHeapMemoryMonitor implements ResourceMonitor, MemoryUsageListene
                   // The wait timed out. So tell the OffHeapMemoryMonitor
                   // that we need an event if the state is not normal.
                   deliverNextAbnormalEvent();
+                  // TODO: don't we need a "break" here?
+                  //       As it is we set deliverNextAbnormalEvent
+                  //       but then go back to sleep in wait.
+                  //       We need to call updateStateAndSendEvent
+                  //       which tests deliverNextAbnormalEvent.
+                  // But just adding a break is probably not enough.
+                  // We only set deliverNextAbnormalEvent if the wait
+                  // timed out which means that the amount of offHeapMemoryUsed
+                  // did not change.
+                  // But in updateStateAndSendEvent we only deliver an
+                  // abnormal event if the amount of memory changed.
+                  // This code needs to be reviewed with Swapnil but
+                  // it looks to Darrel like deliverNextAbnormalEvent
+                  // can be removed.
                 } else {
                   // we have been notified so exit the inner while loop
                   // and call updateStateAndSendEvent.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6aadbf81/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
index d65dcc7..816d668 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
@@ -53,6 +53,7 @@ import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.Resou
 import com.gemstone.gemfire.internal.cache.control.MemoryEvent;
 import com.gemstone.gemfire.internal.cache.control.MemoryThresholds.MemoryState;
 import com.gemstone.gemfire.internal.cache.control.OffHeapMemoryMonitor;
+import com.gemstone.gemfire.internal.cache.control.OffHeapMemoryMonitor.OffHeapMemoryMonitorObserver;
 import com.gemstone.gemfire.internal.cache.control.ResourceAdvisor;
 import com.gemstone.gemfire.internal.cache.control.ResourceListener;
 import com.gemstone.gemfire.internal.cache.control.TestMemoryThresholdListener;
@@ -1383,6 +1384,8 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
     final Host host = Host.getHost(0);
     final VM server = host.getVM(0);
     final VM client = host.getVM(1);
+    final Object bigKey = -1;
+    final Object smallKey = -2;
 
     final int port = AvailablePortHelper.getRandomAvailableTCPPort();
     final int mcastPort = AvailablePortHelper.getRandomAvailableUDPPort();
@@ -1394,20 +1397,52 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
     doPutAlls(client, regionName, false/*catchServerException*/,
         false/*catchLowMemoryException*/, Range.DEFAULT);
 
+    
     //make the region sick in the server
-    server.invoke(new SerializableRunnable() {
-      public void run() {
+    final long bytesUsedAfterSmallKey = (long)server.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
         InternalResourceManager irm = ((GemFireCacheImpl)getCache()).getResourceManager();
         final OffHeapMemoryMonitor ohm = irm.getOffHeapMonitor();
         assertTrue(ohm.getState().isNormal());
         getCache().getLoggerI18n().fine(addExpectedExString);
         final LocalRegion r = (LocalRegion) getRootRegion().getSubregion(regionName);
-        final Object key = 1;
-        r.put(key, new byte[943720]);
+        final long bytesUsedAfterSmallKey;
+        {
+          OffHeapMemoryMonitorObserverImpl _testHook = new OffHeapMemoryMonitorObserverImpl();
+          ohm.testHook = _testHook;
+          try {
+            r.put(smallKey, "1234567890");
+            bytesUsedAfterSmallKey = _testHook.verifyBeginUpdateMemoryUsed(false);
+          } finally {
+            ohm.testHook = null;
+          }
+        }
+        {
+          final OffHeapMemoryMonitorObserverImpl th = new OffHeapMemoryMonitorObserverImpl();
+          ohm.testHook = th;
+          try {
+            r.put(bigKey, new byte[943720]);
+            th.verifyBeginUpdateMemoryUsed(bytesUsedAfterSmallKey + 943720 + 8, true);
+            WaitCriterion waitForCritical = new WaitCriterion() {
+              public boolean done() {
+                return th.checkUpdateStateAndSendEventBeforeProcess(bytesUsedAfterSmallKey + 943720 + 8, MemoryState.EVICTION_DISABLED_CRITICAL);
+              }
+              @Override
+              public String description() {
+                return null;
+              }
+            };
+            waitForCriterion(waitForCritical, 30*1000, 9, false);
+            th.validateUpdateStateAndSendEventBeforeProcess(bytesUsedAfterSmallKey + 943720 + 8, MemoryState.EVICTION_DISABLED_CRITICAL);
+          } finally {
+            ohm.testHook = null;
+          }
+        }
         WaitCriterion wc;
         if (r instanceof PartitionedRegion) {
           final PartitionedRegion pr = (PartitionedRegion) r;
-          final int bucketId = PartitionedRegionHelper.getHashKey(pr, null, key, null, null);
+          final int bucketId = PartitionedRegionHelper.getHashKey(pr, null, bigKey, null, null);
           wc = new WaitCriterion() {
             @Override
             public String description() {
@@ -1419,7 +1454,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
               if (!ohm.getState().isCritical()) return false;
               // Only done once the bucket has been marked sick
               try {
-                pr.getRegionAdvisor().checkIfBucketSick(bucketId, key);
+                pr.getRegionAdvisor().checkIfBucketSick(bucketId, bigKey);
                 return false;
               } catch (LowMemoryException ignore) {
                 return true;
@@ -1439,9 +1474,9 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             }
           };
         }
-        waitForCriterion(wc, 5000, 100, true);
+        waitForCriterion(wc, 30000, 9, true);
         getCache().getLoggerI18n().fine(removeExpectedExString);
-        return;
+        return bytesUsedAfterSmallKey;
       }
     });
 
@@ -1458,7 +1493,14 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
         final OffHeapMemoryMonitor ohm = irm.getOffHeapMonitor();
         assertTrue(ohm.getState().isCritical());
         getCache().getLogger().fine(MemoryThresholdsOffHeapDUnitTest.this.addExpectedBelow);
-        getRootRegion().getSubregion(regionName).destroy(1);
+        OffHeapMemoryMonitorObserverImpl _testHook = new OffHeapMemoryMonitorObserverImpl();
+        ohm.testHook = _testHook;
+        try {
+          getRootRegion().getSubregion(regionName).destroy(bigKey);
+          _testHook.verifyBeginUpdateMemoryUsed(bytesUsedAfterSmallKey, true);
+        } finally {
+          ohm.testHook = null;
+        }
         WaitCriterion wc = new WaitCriterion() {
           @Override
           public String description() {
@@ -1470,13 +1512,83 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return ohm.getState().isNormal();
           }
         };
-        waitForCriterion(wc, 5000, 100, true);
+        waitForCriterion(wc, 30000, 9, true);
         getCache().getLogger().fine(MemoryThresholdsOffHeapDUnitTest.this.removeExpectedBelow);
         return;
       }
     });
   }
   
+  private static class OffHeapMemoryMonitorObserverImpl implements OffHeapMemoryMonitorObserver {
+    private boolean beginUpdateMemoryUsed;
+    private long beginUpdateMemoryUsed_bytesUsed;
+    private boolean beginUpdateMemoryUsed_willSendEvent;
+    @Override
+    public synchronized void beginUpdateMemoryUsed(long bytesUsed, boolean willSendEvent) {
+      beginUpdateMemoryUsed = true;
+      beginUpdateMemoryUsed_bytesUsed = bytesUsed;
+      beginUpdateMemoryUsed_willSendEvent = willSendEvent;
+    }
+    @Override
+    public synchronized void afterNotifyUpdateMemoryUsed(long bytesUsed) {
+    }
+    @Override
+    public synchronized void beginUpdateStateAndSendEvent(long bytesUsed, boolean willSendEvent) {
+    }
+    private boolean updateStateAndSendEventBeforeProcess;
+    private long updateStateAndSendEventBeforeProcess_bytesUsed;
+    private MemoryEvent updateStateAndSendEventBeforeProcess_event;
+    @Override
+    public synchronized void updateStateAndSendEventBeforeProcess(long bytesUsed, MemoryEvent event) {
+      updateStateAndSendEventBeforeProcess = true;
+      updateStateAndSendEventBeforeProcess_bytesUsed = bytesUsed;
+      updateStateAndSendEventBeforeProcess_event = event;
+    }
+    @Override
+    public synchronized void updateStateAndSendEventBeforeAbnormalProcess(long bytesUsed, MemoryEvent event) {
+    }
+    @Override
+    public synchronized void updateStateAndSendEventIgnore(long bytesUsed, MemoryState oldState, MemoryState newState, long mostRecentBytesUsed,
+        boolean deliverNextAbnormalEvent) {
+    }
+
+    public synchronized void verifyBeginUpdateMemoryUsed(long expected_bytesUsed, boolean expected_willSendEvent) {
+      if (!beginUpdateMemoryUsed) {
+        fail("beginUpdateMemoryUsed was not called");
+      }
+      assertEquals(expected_bytesUsed, beginUpdateMemoryUsed_bytesUsed);
+      assertEquals(expected_willSendEvent, beginUpdateMemoryUsed_willSendEvent);
+    }
+    /**
+     * Verify that beginUpdateMemoryUsed was called, event will be sent, and return the "bytesUsed" it recorded.
+     */
+    public synchronized long verifyBeginUpdateMemoryUsed(boolean expected_willSendEvent) {
+      if (!beginUpdateMemoryUsed) {
+        fail("beginUpdateMemoryUsed was not called");
+      }
+      assertEquals(expected_willSendEvent, beginUpdateMemoryUsed_willSendEvent);
+      return beginUpdateMemoryUsed_bytesUsed;
+    }
+    public synchronized boolean checkUpdateStateAndSendEventBeforeProcess(long expected_bytesUsed, MemoryState expected_memoryState) {
+      if (!updateStateAndSendEventBeforeProcess) {
+        return false;
+      }
+      if (expected_bytesUsed != updateStateAndSendEventBeforeProcess_bytesUsed) {
+        return false;
+      }
+      if (!expected_memoryState.equals(updateStateAndSendEventBeforeProcess_event.getState())) {
+        return false;
+      }
+      return true;
+    }
+    public synchronized void validateUpdateStateAndSendEventBeforeProcess(long expected_bytesUsed, MemoryState expected_memoryState) {
+      if (!updateStateAndSendEventBeforeProcess) {
+        fail("updateStateAndSendEventBeforeProcess was not called");
+      }
+      assertEquals(expected_bytesUsed, updateStateAndSendEventBeforeProcess_bytesUsed);
+      assertEquals(expected_memoryState, updateStateAndSendEventBeforeProcess_event.getState());
+    }
+   }
   private void registerTestMemoryThresholdListener(VM vm) {
     vm.invoke(new SerializableCallable() {
       public Object call() throws Exception {


[31/50] [abbrv] incubator-geode git commit: GEODE-370: Using SerialDistributionMessage for serializers

Posted by ds...@apache.org.
GEODE-370: Using SerialDistributionMessage for serializers

DataSerializers were being sent using PooledDistributionMessage. That
meant they could be processed out of order. When DataSerializers were
added out of order to a client queue, some serializers would be lost.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/593d176f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/593d176f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/593d176f

Branch: refs/heads/develop
Commit: 593d176f713af6570ff6378fc4b7f884d880033a
Parents: 9390a62
Author: Dan Smith <up...@apache.org>
Authored: Wed Oct 21 13:32:18 2015 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu Oct 22 13:12:59 2015 -0700

----------------------------------------------------------------------
 .../com/gemstone/gemfire/internal/InternalDataSerializer.java   | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/593d176f/gemfire-core/src/main/java/com/gemstone/gemfire/internal/InternalDataSerializer.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/InternalDataSerializer.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/InternalDataSerializer.java
index 09f9280..27ba141 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/InternalDataSerializer.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/InternalDataSerializer.java
@@ -65,7 +65,6 @@ import com.gemstone.gemfire.GemFireIOException;
 import com.gemstone.gemfire.GemFireRethrowable;
 import com.gemstone.gemfire.Instantiator;
 import com.gemstone.gemfire.InternalGemFireError;
-import com.gemstone.gemfire.InternalGemFireException;
 import com.gemstone.gemfire.SerializationException;
 import com.gemstone.gemfire.SystemFailure;
 import com.gemstone.gemfire.ToDataException;
@@ -76,7 +75,7 @@ import com.gemstone.gemfire.distributed.internal.DMStats;
 import com.gemstone.gemfire.distributed.internal.DistributionManager;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.distributed.internal.LonerDistributionManager;
-import com.gemstone.gemfire.distributed.internal.PooledDistributionMessage;
+import com.gemstone.gemfire.distributed.internal.SerialDistributionMessage;
 import com.gemstone.gemfire.internal.cache.EnumListenerEvent;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
@@ -3475,7 +3474,7 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
    * distributed cache of a new <code>DataSerializer</code> being
    * registered.
    */
-  public static final class RegistrationMessage extends PooledDistributionMessage {
+  public static final class RegistrationMessage extends SerialDistributionMessage {
     /** The id of the <code>DataSerializer</code> that was
      * registered
      * since 5.7 an int instead of a byte


[39/50] [abbrv] incubator-geode git commit: [GEODE-338] Disable FDDUnitTest for now, due to invalid test

Posted by ds...@apache.org.
[GEODE-338] Disable FDDUnitTest for now, due to invalid test


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/ded14547
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/ded14547
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/ded14547

Branch: refs/heads/develop
Commit: ded145474c16f802fc970ca8fe114fb3df915038
Parents: a73dc1b
Author: Jason Huynh <jh...@pivotal.io>
Authored: Fri Oct 23 09:14:03 2015 -0700
Committer: Jason Huynh <jh...@pivotal.io>
Committed: Fri Oct 23 09:14:03 2015 -0700

----------------------------------------------------------------------
 .../test/java/com/gemstone/gemfire/internal/FDDUnitTest.java   | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ded14547/gemfire-core/src/test/java/com/gemstone/gemfire/internal/FDDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/FDDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/FDDUnitTest.java
index 7379c86..68385ee 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/FDDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/FDDUnitTest.java
@@ -69,8 +69,12 @@ public class FDDUnitTest extends CacheTestCase {
     vm1.invoke(CacheServerTestUtil.class, "closeCache");
     vm2.invoke(CacheServerTestUtil.class, "closeCache");
   }
+
+  public void testEmpty() {
+    //Ticket #GEODE-338.  Disable the test for now and rewrite as a junit test.
+  }
   
-  public void testFDSocketFixOnlyServers() throws Exception {
+  public void disable_testFDSocketFixOnlyServers() throws Exception {
     String os = System.getProperty("os.name");
     if (os != null) {
       if (os.indexOf("Windows") != -1) {


[09/50] [abbrv] incubator-geode git commit: GEODE-244: Adding debug logging for testRecoverRedundancyParallelAsyncEventQueueSimulation

Posted by ds...@apache.org.
GEODE-244: Adding debug logging for testRecoverRedundancyParallelAsyncEventQueueSimulation

This failure does not reproduce. I've enabled debug logging just for
this one test case so that if it does fail again, I can track down what
happened.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/ef5d9e2d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/ef5d9e2d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/ef5d9e2d

Branch: refs/heads/develop
Commit: ef5d9e2d6c5820b2faf20367b7f4172e77b96efa
Parents: c2db920
Author: Dan Smith <up...@apache.org>
Authored: Tue Oct 20 16:24:06 2015 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Tue Oct 20 17:06:09 2015 -0700

----------------------------------------------------------------------
 .../PartitionedRegionRebalanceOp.java           | 52 ++++++++++++++------
 .../control/RebalanceOperationDUnitTest.java    | 29 +++++------
 2 files changed, 49 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ef5d9e2d/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionRebalanceOp.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionRebalanceOp.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionRebalanceOp.java
index 39f4e97..46eb3df 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionRebalanceOp.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionRebalanceOp.java
@@ -77,6 +77,7 @@ public class PartitionedRegionRebalanceOp {
   private static final Logger logger = LogService.getLogger();
   
   private static final int MAX_PARALLEL_OPERATIONS = Integer.getInteger("gemfire.MAX_PARALLEL_BUCKET_RECOVERIES", 8);
+  private final boolean DEBUG = Boolean.getBoolean("gemfire.LOG_REBALANCE");
   
   private final boolean simulate;
   private final boolean replaceOfflineData;
@@ -197,9 +198,7 @@ public class PartitionedRegionRebalanceOp {
           membershipChange = false;
           //refetch the partitioned region details after
           //a membership change.
-          if (logger.isDebugEnabled()) {
-            logger.debug("Rebalancing {} detected membership changes. Refetching details", leaderRegion);
-          }
+          debug("Rebalancing {} detected membership changes. Refetching details", leaderRegion);
           if(this.stats != null) {
             this.stats.incRebalanceMembershipChanges(1);
           }
@@ -222,9 +221,7 @@ public class PartitionedRegionRebalanceOp {
         }
       }
       
-      if (logger.isDebugEnabled()) {
-        logger.debug("Rebalancing {} complete. Model:{}\n", leaderRegion, model);
-      }
+      debug("Rebalancing {} complete. Model:{}\n", leaderRegion, model);
       long end = System.nanoTime();
       
       for(PartitionRebalanceDetailsImpl details : serialOperator.getDetailSet()) {
@@ -444,22 +441,20 @@ public class PartitionedRegionRebalanceOp {
     int totalNumberOfBuckets = leaderRegion.getTotalNumberOfBuckets();
     Set<InternalDistributedMember> criticalMembers = resourceManager.getResourceAdvisor().adviseCritialMembers();;
     boolean removeOverRedundancy = true;
+    
+    debug("Building Model for rebalancing " + leaderRegion
+        + ". redundantCopies=" + redundantCopies + ", totalNumBuckets="
+        + totalNumberOfBuckets + ", criticalMembers=" + criticalMembers
+        + ", simulate=" + simulate);
+
+    
     model = new PartitionedRegionLoadModel(operator, redundantCopies, 
         totalNumberOfBuckets, comparor, criticalMembers, leaderRegion);
 
     for (Map.Entry<PartitionedRegion, InternalPRInfo> entry : detailsMap.entrySet()) {
       PartitionedRegion region = entry.getKey();
       InternalPRInfo details = entry.getValue();
-      if (isDebugEnabled) {
-        logger.debug("Added Region to model region={} details=", region);
-      }
-      for(PartitionMemberInfo memberDetails: details.getPartitionMemberInfo()) {
-        if (isDebugEnabled) {
-          logger.debug("Member: {} LOAD={}", memberDetails.getDistributedMember(), ((InternalPartitionDetails) memberDetails).getPRLoad());
-        }
-      }
-      Set<InternalPartitionDetails> memberDetailSet = 
-          details.getInternalPartitionDetails();
+      
       OfflineMemberDetails offlineDetails;
       if(replaceOfflineData) {
         offlineDetails = OfflineMemberDetails.EMPTY_DETAILS;
@@ -467,13 +462,38 @@ public class PartitionedRegionRebalanceOp {
         offlineDetails = details.getOfflineMembers();
       }
       boolean enforceLocalMaxMemory = !region.isEntryEvictionPossible();
+
+      debug("Added Region to model region=" + region + ", offlineDetails=" + offlineDetails 
+          + ", enforceLocalMaxMemory=" + enforceLocalMaxMemory);
+
+      for(PartitionMemberInfo memberDetails: details.getPartitionMemberInfo()) {
+        debug(
+            "For Region: " + region + ", Member: " + memberDetails.getDistributedMember() + "LOAD="
+                + ((InternalPartitionDetails) memberDetails).getPRLoad() 
+                +", equivalentMembers=" 
+                + dm.getMembersInSameZone((InternalDistributedMember) memberDetails.getDistributedMember()));
+      }
+      Set<InternalPartitionDetails> memberDetailSet = 
+          details.getInternalPartitionDetails();
+      
       model.addRegion(region.getFullPath(), memberDetailSet, offlineDetails, enforceLocalMaxMemory);
     }
     
     model.initialize();
     
+    debug("Rebalancing {} starting. Model:\n{}", leaderRegion, model);
+    
     return model;
   }
+  private void debug(String message, Object ...params) {
+    if(logger.isDebugEnabled()) {
+      logger.debug(message, params);
+    } else if(logger.isInfoEnabled() && DEBUG) {
+      logger.info(message, params);
+    }
+    
+  }
+
   /**
    * Create a redundant bucket on the target member
    * 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ef5d9e2d/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/control/RebalanceOperationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/control/RebalanceOperationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/control/RebalanceOperationDUnitTest.java
index 31d889c..e40222c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/control/RebalanceOperationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/control/RebalanceOperationDUnitTest.java
@@ -17,8 +17,6 @@ import java.util.Properties;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.concurrent.BrokenBarrierException;
-import java.util.concurrent.Callable;
 import java.util.concurrent.CancellationException;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
@@ -60,7 +58,6 @@ import com.gemstone.gemfire.internal.cache.DiskStoreImpl;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDataStore;
-import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserverAdapter;
 
 import dunit.AsyncInvocation;
@@ -1087,8 +1084,19 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
     }
   }
   
-  public void testRecoverRedundancyParallelAsyncEventQueueSimulation() {
-    recoverRedundancyParallelAsyncEventQueue(true);
+  public void testRecoverRedundancyParallelAsyncEventQueueSimulation() throws NoSuchFieldException, SecurityException {
+    invokeInEveryVM(new SerializableRunnable() {
+
+      @Override
+      public void run () {
+        System.setProperty("gemfire.LOG_REBALANCE", "true");
+      }
+    });
+    try {
+      recoverRedundancyParallelAsyncEventQueue(true);
+    } finally {
+      System.setProperty("gemfire.LOG_REBALANCE", "false");
+    }
   }
   
   public void testRecoverRedundancyParallelAsyncEventQueue() {
@@ -1862,17 +1870,6 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
    * are correct and we still rebalance correctly
    */
   public void testMoveBucketsOverflowToDisk() throws Throwable {
-    
-    System.setProperty("gemfire.LOG_REBALANCE", "true");
-    invokeInEveryVM(new SerializableCallable() {
-      
-      @Override
-      public Object call() throws Exception {
-        System.setProperty("gemfire.LOG_REBALANCE", "true");
-        return null;
-      }
-    });
-
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);


[47/50] [abbrv] incubator-geode git commit: GEODE-471: figure out why entry is not expiring

Posted by ds...@apache.org.
GEODE-471: figure out why entry is not expiring

Enhanced the test to do additional validation.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/2aec6a54
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/2aec6a54
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/2aec6a54

Branch: refs/heads/develop
Commit: 2aec6a54b9aa71d06b195e57991d0134cd2af096
Parents: 6aadbf8
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Fri Oct 23 16:03:36 2015 -0700
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Fri Oct 23 16:03:36 2015 -0700

----------------------------------------------------------------------
 .../gemstone/gemfire/internal/cache/AbstractRegionMap.java    | 1 +
 .../src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java | 2 +-
 .../gemfire/internal/cache/RemoteTransactionDUnitTest.java    | 7 +++++--
 3 files changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2aec6a54/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
index 494efaf..515a690 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
@@ -1649,6 +1649,7 @@ RETRY_LOOP:
                 retry = true;
                 continue RETRY_LOOP;
               }
+              logger.info("DARREL: destroy " + re.getKey() + " event.isOriginRemote()=" + event.isOriginRemote() + " event.getOperation().isExpiration()=" + event.getOperation().isExpiration() + " re.isInUseByTransaction()=" + re.isInUseByTransaction());
               if (!event.isOriginRemote() && event.getOperation().isExpiration()) {
                 // If this expiration started locally then only do it if the RE is not being used by a tx.
                 if (re.isInUseByTransaction()) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2aec6a54/gemfire-core/src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java
index 514d45b..76008bd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java
@@ -242,7 +242,7 @@ public class TXExpiryJUnitTest {
     DistributedTestCase.waitForCriterion(waitForExpire, 3000, 10, true);
   }
   
-  private void waitForEntryExpiration(LocalRegion lr, String key) {
+  public static void waitForEntryExpiration(LocalRegion lr, String key) {
     try {
       ExpirationDetector detector;
       do {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/2aec6a54/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoteTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoteTransactionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoteTransactionDUnitTest.java
index ccff0c6..c1b77c7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoteTransactionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoteTransactionDUnitTest.java
@@ -23,6 +23,7 @@ import javax.transaction.RollbackException;
 import javax.transaction.Status;
 import javax.transaction.UserTransaction;
 
+import com.gemstone.gemfire.TXExpiryJUnitTest;
 import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.AttributesMutator;
 import com.gemstone.gemfire.cache.CacheEvent;
@@ -4083,13 +4084,15 @@ protected static class ClientListener extends CacheListenerAdapter {
         ExpiryTask.suspendExpiration();
         Region.Entry entry = null;
         long tilt;
+        EntryExpiryTask eet;
         try {
           r.put("key", "value");
+          LocalRegion lr = (LocalRegion) r;
           r.put("nonTXkey", "nonTXvalue");
           getCache().getCacheTransactionManager().begin();
           r.put("key", "newvalue");
-        } 
-        finally {
+          TXExpiryJUnitTest.waitForEntryExpiration(lr, "key");
+        } finally {
           ExpiryTask.permitExpiration();
         }
         TransactionId tx = getCache().getCacheTransactionManager().suspend();


[33/50] [abbrv] incubator-geode git commit: GEODE-468: Using 1.8 generated files for AnalyzeSerializablesJUnitTest

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/37f77a90/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt b/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
index 490117f..708124f 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedDataSerializables.txt
@@ -3,8 +3,8 @@ fromData,62,2a2bb80023b500082a2bb900240100b5000b2a2bb80025b500052ab40005b9002601
 toData,30,2ab400082bb800202b2ab4000bb9002102002ab40005c000032bb80022b1
 
 com/gemstone/gemfire/admin/internal/FinishBackupRequest,2
-fromData,22,2a2bb700272a2bb80028b500022a2bb80028b50003b1
-toData,22,2a2bb700292ab400022bb8002a2ab400032bb8002ab1
+fromData,22,2a2bb700282a2bb80029b500022a2bb80029b50003b1
+toData,22,2a2bb7002a2ab400022bb8002b2ab400032bb8002bb1
 
 com/gemstone/gemfire/admin/internal/FinishBackupResponse,2
 fromData,14,2a2bb700042a2bb80005b50003b1
@@ -15,12 +15,12 @@ fromData,14,2a2bb700042a2bb80005b50003b1
 toData,14,2a2bb700062ab400032bb80007b1
 
 com/gemstone/gemfire/admin/internal/SystemMemberCacheEventProcessor$SystemMemberCacheMessage,2
-fromData,27,2a2bb7001a2a2bb8001bb5000d2a2bb9001c0100b8001db50010b1
-toData,27,2a2bb7001e2ab4000d2bb8001f2b2ab40010b40020b900210200b1
+fromData,27,2a2bb7001b2a2bb8001cb5000d2a2bb9001d0100b8001eb50010b1
+toData,27,2a2bb7001f2ab4000d2bb800202b2ab40010b40021b900220200b1
 
 com/gemstone/gemfire/admin/jmx/internal/StatAlertNotification,2
-fromData,39,2a2bb80029b600032a2bb8002ab600072a2bb8002bc0002cc0002cb600052a2bb8002db50008b1
-toData,33,2ab600152bb800252ab6001f2bb800262ab6000d2bb800272ab400082bb80028b1
+fromData,39,2a2bb8002ab600032a2bb8002bb600072a2bb8002cc0002dc0002db600052a2bb8002eb50008b1
+toData,33,2ab600162bb800262ab600202bb800272ab6000e2bb800282ab400082bb80029b1
 
 com/gemstone/gemfire/cache/ExpirationAttributes,2
 fromData,22,2a2bb900120100b500022a2bb80013c00014b50004b1
@@ -35,8 +35,8 @@ fromData,14,2a2bb9000f0100b80010b50003b1
 toData,14,2b2ab40003b4000db9000e0200b1
 
 com/gemstone/gemfire/cache/client/internal/CacheServerLoadMessage,2
-fromData,52,2a2bb7000c2abb000d59b7000eb500022ab400022bb8000f2abb001059b70011b500032ab400032bb8000f2a2bb80012b50004b1
-toData,30,2a2bb700132ab400022bb800142ab400032bb800142ab400042bb80015b1
+fromData,52,2a2bb7000d2abb000e59b7000fb500022ab400022bb800102abb001159b70012b500032ab400032bb800102a2bb80013b50004b1
+toData,30,2a2bb700142ab400022bb800152ab400032bb800152ab400042bb80016b1
 
 com/gemstone/gemfire/cache/client/internal/locator/ClientConnectionRequest,2
 fromData,14,2a2bb700042a2bb80005b50003b1
@@ -63,8 +63,8 @@ fromData,41,2a2bb80006b500032a2bb900070100b500052ab40003c600122ab40003b600049a00
 toData,19,2ab400032bb800082b2ab40005b900090200b1
 
 com/gemstone/gemfire/cache/client/internal/locator/LocatorStatusResponse,2
-fromData,56,2a2bb6001e2a2bb6001f2a2bb600202a2bb600212a2bb600222a2bb600232a2bb600242a2bb600252a2bb600262a2bb600272a2bb60028b1
-toData,56,2a2bb600312a2bb600322a2bb600332a2bb600342a2bb600352a2bb600362a2bb600372a2bb600382a2bb600392a2bb6003a2a2bb6003bb1
+fromData,56,2a2bb6001f2a2bb600202a2bb600212a2bb600222a2bb600232a2bb600242a2bb600252a2bb600262a2bb600272a2bb600282a2bb60029b1
+toData,56,2a2bb600322a2bb600332a2bb600342a2bb600352a2bb600362a2bb600372a2bb600382a2bb600392a2bb6003a2a2bb6003b2a2bb6003cb1
 
 com/gemstone/gemfire/cache/client/internal/locator/QueueConnectionRequest,2
 fromData,40,2a2bb700072a2bb80008b500042a2bb80009b500062a2bb8000ab500052a2bb9000b0100b50002b1
@@ -78,17 +78,13 @@ com/gemstone/gemfire/cache/client/internal/locator/ServerLocationRequest,2
 fromData,9,2a2bb80003b50002b1
 toData,9,2ab400022bb80004b1
 
-com/gemstone/gemfire/cache/hdfs/internal/HDFSEventQueueAttributesImpl,2
-fromData,69,2a2bb9001c0100b500032a2bb9001c0100b500042a2bb9001d0100b500052a2bb8001eb500022a2bb9001c0100b500062a2bb9001d0100b500072a2bb9001c0100b50008b1
-toData,69,2b2ab40003b9001902002b2ab40004b9001902002b2ab40005b9001a02002ab400022bb8001b2b2ab40006b9001902002b2ab40007b9001a02002b2ab40008b900190200b1
-
 com/gemstone/gemfire/cache/hdfs/internal/HDFSGatewayEventImpl,2
 fromData,17,2a2bb7001b2a2bb8001cc0001db50009b1
 toData,14,2a2bb700172ab400092bb80018b1
 
 com/gemstone/gemfire/cache/hdfs/internal/PersistedEventImpl,2
-fromData,104,2a2bb900100100b80011b500032a2bb900100100b500062a04b7000999000e2a2bb80012b50004a700402a05b700099900302bb800124d2cc7000b2a01b50004a7001cb8001399000e2a2cb80014b50004a7000b2a2cb80015b50004a7000b2a2bb80016b50004b1
-toData,107,2b2ab40003b40007b9000802002b2ab40006b9000802002a04b700099900142ab40004c0000ac0000a2bb8000ba7003d2a05b7000999002d2ab40004c1000c9900182ab40004c0000c4d2cb9000d01002bb8000ea700162ab400042bb8000ea7000b2ab400042bb8000fb1
+fromData,104,2a2bb900110100b80012b500032a2bb900110100b500072a04b7000a99000e2a2bb80013b50004a700402a05b7000a9900302bb800134d2cc7000b2a01b50004a7001cb8001499000e2a2cb80015b50004a7000b2a2cb80016b50004a7000b2a2bb80017b50004b1
+toData,107,2b2ab40003b40008b9000902002b2ab40007b9000902002a04b7000a9900142ab40004c0000bc0000b2bb8000ca7003d2a05b7000a99002d2ab40004c1000d9900182ab40004c0000d4d2cb9000e01002bb8000fa700162ab400042bb8000fa7000b2ab400042bb80010b1
 
 com/gemstone/gemfire/cache/hdfs/internal/SortedHDFSQueuePersistedEvent,2
 fromData,14,2a2bb7000d2a2bb8000eb5000ab1
@@ -124,11 +120,11 @@ toData,29,2ab400042bb800082b2ab40005b9000902002b2ab40007b9000a0200b1
 
 com/gemstone/gemfire/cache/query/internal/CqEntry,2
 fromData,17,2a2bb80009b500022a2bb80009b50003b1
-toData,17,2ab400022bb8000a2ab400032bb8000ab1
+toData,17,2ab400022bb8000b2ab400032bb8000bb1
 
 com/gemstone/gemfire/cache/query/internal/CumulativeNonDistinctResults,2
-fromData,127,2bb8001dc0001e4d2abb0003591300042cb70005b500012cb9001f01003e2bb90020010037042abb001a59160488b70021b5000816043706160609949e00421d9900232bb800223a082ab40008bb0023592cc000241908b70025b90026020057a700152bb8001d3a082ab400081908b9002602005716060a653706a7ffbdb1
-toData,125,2ab40001b900270100b9001f01003d2ab40001b9002701002bb80028bb00295911040001b7002a4e2db6002b3a042ab600143a050336061905b9001501009900311905b9001601003a071c9900181907c0002cb9002d01003a0819082bb8002ea7000919072db80028840601a7ffcb1904150685b6002f2d2bb60030b1
+fromData,126,2bb8001dc0001e4d2abb00035912042cb70005b500012cb9001f01003e2bb90020010037042abb001a59160488b70021b5000816043706160609949e00421d9900232bb800223a082ab40008bb0023592cc000241908b70025b90026020057a700152bb8001d3a082ab400081908b9002602005716060a653706a7ffbdb1
+toData,125,2ab40001b900280100b9001f01003d2ab40001b9002801002bb80029bb002a5911040001b7002b4e2db6002c3a042ab600143a050336061905b9001501009900311905b9001601003a071c9900181907c0002db9002e01003a0819082bb8002fa7000919072db80029840601a7ffcb1904150685b600302d2bb60031b1
 
 com/gemstone/gemfire/cache/query/internal/LinkedResultSet,2
 fromData,40,2bb9001501003d2a2bb80016c00017b500071c3e1d9e00122a2bb80016b60018578403ffa7fff0b1
@@ -136,11 +132,11 @@ toData,46,2b2ab60019b9001a02002ab400072bb8001b2ab6001c4d2cb9001d01009900102cb900
 
 com/gemstone/gemfire/cache/query/internal/LinkedStructSet,2
 fromData,68,2a2bb900300100b500022bb9003101003d2a2bb80032c00020b500071c3e1d9e00242bb80032c000333a042abb000c592ab400071904b70034b60035578403ffa7ffdeb1
-toData,66,2b2ab40002b9003602002b2ab60037b9003802002ab400072bb800392ab600284d2cb90029010099001a2cb9002a0100c000124e2db9003a01002bb80039a7ffe3b1
+toData,66,2b2ab40002b9003702002b2ab60038b9003902002ab400072bb8003a2ab600284d2cb90029010099001a2cb9002a0100c000124e2db9003b01002bb8003aa7ffe3b1
 
 com/gemstone/gemfire/cache/query/internal/NWayMergeResults,2
-fromData,135,2bb8001ec0001f4d2abb0004591300202cb70006b500022cb9002101003e2a2bb80022b500012bb90023010037042abb001b59160488b70024b5000916043706160609949e00421d9900232bb800253a082ab40009bb0026592cc000271908b70028b90029020057a700152bb8001e3a082ab400091908b9002902005716060a653706a7ffbdb1
-toData,133,2ab40002b9002a0100b9002101003d2ab40002b9002a01002bb8002b2ab400012bb8002cbb002d5911040001b7002e4e2db6002f3a042ab600153a050336061905b9001601009900311905b9001701003a071c9900181907c00030b9003101003a0819082bb80032a7000919072db8002b840601a7ffcb1904150685b600332d2bb60034b1
+fromData,134,2bb8001ec0001f4d2abb00045912202cb70006b500022cb9002101003e2a2bb80022b500012bb90023010037042abb001b59160488b70024b5000916043706160609949e00421d9900232bb800253a082ab40009bb0026592cc000271908b70028b90029020057a700152bb8001e3a082ab400091908b9002902005716060a653706a7ffbdb1
+toData,133,2ab40002b9002b0100b9002101003d2ab40002b9002b01002bb8002c2ab400012bb8002dbb002e5911040001b7002f4e2db600303a042ab600153a050336061905b9001601009900311905b9001701003a071c9900181907c00031b9003201003a0819082bb80033a7000919072db8002c840601a7ffcb1904150685b600342d2bb60035b1
 
 com/gemstone/gemfire/cache/query/internal/NullToken,2
 fromData,1,b1
@@ -152,59 +148,59 @@ toData,29,2b2ab40004b9000502002b2ab40006b9000702002ab400032bb80008b1
 
 com/gemstone/gemfire/cache/query/internal/ResultsBag,2
 fromData,106,2a2bb80018c00019b5001a2a2bb9001b0100b50017b2001c9a00162ab400179c000fbb001d592ab40017b7001ebf2a2ab6001fb500042a2bb600202ab400172ab40021643d1c9e00232bb800184e2bb9001b010036042ab400042d1504b60022571c1504643da7ffdfb1
-toData,116,2ab4001a2bb800232b2ab60024b9002502002a2bb600262ab600242ab40021643d2ab40004b60027b9002801004e2db9000a010099003f1c9e003b2db9000b0100c000293a041904b6002a3a0519052bb800231904b6002b36061c1506a200061c36062b1506b9002502001c1506643da7ffbeb1
+toData,116,2ab4001a2bb800242b2ab60025b9002602002a2bb600272ab600252ab40021643d2ab40004b60028b9002901004e2db9000a010099003f1c9e003b2db9000b0100c0002a3a041904b6002b3a0519052bb800241904b6002c36061c1506a200061c36062b1506b9002602001c1506643da7ffbeb1
 
 com/gemstone/gemfire/cache/query/internal/ResultsCollectionWrapper,2
-fromData,55,2bb9005601003d1c99000e2a2bb80057b50002a7000e2a2bb80058c00059b500022a2bb80058c0005ab5000d2a2bb900560100b50004b1
-toData,60,2ab40002c100523d2b1cb9005302001c9900112ab40002c0004d2bb80054a7000b2ab400022bb800552ab4000d2bb800552b2ab40004b900530200b1
+fromData,55,2bb9005801003d1c99000e2a2bb80059b50002a7000e2a2bb8005ac0005bb500022a2bb8005ac0005cb5000d2a2bb900580100b50004b1
+toData,60,2ab40002c100543d2b1cb9005502001c9900112ab40002c0004e2bb80056a7000b2ab400022bb800572ab4000d2bb800572b2ab40004b900550200b1
 
 com/gemstone/gemfire/cache/query/internal/ResultsSet,2
-fromData,49,2bb9001701003dbb001859b700194e2d2bb8001a2a2db600051c360415049e00122a2bb8001bb6001c578404ffa7ffefb1
-toData,70,2b2ab6001db9001e02002ab6001fb900040100c000184d2cc6000704a70004031220b800212c2bb800222ab600234e2db9002401009900102db9002501002bb80026a7ffedb1
+fromData,49,2bb9001801003dbb001959b7001a4e2d2bb8001b2a2db600051c360415049e00122a2bb8001cb6001d578404ffa7ffefb1
+toData,70,2b2ab6001eb9001f02002ab60020b900040100c000194d2cc6000704a70004031221b800222c2bb800232ab600244e2db9002501009900102db9002601002bb80027a7ffedb1
 
 com/gemstone/gemfire/cache/query/internal/SortedResultSet,2
-fromData,40,2bb9001501003d2a2bb80016c00017b500081c3e1d9e00122a2bb80016b60018578403ffa7fff0b1
-toData,46,2b2ab60019b9001a02002ab400082bb8001b2ab6001c4d2cb9001d01009900102cb9001e01002bb8001ba7ffedb1
+fromData,40,2bb9001601003d2a2bb80017c00018b500081c3e1d9e00122a2bb80017b60019578403ffa7fff0b1
+toData,46,2b2ab6001ab9001b02002ab400082bb8001c2ab6001d4d2cb9001e01009900102cb9001f01002bb8001ca7ffedb1
 
 com/gemstone/gemfire/cache/query/internal/SortedStructSet,2
-fromData,57,2a2bb900430100b500022bb9004401003d2a2bb80045c00034b500091c3e1d9e00192bb80045c000283a042a1904b60013578403ffa7ffe9b1
-toData,64,2b2ab40002b9004602002b2ab6002bb9004702002ab400092bb800482ab6002c4d2cb9002601009900182cb900270100c00028c000284e2d2bb80049a7ffe5b1
+fromData,57,2a2bb900440100b500022bb9004501003d2a2bb80046c00034b500091c3e1d9e00192bb80046c000283a042a1904b60013578403ffa7ffe9b1
+toData,64,2b2ab40002b9004702002b2ab6002bb9004802002ab400092bb800492ab6002c4d2cb9002601009900182cb900270100c00028c000284e2d2bb8004aa7ffe5b1
 
 com/gemstone/gemfire/cache/query/internal/StructBag,2
-fromData,16,2a2bb700472a2bb900480100b50002b1
-toData,16,2a2bb700492b2ab40002b9004a0200b1
+fromData,16,2a2bb700482a2bb900490100b50002b1
+toData,16,2a2bb7004a2b2ab40002b9004b0200b1
 
 com/gemstone/gemfire/cache/query/internal/StructImpl,2
-fromData,72,2a2bb80025c00026b500072a2bb80027b500082ab40008c600302ab400084d2cbe3e03360415041da2001f2c1504323a051905c1000999000b2a04b50002a70009840401a7ffe1b1
-toData,17,2ab400072bb800282ab400082bb80029b1
+fromData,72,2a2bb80026c00027b500072a2bb80028b500082ab40008c600302ab400084d2cbe3e03360415041da2001f2c1504323a051905c1000999000b2a04b50002a70009840401a7ffe1b1
+toData,17,2ab400072bb800292ab400082bb8002ab1
 
 com/gemstone/gemfire/cache/query/internal/StructSet,2
-fromData,58,2abb000359bb000459b70005b70006b500072bb9004f01003d2a2bb80050c00040b5000c1c3e1d9e00122a2bb80050b60051578403ffa7fff0b1
-toData,46,2b2ab60039b9005202002ab4000c2bb800532ab600494d2cb9001201009900102cb9001301002bb80053a7ffedb1
+fromData,58,2abb000359bb000459b70005b70006b500072bb9005001003d2a2bb80051c00040b5000c1c3e1d9e00122a2bb80051b60052578403ffa7fff0b1
+toData,46,2b2ab60039b9005302002ab4000c2bb800542ab600494d2cb9001201009900102cb9001301002bb80054a7ffedb1
 
 com/gemstone/gemfire/cache/query/internal/Undefined,2
 fromData,1,b1
 toData,1,b1
 
 com/gemstone/gemfire/cache/query/internal/index/IndexCreationData,2
-fromData,106,2a2bb9000e0100b500022a2bb9000e0100b500052a2bb9000e0100b500042a2bb9000f0100b500082bb9001001003d031ca0000d2ab2000bb50003a70019041ca0000d2ab2000db50003a7000a2ab20011b500032bb9000f01003e1d99000d2a2bb9000e0100b50006b1
-toData,122,2b2ab40002b9000902002b2ab40005b9000902002b2ab40004b9000902002b2ab40008b9000a0200b2000b2ab40003a6000d2b03b9000c0200a7001eb2000d2ab40003a6000d2b04b9000c0200a7000a2b05b9000c02002ab40006c600172b04b9000a02002b2ab40006b900090200a7000a2b03b9000a0200b1
+fromData,106,2a2bb9000f0100b500022a2bb9000f0100b500052a2bb9000f0100b500042a2bb900100100b500082bb9001101003d031ca0000d2ab2000cb50003a70019041ca0000d2ab2000eb50003a7000a2ab20012b500032bb9001001003e1d99000d2a2bb9000f0100b50006b1
+toData,122,2b2ab40002b9000a02002b2ab40005b9000a02002b2ab40004b9000a02002b2ab40008b9000b0200b2000c2ab40003a6000d2b03b9000d0200a7001eb2000e2ab40003a6000d2b04b9000d0200a7000a2b05b9000d02002ab40006c600172b04b9000b02002b2ab40006b9000a0200a7000a2b03b9000b0200b1
 
 com/gemstone/gemfire/cache/query/internal/types/CollectionTypeImpl,2
-fromData,17,2a2bb700222a2bb80023c00024b50003b1
-toData,14,2a2bb700252ab400032bb80026b1
+fromData,17,2a2bb700232a2bb80024c00025b50003b1
+toData,14,2a2bb700262ab400032bb80027b1
 
 com/gemstone/gemfire/cache/query/internal/types/MapTypeImpl,2
-fromData,17,2a2bb7001b2a2bb8001cc00015b50003b1
-toData,14,2a2bb7001d2ab400032bb8001eb1
+fromData,17,2a2bb7001c2a2bb8001dc00015b50003b1
+toData,14,2a2bb7001e2ab400032bb8001fb1
 
 com/gemstone/gemfire/cache/query/internal/types/ObjectTypeImpl,2
-fromData,9,2a2bb80009b50002b1
-toData,9,2ab400022bb8000ab1
+fromData,9,2a2bb8000ab50002b1
+toData,9,2ab400022bb8000bb1
 
 com/gemstone/gemfire/cache/query/internal/types/StructTypeImpl,2
-fromData,28,2a2bb700282a2bb80029b5000b2a2bb8002ac0002bc0002bb5000db1
-toData,22,2a2bb7002c2ab4000b2bb8002d2ab4000d2bb8002eb1
+fromData,28,2a2bb700292a2bb8002ab5000b2a2bb8002bc0002cc0002cb5000db1
+toData,22,2a2bb7002d2ab4000b2bb8002e2ab4000d2bb8002fb1
 
 com/gemstone/gemfire/cache/server/ServerLoad,2
 fromData,41,2a2bb900070100b500022a2bb900070100b500042a2bb900070100b500032a2bb900070100b50005b1
@@ -219,140 +215,140 @@ fromData,19,2a2bb80012b5000e2a2bb900130100b5000fb1
 toData,19,2ab4000e2bb800102b2ab4000fb900110300b1
 
 com/gemstone/gemfire/distributed/internal/DistributionAdvisor$Profile,2
-fromData,40,2abb001059b70011b500072ab400072bb800122a2bb900130100b500082a2bb900130100b50002b1
-toData,29,2ab400072bb8000e2b2ab40008b9000f02002b2ab40002b9000f0200b1
+fromData,40,2abb001259b70013b500082ab400082bb800142a2bb900150100b500092a2bb900150100b50003b1
+toData,29,2ab400082bb800102b2ab40009b9001102002b2ab40003b900110200b1
 
 com/gemstone/gemfire/distributed/internal/DistributionMessage,2
 fromData,1,b1
 toData,1,b1
 
 com/gemstone/gemfire/distributed/internal/HighPriorityAckedMessage,2
-fromData,51,2a2bb700432a2bb900440100b500122ab800452bb90044010032b500092a2bb900460100b5000a2a2bb80047c00048b50007b1
-toData,47,2a2bb7003f2b2ab40012b9004002002b2ab40009b60024b9004002002b2ab4000ab9004102002ab400072bb80042b1
+fromData,51,2a2bb700442a2bb900450100b500122ab800462bb90045010032b500092a2bb900470100b5000a2a2bb80048c00049b50007b1
+toData,47,2a2bb700402b2ab40012b9004102002b2ab40009b60024b9004102002b2ab4000ab9004202002ab400072bb80043b1
 
 com/gemstone/gemfire/distributed/internal/ReplyMessage,2
-fromData,101,2a2bb7003c2bb9003d01003d2a1c05b8003eb500022a1c1008b8003eb500031c04b8003e99000d2a2bb9003f0100b500041c07b8003e9900132a2bb80040b500062a04b50007a700141c1040b8003e99000b2a2bb80040b500062a1c1080b8003eb50038b1
-toData,132,2a2bb70037033d2ab400029900081c0580913d2ab4000799000b1c0780913da700102ab40006c600091c104080913d2ab400049900081c0480913d2ab400039900091c100880913d2ab400389900091c108080913d2b1cb9003902002ab4000499000d2b2ab40004b9003a02002ab400079a000a2ab40006c6000b2ab400062bb8003bb1
+fromData,101,2a2bb7003d2bb9003e01003d2a1c05b8003fb500022a1c1008b8003fb500031c04b8003f99000d2a2bb900400100b500041c07b8003f9900132a2bb80041b500062a04b50007a700141c1040b8003f99000b2a2bb80041b500062a1c1080b8003fb50039b1
+toData,132,2a2bb70038033d2ab400029900081c0580913d2ab4000799000b1c0780913da700102ab40006c600091c104080913d2ab400049900081c0480913d2ab400039900091c100880913d2ab400399900091c108080913d2b1cb9003a02002ab4000499000d2b2ab40004b9003b02002ab400079a000a2ab40006c6000b2ab400062bb8003cb1
 
 com/gemstone/gemfire/distributed/internal/SerialAckedMessage,2
-fromData,27,2a2bb7002c2a2bb9002d0100b500182a2bb8002ec00020b50007b1
-toData,24,2a2bb700292b2ab40018b9002a02002ab400072bb8002bb1
+fromData,27,2a2bb7002d2a2bb9002e0100b500182a2bb8002fc00020b50007b1
+toData,24,2a2bb7002a2b2ab40018b9002b02002ab400072bb8002cb1
 
 com/gemstone/gemfire/distributed/internal/ServerLocation,2
-fromData,19,2a2bb8000ab500082a2bb9000b0100b50009b1
-toData,19,2ab400082bb8000c2b2ab40009b9000d0200b1
+fromData,19,2a2bb8000bb500092a2bb9000c0100b5000ab1
+toData,19,2ab400092bb8000d2b2ab4000ab9000e0200b1
 
 com/gemstone/gemfire/distributed/internal/ShutdownMessage,2
-fromData,27,2a2bb7000b2a2bb9000c0100b500032a2bb8000dc0000eb50002b1
-toData,24,2a2bb700082b2ab40003b9000902002ab400022bb8000ab1
+fromData,27,2a2bb7000c2a2bb9000d0100b500032a2bb8000ec0000fb50002b1
+toData,24,2a2bb700092b2ab40003b9000a02002ab400022bb8000bb1
 
 com/gemstone/gemfire/distributed/internal/StartupMessage,3
-fromDataProblem,38,2ab40039c7000e2abb006b59b7006cb500392ab400392bb6006d572ab40039126eb6006d57b1
-fromData,368,2a2bb7006f2bb9007001003d1c99000e2a2bb80071b5000ca700082a01b5000c2bb80072b2004fb600509c000704a70004033e1d9900082bb80073572a2bb80074b500092a2bb900750100b5000d2a2bb900700100b5000e2a2bb900700100b500122a2bb900700100b500132bb900750100360403360515051504a200402bb800763a062bb90075010036071906c6000d19060301011507b80077a7001a3a082ab2007804bd00235903190853b900270200b70079840501a7ffbf2bb900750100360503360615061505a2004b2bb800763a072bb800763a082bb90075010036091907c600121908c6000d19071908150903b8007aa7001a3a0a2ab2007b04bd00235903190a53b900270200b70079840601a7ffb42a2bb80073c0007cb500142a2bb900750100b500192a2bb80074b5001a2a2bb900700100b5001bbb006459b700653a0619062bb6007d2a1906b6007eb5000a2a1906b6007fb5000b2a1906b60080b5000f2a1906b60081b50010b1
-toData,409,2a2bb7004b2b2ab4000cc6000704a7000403b9004c02002ab4000cc6000b2ab4000c2bb8004d2bb8004eb2004fb600509c000704a70004033d1c99000ebb005159b700522bb800532ab400092bb800542b2ab4000db9005502002b2ab4000eb9004c02002b2ab40012b9004c02002b2ab40013b9004c0200b800564e2b2dbeb90055020003360415042dbea200212d150432b600572bb800582b2d150432b60059b900550200840401a7ffdeb8005a3a042b1904beb90055020003360515051904bea2007f1904150532c1005b9900331904150532c0005bb6005cb6005d3a061904150532c0005bb6005eb6005d3a071904150532c0005bb6005f3608a7002a1904150532c00060b600613a061904150532c00060b600623a071904150532c00060b60063360819062bb8005819072bb800582b1508b900550200840501a7ff7f2ab400142bb800532b2ab40019b9005502002ab4001a2bb800542b2ab4001bb9004c0200bb006459b700653a0519052ab4000ab6006619052ab4000bb6006719052ab4000fb6006819052ab40010b6006919052bb6006ab1
+fromDataProblem,38,2ab4003bc7000e2abb006e59b7006fb5003b2ab4003b2bb60070572ab4003b1271b6007057b1
+fromData,368,2a2bb700722bb9007301003d1c99000e2a2bb80074b5000ca700082a01b5000c2bb80075b20052b600539c000704a70004033e1d9900082bb80076572a2bb80077b500092a2bb900780100b5000d2a2bb900730100b5000e2a2bb900730100b500122a2bb900730100b500132bb900780100360403360515051504a200402bb800793a062bb90078010036071906c6000d19060301011507b8007aa7001a3a082ab2007b04bd00245903190853b900280200b7007c840501a7ffbf2bb900780100360503360615061505a2004b2bb800793a072bb800793a082bb90078010036091907c600121908c6000d19071908150903b8007da7001a3a0a2ab2007e04bd00245903190a53b900280200b7007c840601a7ffb42a2bb80076c0007fb500142a2bb900780100b500192a2bb80077b5001a2a2bb900730100b5001bbb006759b700683a0619062bb600802a1906b60081b5000a2a1906b60082b5000b2a1906b60083b5000f2a1906b60084b50010b1
+toData,409,2a2bb7004e2b2ab4000cc6000704a7000403b9004f02002ab4000cc6000b2ab4000c2bb800502bb80051b20052b600539c000704a70004033d1c99000ebb005459b700552bb800562ab400092bb800572b2ab4000db9005802002b2ab4000eb9004f02002b2ab40012b9004f02002b2ab40013b9004f0200b800594e2b2dbeb90058020003360415042dbea200212d150432b6005a2bb8005b2b2d150432b6005cb900580200840401a7ffdeb8005d3a042b1904beb90058020003360515051904bea2007f1904150532c1005e9900331904150532c0005eb6005fb600603a061904150532c0005eb60061b600603a071904150532c0005eb600623608a7002a1904150532c00063b600643a061904150532c00063b600653a071904150532c00063b60066360819062bb8005b19072bb8005b2b1508b900580200840501a7ff7f2ab400142bb800562b2ab40019b9005802002ab4001a2bb800572b2ab4001bb9004f0200bb006759b700683a0519052ab4000ab6006919052ab4000bb6006a19052ab4000fb6006b19052ab40010b6006c19052bb6006db1
 
 com/gemstone/gemfire/distributed/internal/StartupResponseMessage,3
-fromDataProblem,43,2ab40026c7000e2abb003859b70039b500262ab400262bb6003a572ab40026123b123cb8003db6003a57b1
-fromData,267,2a2bb7004c2bb8004d4d2a2bb9004e0100b500202cb20041b600429c000a2bb9004f0100582a2bb80050b500092a2bb900510100b5000a2bb9004e01003e2a1dbd0013b500032a1dbc0ab5000203360415041da200362ab4000315042bb80052532ab4000215042bb9004e01004fa700153a052ab4000215042bb9004e01004f1905bf840401a7ffca2bb9004e010036042a1504bd0013b500042a1504bd0013b500052a1504bc0ab5000603360515051504a2002c2ab4000415052bb80052532ab4000515052bb80052532ab4000615052bb9004e01004f840501a7ffd32a2bb80053c00054b5000c2cb20049b600429c00082bb80053572a2bb9004e0100b5000e2a2bb80050b50011b1
-toData,215,2a2bb7003e2bb8003f4d2b2ab40020b9004002002cb20041b600429c000c2bb80043b9004403002ab400092bb800452b2ab4000ab9004602002b2ab40002beb900400200033e1d2ab40002bea2001f2ab400031d322bb800472b2ab400021d2eb900400200840301a7ffde2b2ab40006beb900400200033e1d2ab40006bea200292ab400041d322bb800472ab400051d322bb800472b2ab400061d2eb900400200840301a7ffd42ab4000c2bb800482cb20049b600429c000ebb004a59b7004b2bb800482b2ab4000eb9004002002ab400112bb80045b1
+fromDataProblem,43,2ab40026c7000e2abb003959b7003ab500262ab400262bb6003b572ab40026123c123db8003eb6003b57b1
+fromData,267,2a2bb7004d2bb8004e4d2a2bb9004f0100b500202cb20042b600439c000a2bb900500100582a2bb80051b500092a2bb900520100b5000a2bb9004f01003e2a1dbd0013b500032a1dbc0ab5000203360415041da200362ab4000315042bb80053532ab4000215042bb9004f01004fa700153a052ab4000215042bb9004f01004f1905bf840401a7ffca2bb9004f010036042a1504bd0013b500042a1504bd0013b500052a1504bc0ab5000603360515051504a2002c2ab4000415052bb80053532ab4000515052bb80053532ab4000615052bb9004f01004f840501a7ffd32a2bb80054c00055b5000c2cb2004ab600439c00082bb80054572a2bb9004f0100b5000e2a2bb80051b50011b1
+toData,215,2a2bb7003f2bb800404d2b2ab40020b9004102002cb20042b600439c000c2bb80044b9004503002ab400092bb800462b2ab4000ab9004702002b2ab40002beb900410200033e1d2ab40002bea2001f2ab400031d322bb800482b2ab400021d2eb900410200840301a7ffde2b2ab40006beb900410200033e1d2ab40006bea200292ab400041d322bb800482ab400051d322bb800482b2ab400061d2eb900410200840301a7ffd42ab4000c2bb800492cb2004ab600439c000ebb004b59b7004c2bb800492b2ab4000eb9004102002ab400112bb80046b1
 
 com/gemstone/gemfire/distributed/internal/StartupResponseWithVersionMessage,2
-fromData,43,2a2bb7001f2a2bb80020b50004bb001a59b7001b4d2c2bb600212a2cb60022b500062a2cb60023b50009b1
-toData,43,2a2bb700182ab400042bb80019bb001a59b7001b4d2c2ab40006b6001c2c2ab40009b6001d2c2bb6001eb1
+fromData,43,2a2bb700202a2bb80021b50004bb001b59b7001c4d2c2bb600222a2cb60023b500062a2cb60024b50009b1
+toData,43,2a2bb700192ab400042bb8001abb001b59b7001c4d2c2ab40006b6001d2c2ab40009b6001e2c2bb6001fb1
 
 com/gemstone/gemfire/distributed/internal/WaitForViewInstallation,2
-fromData,26,2a2bb7001a2a2bb9001b0100b500152a2bb9001c0100b50016b1
-toData,26,2a2bb700172b2ab40015b9001803002b2ab40016b900190200b1
+fromData,26,2a2bb7001c2a2bb9001d0100b500162a2bb9001e0100b50017b1
+toData,26,2a2bb700192b2ab40016b9001a03002b2ab40017b9001b0200b1
 
 com/gemstone/gemfire/distributed/internal/locks/DLockQueryProcessor$DLockQueryMessage,2
-fromData,42,2a2bb700462a2bb80047b500032a2bb80048b500422a2bb900490100b500322a2bb9004a0100b50002b1
-toData,42,2a2bb700402ab400032bb800412ab400422bb800432b2ab40032b9004402002b2ab40002b900450200b1
+fromData,42,2a2bb700482a2bb80049b500032a2bb8004ab500442a2bb9004b0100b500332a2bb9004c0100b50002b1
+toData,42,2a2bb700422ab400032bb800432ab400442bb800452b2ab40033b9004602002b2ab40002b900470200b1
 
 com/gemstone/gemfire/distributed/internal/locks/DLockQueryProcessor$DLockQueryReplyMessage,2
-fromData,74,2a2bb700072a2bb900080100b500022ab4000204a000352bb80009c0000a4d2cc600152abb000b592c2bb900080100b7000cb500032a2bb900080100b500052a2bb9000d0100b50006b1
-toData,83,2a2bb7000e2b2ab40002b9000f02002ab4000204a0003e2ab40003c7000b012bb80010a7001b2ab40003b600042bb800102b2ab40003b60011b9000f02002b2ab40005b9000f02002b2ab40006b900120300b1
+fromData,74,2a2bb700092a2bb9000a0100b500032ab4000304a000352bb8000bc0000c4d2cc600152abb000d592c2bb9000a0100b7000eb500042a2bb9000a0100b500062a2bb9000f0100b50007b1
+toData,83,2a2bb700102b2ab40003b9001102002ab4000304a0003e2ab40004c7000b012bb80012a7001b2ab40004b600052bb800122b2ab40004b60013b9001102002b2ab40006b9001102002b2ab40007b900140300b1
 
 com/gemstone/gemfire/distributed/internal/locks/DLockRecoverGrantorProcessor$DLockRecoverGrantorMessage,2
-fromData,55,2a2bb7000f2a2bb80010b500022a2bb900110100b500032a2bb900110100b500052a2bb900120100b500042a2bb80013c00014b50006b1
-toData,52,2a2bb700152ab400022bb800162b2ab40003b9001702002b2ab40005b9001702002b2ab40004b9001803002ab400062bb80019b1
+fromData,55,2a2bb700102a2bb80011b500022a2bb900120100b500032a2bb900120100b500052a2bb900130100b500042a2bb80014c00015b50006b1
+toData,52,2a2bb700162ab400022bb800172b2ab40003b9001802002b2ab40005b9001802002b2ab40004b9001903002ab400062bb8001ab1
 
 com/gemstone/gemfire/distributed/internal/locks/DLockRecoverGrantorProcessor$DLockRecoverGrantorReplyMessage,2
-fromData,30,2a2bb700042a2bb900050100b500022a2bb80006c00007c00007b50003b1
-toData,24,2a2bb700082b2ab40002b9000902002ab400032bb8000ab1
+fromData,30,2a2bb700052a2bb900060100b500022a2bb80007c00008c00008b50003b1
+toData,24,2a2bb700092b2ab40002b9000a02002ab400032bb8000bb1
 
 com/gemstone/gemfire/distributed/internal/locks/DLockReleaseProcessor$DLockReleaseMessage,2
-fromData,52,2a2bb7004e2a2bb8004fb500032a2bb80050b5002c2a2bb900510100b5002b2a2bb900520100b500022a2bb900520100b50042b1
-toData,52,2a2bb700492ab400032bb8004a2ab4002c2bb8004b2b2ab4002bb9004c02002b2ab40002b9004d02002b2ab40042b9004d0200b1
+fromData,52,2a2bb7004f2a2bb80050b500032a2bb80051b5002c2a2bb900520100b5002b2a2bb900530100b500022a2bb900530100b50042b1
+toData,52,2a2bb7004a2ab400032bb8004b2ab4002c2bb8004c2b2ab4002bb9004d02002b2ab40002b9004e02002b2ab40042b9004e0200b1
 
 com/gemstone/gemfire/distributed/internal/locks/DLockReleaseProcessor$DLockReleaseReplyMessage,2
-fromData,24,2a2bb700032a2bb80004b500052a2bb900060100b50002b1
-toData,24,2a2bb700072ab400052bb800082b2ab40002b900090200b1
+fromData,24,2a2bb700052a2bb80006b500072a2bb900080100b50003b1
+toData,24,2a2bb700092ab400072bb8000a2b2ab40003b9000b0200b1
 
 com/gemstone/gemfire/distributed/internal/locks/DLockRemoteToken,2
 fromData,10,bb000e59120fb70010bf
-toData,53,2ab400112bb800242ab40012b600152bb800242b2ab40012b60025b9002602002b2ab40013b9002602002b2ab40014b900270300b1
+toData,53,2ab400112bb800252ab40012b600152bb800252b2ab40012b60026b9002702002b2ab40013b9002702002b2ab40014b900280300b1
 
 com/gemstone/gemfire/distributed/internal/locks/DLockRequestProcessor$DLockRequestMessage,2
-fromData,134,2a2bb700a22a2bb900a30100b500242a2bb800a4b500102a2bb900a50100b500112a2bb900a50100b500122a2bb900a50100b500132a2bb900a60100b500542a2bb900a60100b5000e2a2bb900a70100b5000f2a2bb900a70100b500142a2bb900a70100b500152a2bb900a50100b500162a2bb900a70100b500172a2bb900a70100b5006bb1
-toData,134,2a2bb7009c2b2ab40024b9009d02002ab400102bb8009e2b2ab40011b9009f03002b2ab40012b9009f03002b2ab40013b9009f03002b2ab40054b900a002002b2ab4000eb900a002002b2ab4000fb900a102002b2ab40014b900a102002b2ab40015b900a102002b2ab40016b9009f03002b2ab40017b900a102002b2ab4006bb900a10200b1
+fromData,134,2a2bb700a42a2bb900a50100b500242a2bb800a6b500102a2bb900a70100b500112a2bb900a70100b500122a2bb900a70100b500132a2bb900a80100b500542a2bb900a80100b5000e2a2bb900a90100b5000f2a2bb900a90100b500142a2bb900a90100b500152a2bb900a70100b500162a2bb900a90100b500172a2bb900a90100b5006bb1
+toData,134,2a2bb7009e2b2ab40024b9009f02002ab400102bb800a02b2ab40011b900a103002b2ab40012b900a103002b2ab40013b900a103002b2ab40054b900a202002b2ab4000eb900a202002b2ab4000fb900a302002b2ab40014b900a302002b2ab40015b900a302002b2ab40016b900a103002b2ab40017b900a302002b2ab4006bb900a30200b1
 
 com/gemstone/gemfire/distributed/internal/locks/DLockRequestProcessor$DLockResponseMessage,2
-fromData,72,2a2bb700432a2bb900440100b500022a2bb900450100b500102a2bb80046b500112a2bb900470100b5003e2a2bb80046b500402a2bb900480100b500122a2bb900480100b50042b1
-toData,72,2a2bb7003a2b2ab40002b9003b02002b2ab40010b9003c02002ab400112bb8003d2b2ab4003eb9003f03002ab400402bb8003d2b2ab40012b9004102002b2ab40042b900410200b1
+fromData,72,2a2bb700452a2bb900460100b500032a2bb900470100b500112a2bb80048b500122a2bb900490100b500402a2bb80048b500422a2bb9004a0100b500132a2bb9004a0100b50044b1
+toData,72,2a2bb7003c2b2ab40003b9003d02002b2ab40011b9003e02002ab400122bb8003f2b2ab40040b9004103002ab400422bb8003f2b2ab40013b9004302002b2ab40044b900430200b1
 
 com/gemstone/gemfire/distributed/internal/locks/DLockService$SuspendLockingToken,2
 fromData,1,b1
 toData,1,b1
 
 com/gemstone/gemfire/distributed/internal/locks/DeposeGrantorProcessor$DeposeGrantorMessage,2
-fromData,55,2a2bb700162a2bb900170100b500092a2bb80018b500042a2bb80019c0001ab500052a2bb9001b0100b500062a2bb900170100b50007b1
-toData,52,2a2bb7001c2b2ab40009b9001d02002ab400042bb8001e2ab400052bb8001f2b2ab40006b9002003002b2ab40007b9001d0200b1
+fromData,55,2a2bb700172a2bb900180100b500092a2bb80019b500042a2bb8001ac0001bb500052a2bb9001c0100b500062a2bb900180100b50007b1
+toData,52,2a2bb7001d2b2ab40009b9001e02002ab400042bb8001f2ab400052bb800202b2ab40006b9002103002b2ab40007b9001e0200b1
 
 com/gemstone/gemfire/distributed/internal/locks/ElderInitProcessor$ElderInitMessage,2
-fromData,16,2a2bb7001d2a2bb9001e0100b50005b1
-toData,16,2a2bb7001f2b2ab40005b900200200b1
+fromData,16,2a2bb7001e2a2bb9001f0100b50005b1
+toData,16,2a2bb700202b2ab40005b900210200b1
 
 com/gemstone/gemfire/distributed/internal/locks/ElderInitProcessor$ElderInitReplyMessage,2
-fromData,38,2a2bb7000d2a2bb8000eb500042a2bb8000eb500052a2bb8000eb500062a2bb8000eb50007b1
-toData,38,2a2bb7000f2ab400042bb800102ab400052bb800102ab400062bb800102ab400072bb80010b1
+fromData,38,2a2bb7000e2a2bb8000fb500042a2bb8000fb500052a2bb8000fb500062a2bb8000fb50007b1
+toData,38,2a2bb700102ab400042bb800112ab400052bb800112ab400062bb800112ab400072bb80011b1
 
 com/gemstone/gemfire/distributed/internal/locks/GrantorRequestProcessor$GrantorInfoReplyMessage,2
-fromData,47,2a2bb700132a2bb80014c00015b500052a2bb900160100b500092a2bb900170100b5000b2a2bb900180100b50007b1
-toData,44,2a2bb700192ab400052bb8001a2b2ab40009b9001b03002b2ab4000bb9001c02002b2ab40007b9001d0200b1
+fromData,47,2a2bb700142a2bb80015c00016b500052a2bb900170100b500092a2bb900180100b5000b2a2bb900190100b50007b1
+toData,44,2a2bb7001a2ab400052bb8001b2b2ab40009b9001c03002b2ab4000bb9001d02002b2ab40007b9001e0200b1
 
 com/gemstone/gemfire/distributed/internal/locks/GrantorRequestProcessor$GrantorRequestMessage,2
-fromData,73,2a2bb700282a2bb900290100b500042a2bb9002a0100b500052a2bb8002bb500062a2bb9002a0100b5000a2a2bb9002c0100b500082ab4000804a0000e2a2bb8002dc0002eb50007b1
-toData,70,2a2bb7002f2b2ab40004b9003003002b2ab40005b9003102002ab400062bb800322b2ab4000ab9003102002b2ab40008b9003302002ab4000804a0000b2ab400072bb80034b1
+fromData,73,2a2bb7002a2a2bb9002b0100b500052a2bb9002c0100b500062a2bb8002db500072a2bb9002c0100b5000b2a2bb9002e0100b500092ab4000904a0000e2a2bb8002fc00030b50008b1
+toData,70,2a2bb700312b2ab40005b9003203002b2ab40006b9003302002ab400072bb800342b2ab4000bb9003302002b2ab40009b9003502002ab4000904a0000b2ab400082bb80036b1
 
 com/gemstone/gemfire/distributed/internal/locks/NonGrantorDestroyedProcessor$NonGrantorDestroyedMessage,2
-fromData,24,2a2bb700272a2bb900280100b500082a2bb80029b50006b1
-toData,24,2a2bb7002a2b2ab40008b9002b02002ab400062bb8002cb1
+fromData,24,2a2bb700292a2bb9002a0100b500082a2bb8002bb50006b1
+toData,24,2a2bb7002c2b2ab40008b9002d02002ab400062bb8002eb1
 
 com/gemstone/gemfire/distributed/internal/locks/NonGrantorDestroyedProcessor$NonGrantorDestroyedReplyMessage,2
-fromData,16,2a2bb700162a2bb900170100b50008b1
-toData,16,2a2bb700182b2ab40008b900190200b1
+fromData,16,2a2bb700172a2bb900180100b50008b1
+toData,16,2a2bb700192b2ab40008b9001a0200b1
 
 com/gemstone/gemfire/distributed/internal/membership/InternalDistributedMember,4
-fromData,309,2bb8008b4d2bb9008c01003e2a2bb8007eb500172ab2001499000e2c2ab40017b8008da700072cb60018b500172bb9008e010036041504047e99000704a700040336051504057e99000704a700040336062a1504077e99000704a7000403b500332a2bb9008c0100b500032a2bb9008c0100b500072a2bb9008e0100b500052a2bb80080b5000b2a2bb8007eb500092ab40005100da0000e2a2bb8007eb50020a700172bb8007e3a071907c6000c2a1907b8008fb5000d2bb8007e3a072bb80081b6008236082abb00835919071508b70084b5000f2a15042bb70085bb0040592ab400032ab400072ab400052ab4000d2ab400092ab4000b2ab4000fb700413a092a2c1d150515061909b80026b500102ab40010c00011b600122ab40022b600862ab7003f2ab400059e000704a7000403b8003cb1
-fromDataPre_GFE_7_1_0_0,285,2bb8008b4d2bb9008c01003e2a2bb8007eb500172ab2001499000e2c2ab40017b8008da700072cb60018b500172bb9008e010036041504047e99000704a700040336051504057e99000704a700040336062a1504077e99000704a7000403b500332a2bb9008c0100b500032a2bb9008c0100b500072a2bb9008e0100b500052a2bb80080b5000b2a2bb8007eb500092ab40005100da0000e2a2bb8007eb50020a700172bb8007e3a071907c6000c2a1907b8008fb5000d2bb8007e3a072bb80081b6008236082abb00835919071508b70084b5000fbb0040592ab400032ab400072ab400052ab4000d2ab400092ab4000b2ab4000fb700413a092a2c1d150515061909b80026b500102ab7003f2ab400059e000704a7000403b8003cb1
-toData,240,2ab400059e000704a7000403b8003c2ab6004a2bb800872b2ab60049b9008802002ab400172bb80073033d2ab40010b9006601009900071c04803d2ab40010b9006701009900071c05803d2ab400339900071c07803d1c1008803d2b1c1100ff7e91b9008902002b2ab40003b9008802002b2ab40007b9008802002b2ab40005b9008902002ab4000b2bb800752ab400092bb800732ab40005100da0000e2ab400202bb80073a7000e2ab4000db8008a2bb800732ab4000fc700081243a7000a2ab4000fb600762bb800732ab4000fc7000911012ca7000a2ab4000fb60077b800782bb800792b2ab4002204b8007ab1
-toDataPre_GFE_7_1_0_0,226,2ab400059e000704a7000403b8003c2ab6004a2bb800872b2ab60049b9008802002ab400172bb80073033d2ab40010b9006601009900071c04803d2ab40010b9006701009900071c05803d2ab400339900071c07803d2b1c1100ff7e91b9008902002b2ab40003b9008802002b2ab40007b9008802002b2ab40005b9008902002ab4000b2bb800752ab400092bb800732ab40005100da0000e2ab400202bb80073a7000e2ab4000db8008a2bb800732ab4000fc700081243a7000a2ab4000fb600762bb800732ab4000fc7000911012ca7000a2ab4000fb60077b800782bb80079b1
+fromData,309,2bb8008d4d2bb9008e01003e2a2bb8007fb500172ab2001499000e2c2ab40017b8008fa700072cb60018b500172bb90090010036041504047e99000704a700040336051504057e99000704a700040336062a1504077e99000704a7000403b500342a2bb9008e0100b500032a2bb9008e0100b500072a2bb900900100b500052a2bb80081b5000b2a2bb8007fb500092ab40005100da0000e2a2bb8007fb50020a700172bb8007f3a071907c6000c2a1907b80091b5000d2bb8007f3a072bb80082b6008336082abb00845919071508b70085b5000f2a15042bb70086bb0041592ab400032ab400072ab400052ab4000d2ab400092ab4000b2ab4000fb700423a092a2c1d150515061909b80026b500102ab40010c00011b600122ab40022b600872ab700402ab400059e000704a7000403b8003db1
+fromDataPre_GFE_7_1_0_0,285,2bb8008d4d2bb9008e01003e2a2bb8007fb500172ab2001499000e2c2ab40017b8008fa700072cb60018b500172bb90090010036041504047e99000704a700040336051504057e99000704a700040336062a1504077e99000704a7000403b500342a2bb9008e0100b500032a2bb9008e0100b500072a2bb900900100b500052a2bb80081b5000b2a2bb8007fb500092ab40005100da0000e2a2bb8007fb50020a700172bb8007f3a071907c6000c2a1907b80091b5000d2bb8007f3a072bb80082b6008336082abb00845919071508b70085b5000fbb0041592ab400032ab400072ab400052ab4000d2ab400092ab4000b2ab4000fb700423a092a2c1d150515061909b80026b500102ab700402ab400059e000704a7000403b8003db1
+toData,240,2ab400059e000704a7000403b8003d2ab6004b2bb800892b2ab6004ab9008a02002ab400172bb80074033d2ab40010b9006701009900071c04803d2ab40010b9006801009900071c05803d2ab400349900071c07803d1c1008803d2b1c1100ff7e91b9008b02002b2ab40003b9008a02002b2ab40007b9008a02002b2ab40005b9008b02002ab4000b2bb800762ab400092bb800742ab40005100da0000e2ab400202bb80074a7000e2ab4000db8008c2bb800742ab4000fc700081244a7000a2ab4000fb600772bb800742ab4000fc7000911012ca7000a2ab4000fb60078b800792bb8007a2b2ab4002204b8007bb1
+toDataPre_GFE_7_1_0_0,226,2ab400059e000704a7000403b8003d2ab6004b2bb800892b2ab6004ab9008a02002ab400172bb80074033d2ab40010b9006701009900071c04803d2ab40010b9006801009900071c05803d2ab400349900071c07803d2b1c1100ff7e91b9008b02002b2ab40003b9008a02002b2ab40007b9008a02002b2ab40005b9008b02002ab4000b2bb800762ab400092bb800742ab40005100da0000e2ab400202bb80074a7000e2ab4000db8008c2bb800742ab4000fc700081244a7000a2ab4000fb600772bb800742ab4000fc7000911012ca7000a2ab4000fb60078b800792bb8007ab1
 
 com/gemstone/gemfire/distributed/internal/membership/MemberAttributes,2
 fromData,73,2a2bb9002a0100b500052a2bb9002a0100b500062a2bb9002a0100b500072a2bb8002bb5000b2a2bb8002cb5000c2bb8002b4d2bb8002db6002e3e2abb002f592c1db70030b5000db1
 toData,97,2b2ab40005b9002302002b2ab40006b9002302002b2ab40007b9002302002ab4000b2bb800242ab4000c2bb800252ab4000dc70008120aa7000a2ab4000db600262bb800242ab4000dc7000911012ca7000a2ab4000db60027b800282bb80029b1
 
 com/gemstone/gemfire/distributed/internal/membership/jgroup/ViewMessage,2
-fromData,8,bb000759b70008bf
-toData,8,bb000759b70008bf
+fromData,8,bb000859b70009bf
+toData,8,bb000859b70009bf
 
 com/gemstone/gemfire/distributed/internal/streaming/StreamingOperation$RequestStreamingMessage,2
-fromData,16,2a2bb7001f2a2bb900200100b50002b1
-toData,16,2a2bb700212b2ab40002b900220200b1
+fromData,16,2a2bb700202a2bb900210100b50003b1
+toData,16,2a2bb700222b2ab40003b900230200b1
 
 com/gemstone/gemfire/distributed/internal/streaming/StreamingOperation$StreamingReplyMessage,2
-fromData,339,2a2bb700142bb9001501003d2a2bb900150100b500112a2bb900160100b500122a2bb900160100b500032bb800174e2db20018b600199e000704a700040336041c02a0000b2a01b50002a701082a1cb5000f2abb001a591cb7001bb500022ab4000399000704b8001c2ab40008b8001d3a051905c1001e3606013a07150699000d1905c0001eb6001f3a0703360803360915091ca20087b20020c6000cb2002006b900210200150699000fb80022990009043608a700672bb800233a0a150699004a1907c600451907b90024010099003b1504360b150b99001715099a0012190ac100259a000704a7000403360b150b990019bb0026591907c00027190ac00028c00028b700293a0a2ab40002190ab9002a020057840901a7ff7915089900172a04b50004b20020c6000cb2002005b9002102002ab4000399001a03b8001ca700133a0c2ab4000399000703b8001c190cbfb1
-toData,85,2a2bb7002b2ab4000ec7000d2b02b9002c0200a7000d2b2ab4000fb9002c02002b2ab40011b9002c02002b2ab40012b9002d02002b2ab40003b9002d02002ab4000ec600122ab4000f9e000b2ab4000e2bb6002eb1
+fromData,339,2a2bb700152bb9001601003d2a2bb900160100b500112a2bb900170100b500122a2bb900170100b500032bb800184e2db20019b6001a9e000704a700040336041c02a0000b2a01b50002a701082a1cb5000f2abb001b591cb7001cb500022ab4000399000704b8001d2ab40008b8001e3a051905c1001f3606013a07150699000d1905c0001fb600203a0703360803360915091ca20087b20021c6000cb2002106b900220200150699000fb80023990009043608a700672bb800243a0a150699004a1907c600451907b90025010099003b1504360b150b99001715099a0012190ac100269a000704a7000403360b150b990019bb0027591907c00028190ac00029c00029b7002a3a0a2ab40002190ab9002b020057840901a7ff7915089900172a04b50004b20021c6000cb2002105b9002202002ab4000399001a03b8001da700133a0c2ab4000399000703b8001d190cbfb1
+toData,85,2a2bb7002c2ab4000ec7000d2b02b9002d0200a7000d2b2ab4000fb9002d02002b2ab40011b9002d02002b2ab40012b9002e02002b2ab40003b9002e02002ab4000ec600122ab4000f9e000b2ab4000e2bb6002fb1
 
 com/gemstone/gemfire/distributed/internal/tcpserver/InfoRequest,2
 fromData,1,b1
@@ -383,53 +379,53 @@ fromData,8,bb000259b70003bf
 toData,8,bb000259b70003bf
 
 com/gemstone/gemfire/internal/InternalDataSerializer$RegistrationMessage,2
-fromData,39,2a2bb700282bb800292a2bb8002ab500042a2bb9002b0100b500062a2bb8002cc00008b50009b1
-toData,32,2a2bb700242ab400042bb800252b2ab40006b9002602002ab400092bb80027b1
+fromData,39,2a2bb700292bb8002a2a2bb8002bb500042a2bb9002c0100b500062a2bb8002dc00008b50009b1
+toData,32,2a2bb700252ab400042bb800262b2ab40006b9002702002ab400092bb80028b1
 
 com/gemstone/gemfire/internal/InternalInstantiator$RegistrationContextMessage,2
-fromData,14,2a2bb700192a2bb8001ab5000db1
-toData,14,2a2bb7001b2ab4000d2bb8001cb1
+fromData,14,2a2bb7001a2a2bb8001bb5000db1
+toData,14,2a2bb7001c2ab4000d2bb8001db1
 
 com/gemstone/gemfire/internal/InternalInstantiator$RegistrationMessage,3
-fromDataProblem,38,2ab4000bc7000e2abb001a59b7001bb5000b2ab4000b2bb6001c572ab4000b121db6001c57b1
-fromData,129,2a2bb7001e2a2bb8001fb500122a2bb8001fb50013b80020c600532a2ab40012b80021b50003a7001d4d2ab2002304bd002459032c53b900250200b700262a01b500032a2ab40013b80021b50005a7001d4d2ab2002704bd002459032c53b900250200b700262a01b500052a2bb900280100b500072a2bb80029c00009b5000ab1
-toData,46,2a2bb700152ab40003b600162bb800172ab40005b600162bb800172b2ab40007b9001802002ab4000a2bb80019b1
+fromDataProblem,38,2ab4000bc7000e2abb001b59b7001cb5000b2ab4000b2bb6001d572ab4000b121eb6001d57b1
+fromData,129,2a2bb7001f2a2bb80020b500122a2bb80020b50013b80021c600532a2ab40012b80022b50003a7001d4d2ab2002404bd002559032c53b900260200b700272a01b500032a2ab40013b80022b50005a7001d4d2ab2002804bd002559032c53b900260200b700272a01b500052a2bb900290100b500072a2bb8002ac00009b5000ab1
+toData,46,2a2bb700162ab40003b600172bb800182ab40005b600172bb800182b2ab40007b9001902002ab4000a2bb8001ab1
 
 com/gemstone/gemfire/internal/ManagerInfo,2
 fromData,61,2a2bb900540100b500182a2bb900540100b500192a2bb900540100b5001a2bb9005501003d1c9e00161cbc084e2b2db9005602002a2db80057b5001bb1
 toData,74,2b2ab40018b9005002002b2ab40019b9005002002b2ab4001ab9005002002ab4001bc7000d2b03b900510200a7001d2ab4001bb600524d2b2cbeb9005102002b2c032cbeb900530400b1
 
 com/gemstone/gemfire/internal/admin/ClientMembershipMessage,2
-fromData,32,2a2bb7000c2a2bb8000db500022a2bb8000db500032a2bb9000e0100b50004b1
-toData,32,2a2bb700092ab400022bb8000a2ab400032bb8000a2b2ab40004b9000b0200b1
+fromData,32,2a2bb7000d2a2bb8000eb500022a2bb8000eb500032a2bb9000f0100b50004b1
+toData,32,2a2bb7000a2ab400022bb8000b2ab400032bb8000b2b2ab40004b9000c0200b1
 
 com/gemstone/gemfire/internal/admin/remote/AddHealthListenerRequest,2
-fromData,17,2a2bb7000f2a2bb80010c00011b50007b1
-toData,14,2a2bb7000d2ab400072bb8000eb1
+fromData,17,2a2bb700102a2bb80011c00012b50007b1
+toData,14,2a2bb7000e2ab400072bb8000fb1
 
 com/gemstone/gemfire/internal/admin/remote/AddHealthListenerResponse,2
-fromData,16,2a2bb7000b2a2bb9000c0100b50008b1
-toData,16,2a2bb700092b2ab40008b9000a0200b1
+fromData,16,2a2bb7000c2a2bb9000d0100b50008b1
+toData,16,2a2bb7000a2b2ab40008b9000b0200b1
 
 com/gemstone/gemfire/internal/admin/remote/AddStatListenerRequest,2
-fromData,26,2a2bb700102a2bb900110100b500042a2bb900120100b50006b1
-toData,26,2a2bb7000d2b2ab40004b9000e03002b2ab40006b9000f0200b1
+fromData,26,2a2bb700112a2bb900120100b500042a2bb900130100b50006b1
+toData,26,2a2bb7000e2b2ab40004b9000f03002b2ab40006b900100200b1
 
 com/gemstone/gemfire/internal/admin/remote/AddStatListenerResponse,2
-fromData,16,2a2bb7000b2a2bb9000c0100b50008b1
-toData,16,2a2bb700092b2ab40008b9000a0200b1
+fromData,16,2a2bb7000c2a2bb9000d0100b50008b1
+toData,16,2a2bb7000a2b2ab40008b9000b0200b1
 
 com/gemstone/gemfire/internal/admin/remote/AdminConsoleDisconnectMessage,2
-fromData,34,2a2bb700192a2bb9001a0100b500052a2bb9001a0100b500042a2bb8001bb50007b1
-toData,34,2a2bb700162b2ab40005b9001702002b2ab40004b9001702002ab400072bb80018b1
+fromData,34,2a2bb7001a2a2bb9001b0100b500052a2bb9001b0100b500042a2bb8001cb50007b1
+toData,34,2a2bb700172b2ab40005b9001802002b2ab40004b9001802002ab400072bb80019b1
 
 com/gemstone/gemfire/internal/admin/remote/AdminConsoleMessage,2
-fromData,16,2a2bb7000d2a2bb9000e0100b50005b1
-toData,16,2a2bb7000b2b2ab40005b9000c0200b1
+fromData,16,2a2bb7000f2a2bb900100100b50005b1
+toData,16,2a2bb7000d2b2ab40005b9000e0200b1
 
 com/gemstone/gemfire/internal/admin/remote/AdminFailureResponse,2
-fromData,17,2a2bb700082a2bb80009c0000ab50005b1
-toData,14,2a2bb700062ab400052bb80007b1
+fromData,17,2a2bb700092a2bb8000ac0000bb50005b1
+toData,14,2a2bb700072ab400052bb80008b1
 
 com/gemstone/gemfire/internal/admin/remote/AdminRequest,2
 fromData,24,2a2bb7002b2a2bb9002c0100b500052a2bb8002db50003b1
@@ -440,56 +436,56 @@ fromData,16,2a2bb700062a2bb900070100b50002b1
 toData,16,2a2bb700042b2ab40002b900050200b1
 
 com/gemstone/gemfire/internal/admin/remote/AlertLevelChangeMessage,2
-fromData,16,2a2bb700112a2bb900120100b50004b1
-toData,16,2a2bb7000f2b2ab40004b900100200b1
+fromData,16,2a2bb700132a2bb900140100b50004b1
+toData,16,2a2bb700112b2ab40004b900120200b1
 
 com/gemstone/gemfire/internal/admin/remote/AlertListenerMessage,2
-fromData,69,2a2bb7001e2a2bb9001f0100b500062a2bb80020c00021b500072a2bb80022b500082a2bb80022b5000a2a2bb900230100b5000b2a2bb80022b5000c2a2bb80022b5000db1
-toData,66,2a2bb700192b2ab40006b9001a02002ab400072bb8001b2ab400082bb8001c2ab4000a2bb8001c2b2ab4000bb9001d03002ab4000c2bb8001c2ab4000d2bb8001cb1
+fromData,69,2a2bb7001f2a2bb900200100b500062a2bb80021c00022b500072a2bb80023b500082a2bb80023b5000a2a2bb900240100b5000b2a2bb80023b5000c2a2bb80023b5000db1
+toData,66,2a2bb7001a2b2ab40006b9001b02002ab400072bb8001c2ab400082bb8001d2ab4000a2bb8001d2b2ab4000bb9001e03002ab4000c2bb8001d2ab4000d2bb8001db1
 
 com/gemstone/gemfire/internal/admin/remote/AlertsNotificationMessage,2
 fromData,20,2a2bb700052a2bb80006c00007c00007b50003b1
 toData,14,2a2bb700022ab400032bb80004b1
 
 com/gemstone/gemfire/internal/admin/remote/AppCacheSnapshotMessage,2
-fromData,16,2a2bb700112a2bb900120100b50005b1
-toData,16,2a2bb7000f2b2ab40005b900100200b1
+fromData,16,2a2bb700122a2bb900130100b50005b1
+toData,16,2a2bb700102b2ab40005b900110200b1
 
 com/gemstone/gemfire/internal/admin/remote/BridgeServerRequest,2
-fromData,47,2a2bb700172a2bb900180100b500052a2bb900180100b500062a2bb80019c0001ab5000a2a2bb900180100b5000fb1
-toData,44,2a2bb700142b2ab40005b9001502002b2ab40006b9001502002ab4000a2bb800162b2ab4000fb900150200b1
+fromData,47,2a2bb700182a2bb900190100b500052a2bb900190100b500062a2bb8001ac0001bb5000a2a2bb900190100b5000fb1
+toData,44,2a2bb700152b2ab40005b9001602002b2ab40006b9001602002ab4000a2bb800172b2ab4000fb900160200b1
 
 com/gemstone/gemfire/internal/admin/remote/BridgeServerResponse,2
-fromData,28,2a2bb700272a2bb80028c0000fb5000b2a2bb80028c00023b50024b1
-toData,22,2a2bb700252ab4000b2bb800262ab400242bb80026b1
+fromData,28,2a2bb700282a2bb80029c0000fb5000b2a2bb80029c00023b50024b1
+toData,22,2a2bb700262ab4000b2bb800272ab400242bb80027b1
 
 com/gemstone/gemfire/internal/admin/remote/CacheConfigRequest,2
-fromData,36,2a2bb700102a2bb900110100b500032a2bb900120100b500042a2bb900120100b50006b1
-toData,36,2a2bb7000d2b2ab40003b9000e02002b2ab40004b9000f02002b2ab40006b9000f0200b1
+fromData,36,2a2bb700112a2bb900120100b500032a2bb900130100b500042a2bb900130100b50006b1
+toData,36,2a2bb7000e2b2ab40003b9000f02002b2ab40004b9001002002b2ab40006b900100200b1
 
 com/gemstone/gemfire/internal/admin/remote/CacheConfigResponse,2
-fromData,28,2a2bb7001b2a2bb8001cc00014b500092a2bb8001cc00017b50018b1
-toData,22,2a2bb700192ab400092bb8001a2ab400182bb8001ab1
+fromData,28,2a2bb7001c2a2bb8001dc00014b500092a2bb8001dc00017b50018b1
+toData,22,2a2bb7001a2ab400092bb8001b2ab400182bb8001bb1
 
 com/gemstone/gemfire/internal/admin/remote/CacheInfoRequest,2
-fromData,6,2a2bb7000ab1
-toData,6,2a2bb70009b1
+fromData,6,2a2bb7000bb1
+toData,6,2a2bb7000ab1
 
 com/gemstone/gemfire/internal/admin/remote/CacheInfoResponse,2
-fromData,17,2a2bb7000e2a2bb8000fc00008b5000ab1
-toData,14,2a2bb7000c2ab4000a2bb8000db1
+fromData,17,2a2bb7000f2a2bb80010c00008b5000ab1
+toData,14,2a2bb7000d2ab4000a2bb8000eb1
 
 com/gemstone/gemfire/internal/admin/remote/CancelStatListenerRequest,2
-fromData,16,2a2bb7000c2a2bb9000d0100b50003b1
-toData,16,2a2bb7000a2b2ab40003b9000b0200b1
+fromData,16,2a2bb7000d2a2bb9000e0100b50003b1
+toData,16,2a2bb7000b2b2ab40003b9000c0200b1
 
 com/gemstone/gemfire/internal/admin/remote/CancelStatListenerResponse,2
-fromData,6,2a2bb70009b1
-toData,6,2a2bb70008b1
+fromData,6,2a2bb7000ab1
+toData,6,2a2bb70009b1
 
 com/gemstone/gemfire/internal/admin/remote/CancellationMessage,2
-fromData,16,2a2bb7000b2a2bb9000c0100b50004b1
-toData,16,2a2bb700092b2ab40004b9000a0200b1
+fromData,16,2a2bb7000c2a2bb9000d0100b50004b1
+toData,16,2a2bb7000a2b2ab40004b9000b0200b1
 
 com/gemstone/gemfire/internal/admin/remote/ChangeRefreshIntervalMessage,2
 fromData,16,2a2bb700072a2bb900080100b50003b1
@@ -502,20 +498,20 @@ toData,73,2ab400052bb8000d2ab400062bb8000d2ab400072bb8000d2ab400082bb8000d2ab400
 toDataPre_GFE_8_0_0_0,65,2ab400052bb8000d2ab400062bb8000d2ab400072bb8000d2ab400082bb8000d2ab400092bb8000d2ab4000b2bb8000d2ab4000a2bb8000e2ab4000c2bb8000fb1
 
 com/gemstone/gemfire/internal/admin/remote/CompactRequest,2
-fromData,6,2a2bb70026b1
-toData,6,2a2bb70027b1
+fromData,6,2a2bb70027b1
+toData,6,2a2bb70028b1
 
 com/gemstone/gemfire/internal/admin/remote/CompactResponse,2
 fromData,14,2a2bb700042a2bb80005b50003b1
 toData,14,2a2bb700062ab400032bb80007b1
 
 com/gemstone/gemfire/internal/admin/remote/DestroyEntryMessage,2
-fromData,25,2a2bb7001a2a2bb8001bc0001cb500042a2bb8001bb50005b1
-toData,22,2a2bb700182ab400042bb800192ab400052bb80019b1
+fromData,25,2a2bb7001b2a2bb8001cc0001db500042a2bb8001cb50005b1
+toData,22,2a2bb700192ab400042bb8001a2ab400052bb8001ab1
 
 com/gemstone/gemfire/internal/admin/remote/DestroyRegionMessage,2
-fromData,17,2a2bb700192a2bb8001ac0001bb50004b1
-toData,14,2a2bb700172ab400042bb80018b1
+fromData,17,2a2bb7001a2a2bb8001bc0001cb50004b1
+toData,14,2a2bb700182ab400042bb80019b1
 
 com/gemstone/gemfire/internal/admin/remote/DurableClientInfoRequest,2
 fromData,28,2a2bb7000c2a2bb8000db500032a2bb9000e0100b500042ab80005b1
@@ -526,95 +522,95 @@ fromData,16,2a2bb700172a2bb900180100b50002b1
 toData,16,2a2bb700152b2ab40002b900160200b1
 
 com/gemstone/gemfire/internal/admin/remote/FetchDistLockInfoRequest,2
-fromData,6,2a2bb7000ab1
-toData,6,2a2bb70009b1
+fromData,6,2a2bb7000bb1
+toData,6,2a2bb7000ab1
 
 com/gemstone/gemfire/internal/admin/remote/FetchDistLockInfoResponse,2
-fromData,20,2a2bb7001d2a2bb8001ec00018c00018b50019b1
-toData,14,2a2bb7001b2ab400192bb8001cb1
+fromData,20,2a2bb7001e2a2bb8001fc00018c00018b50019b1
+toData,14,2a2bb7001c2ab400192bb8001db1
 
 com/gemstone/gemfire/internal/admin/remote/FetchHealthDiagnosisRequest,2
-fromData,27,2a2bb7000c2bb9000d01003d2bb8000ec0000f4e2a1c2db70003b1
-toData,24,2a2bb700092b2ab40005b9000a02002ab400062bb8000bb1
+fromData,27,2a2bb7000d2bb9000e01003d2bb8000fc000104e2a1c2db70003b1
+toData,24,2a2bb7000a2b2ab40005b9000b02002ab400062bb8000cb1
 
 com/gemstone/gemfire/internal/admin/remote/FetchHealthDiagnosisResponse,2
-fromData,14,2a2bb7000b2a2bb8000cb50008b1
-toData,14,2a2bb700092ab400082bb8000ab1
+fromData,14,2a2bb7000c2a2bb8000db50008b1
+toData,14,2a2bb7000a2ab400082bb8000bb1
 
 com/gemstone/gemfire/internal/admin/remote/FetchHostRequest,2
-fromData,6,2a2bb7000ab1
-toData,6,2a2bb70009b1
+fromData,6,2a2bb7000bb1
+toData,6,2a2bb7000ab1
 
 com/gemstone/gemfire/internal/admin/remote/FetchHostResponse,2
-fromData,67,2a2bb700322a2bb80033b500122a2bb80034c00035b5000d2a2bb80034c00013b500252a2bb80034c00013b500182a2bb900360100b5002c2a2bb900370100b50002b1
-toData,58,2a2bb7002d2ab400122bb8002e2ab4000d2bb8002f2ab400252bb8002f2ab400182bb8002f2b2ab4002cb9003003002b2ab40002b900310200b1
+fromData,67,2a2bb700342a2bb80035b500132a2bb80036c00037b5000e2a2bb80036c00014b500262a2bb80036c00014b500192a2bb900380100b5002d2a2bb900390100b50002b1
+toData,58,2a2bb7002f2ab400132bb800302ab4000e2bb800312ab400262bb800312ab400192bb800312b2ab4002db9003203002b2ab40002b900330200b1
 
 com/gemstone/gemfire/internal/admin/remote/FetchResourceAttributesRequest,2
-fromData,16,2a2bb7000c2a2bb9000d0100b50003b1
-toData,16,2a2bb7000a2b2ab40003b9000b0300b1
+fromData,16,2a2bb7000d2a2bb9000e0100b50003b1
+toData,16,2a2bb7000b2b2ab40003b9000c0300b1
 
 com/gemstone/gemfire/internal/admin/remote/FetchResourceAttributesResponse,2
-fromData,20,2a2bb7000e2a2bb8000fc00010c00010b50009b1
-toData,14,2a2bb7000c2ab400092bb8000db1
+fromData,20,2a2bb7000f2a2bb80010c00011c00011b50009b1
+toData,14,2a2bb7000d2ab400092bb8000eb1
 
 com/gemstone/gemfire/internal/admin/remote/FetchStatsRequest,2
-fromData,14,2a2bb7000b2a2bb8000cb50003b1
-toData,14,2a2bb700092ab400032bb8000ab1
+fromData,14,2a2bb7000c2a2bb8000db50003b1
+toData,14,2a2bb7000a2ab400032bb8000bb1
 
 com/gemstone/gemfire/internal/admin/remote/FetchStatsResponse,2
-fromData,20,2a2bb700142a2bb80015c00011c00011b5000fb1
-toData,14,2a2bb700122ab4000f2bb80013b1
+fromData,20,2a2bb700152a2bb80016c00011c00011b5000fb1
+toData,14,2a2bb700132ab4000f2bb80014b1
 
 com/gemstone/gemfire/internal/admin/remote/FetchSysCfgRequest,2
-fromData,6,2a2bb7000ab1
-toData,6,2a2bb70009b1
+fromData,6,2a2bb7000bb1
+toData,6,2a2bb7000ab1
 
 com/gemstone/gemfire/internal/admin/remote/FetchSysCfgResponse,2
-fromData,17,2a2bb7000c2a2bb8000dc0000eb50009b1
-toData,14,2a2bb7000a2ab400092bb8000bb1
+fromData,17,2a2bb7000d2a2bb8000ec0000fb50009b1
+toData,14,2a2bb7000b2ab400092bb8000cb1
 
 com/gemstone/gemfire/internal/admin/remote/FlushAppCacheSnapshotMessage,2
-fromData,6,2a2bb70005b1
-toData,6,2a2bb70004b1
+fromData,6,2a2bb70006b1
+toData,6,2a2bb70005b1
 
 com/gemstone/gemfire/internal/admin/remote/HealthListenerMessage,2
-fromData,27,2a2bb7000d2a2bb9000e0100b500042a2bb8000fc00010b50005b1
-toData,24,2a2bb7000a2b2ab40004b9000b02002ab400052bb8000cb1
+fromData,27,2a2bb7000e2a2bb9000f0100b500042a2bb80010c00011b50005b1
+toData,24,2a2bb7000b2b2ab40004b9000c02002ab400052bb8000db1
 
 com/gemstone/gemfire/internal/admin/remote/LicenseInfoRequest,2
-fromData,6,2a2bb7000ab1
-toData,6,2a2bb70009b1
+fromData,6,2a2bb7000bb1
+toData,6,2a2bb7000ab1
 
 com/gemstone/gemfire/internal/admin/remote/LicenseInfoResponse,2
-fromData,17,2a2bb7000a2a2bb8000bc00005b50007b1
-toData,14,2a2bb700082ab400072bb80009b1
+fromData,17,2a2bb7000b2a2bb8000cc00005b50007b1
+toData,14,2a2bb700092ab400072bb8000ab1
 
 com/gemstone/gemfire/internal/admin/remote/MissingPersistentIDsRequest,1
 fromData,6,2a2bb7002eb1
 
 com/gemstone/gemfire/internal/admin/remote/MissingPersistentIDsResponse,2
-fromData,124,2a2bb700062bb9000701003d2abb0008591cb70009b50002033e1d1ca20024bb000a59b7000b3a0419042bb8000c2ab400021904b9000d020057840301a7ffdd2bb9000701003d2abb0008591cb70009b50003033e1d1ca20024bb000a59b7000b3a0419042bb8000c2ab400031904b9000d020057840301a7ffddb1
-toData,110,2a2bb7000e2b2ab40002b9000f0100b9001002002ab40002b9001101004d2cb9001201009900152cb900130100c000144e2d2bb80015a7ffe82b2ab40003b9000f0100b9001002002ab40003b9001101004d2cb9001201009900152cb900130100c000144e2d2bb80015a7ffe8b1
+fromData,124,2a2bb700072bb9000801003d2abb0009591cb7000ab50002033e1d1ca20024bb000b59b7000c3a0419042bb8000d2ab400021904b9000e020057840301a7ffdd2bb9000801003d2abb0009591cb7000ab50003033e1d1ca20024bb000b59b7000c3a0419042bb8000d2ab400031904b9000e020057840301a7ffddb1
+toData,110,2a2bb7000f2b2ab40002b900100100b9001102002ab40002b9001201004d2cb9001301009900152cb900140100c000154e2d2bb80016a7ffe82b2ab40003b900100100b9001102002ab40003b9001201004d2cb9001301009900152cb900140100c000154e2d2bb80016a7ffe8b1
 
 com/gemstone/gemfire/internal/admin/remote/ObjectDetailsRequest,2
-fromData,24,2a2bb700172a2bb80018b500032a2bb900190100b50004b1
-toData,24,2a2bb700142ab400032bb800152b2ab40004b900160200b1
+fromData,24,2a2bb700182a2bb80019b500032a2bb9001a0100b50004b1
+toData,24,2a2bb700152ab400032bb800162b2ab40004b900170200b1
 
 com/gemstone/gemfire/internal/admin/remote/ObjectDetailsResponse,2
-fromData,33,2a2bb700182a2bb80019b5000b2a2bb80019b5000d2a2bb80019c0000eb50011b1
-toData,30,2a2bb700162ab4000b2bb800172ab4000d2bb800172ab400112bb80017b1
+fromData,33,2a2bb700192a2bb8001ab5000b2a2bb8001ab5000d2a2bb8001ac0000eb50011b1
+toData,30,2a2bb700172ab4000b2bb800182ab4000d2bb800182ab400112bb80018b1
 
 com/gemstone/gemfire/internal/admin/remote/ObjectNamesRequest,2
-fromData,6,2a2bb70013b1
-toData,6,2a2bb70012b1
+fromData,6,2a2bb70014b1
+toData,6,2a2bb70013b1
 
 com/gemstone/gemfire/internal/admin/remote/ObjectNamesResponse,2
-fromData,17,2a2bb700152a2bb80016c00008b5000ab1
-toData,14,2a2bb700132ab4000a2bb80014b1
+fromData,17,2a2bb700162a2bb80017c00008b5000ab1
+toData,14,2a2bb700142ab4000a2bb80015b1
 
 com/gemstone/gemfire/internal/admin/remote/PrepareRevokePersistentIDRequest,2
-fromData,35,2a2bb700272abb002859b70029b500022ab400022bb8002a2a2bb9002b0100b50003b1
-toData,24,2a2bb7002c2ab400022bb8002d2b2ab40003b9002e0200b1
+fromData,35,2a2bb700282abb002959b7002ab500022ab400022bb8002b2a2bb9002c0100b50003b1
+toData,24,2a2bb7002d2ab400022bb8002e2b2ab40003b9002f0200b1
 
 com/gemstone/gemfire/internal/admin/remote/RefreshMemberSnapshotRequest,2
 fromData,6,2a2bb70009b1
@@ -633,36 +629,36 @@ fromData,14,2a2bb7000d2a2bb8000eb50002b1
 toData,14,2a2bb7000b2ab400022bb8000cb1
 
 com/gemstone/gemfire/internal/admin/remote/RegionAttributesRequest,2
-fromData,6,2a2bb7000cb1
-toData,6,2a2bb7000bb1
+fromData,6,2a2bb7000db1
+toData,6,2a2bb7000cb1
 
 com/gemstone/gemfire/internal/admin/remote/RegionAttributesResponse,2
-fromData,17,2a2bb7000b2a2bb8000cc00005b50008b1
-toData,14,2a2bb700092ab400082bb8000ab1
+fromData,17,2a2bb7000c2a2bb8000dc00005b50008b1
+toData,14,2a2bb7000a2ab400082bb8000bb1
 
 com/gemstone/gemfire/internal/admin/remote/RegionRequest,2
-fromData,57,2a2bb700132a2bb900140100b500032a2bb900140100b500052a2bb80015b500062a2bb80015b500082a2bb80016c00017b5000b2ab80007b1
-toData,50,2a2bb7000f2b2ab40003b9001002002b2ab40005b9001002002ab400062bb800112ab400082bb800112ab4000b2bb80012b1
+fromData,57,2a2bb700142a2bb900150100b500032a2bb900150100b500052a2bb80016b500062a2bb80016b500082a2bb80017c00018b5000b2ab80007b1
+toData,50,2a2bb700102b2ab40003b9001102002b2ab40005b9001102002ab400062bb800122ab400082bb800122ab4000b2bb80013b1
 
 com/gemstone/gemfire/internal/admin/remote/RegionResponse,2
-fromData,33,2a2bb700242a2bb80025b500162a2bb80025b5001a2a2bb80026c0001cb5001db1
-toData,30,2a2bb700212ab400162bb800222ab4001a2bb800222ab4001d2bb80023b1
+fromData,33,2a2bb700262a2bb80027b500162a2bb80027b5001b2a2bb80028c0001db5001eb1
+toData,30,2a2bb700232ab400162bb800242ab4001b2bb800242ab4001e2bb80025b1
 
 com/gemstone/gemfire/internal/admin/remote/RegionSizeRequest,2
-fromData,6,2a2bb70014b1
-toData,6,2a2bb70013b1
+fromData,6,2a2bb70015b1
+toData,6,2a2bb70014b1
 
 com/gemstone/gemfire/internal/admin/remote/RegionSizeResponse,2
-fromData,26,2a2bb7000d2a2bb9000e0100b500082a2bb9000e0100b5000ab1
-toData,26,2a2bb7000b2b2ab40008b9000c02002b2ab4000ab9000c0200b1
+fromData,26,2a2bb7000e2a2bb9000f0100b500082a2bb9000f0100b5000ab1
+toData,26,2a2bb7000c2b2ab40008b9000d02002b2ab4000ab9000d0200b1
 
 com/gemstone/gemfire/internal/admin/remote/RegionStatisticsRequest,2
-fromData,6,2a2bb7000cb1
-toData,6,2a2bb7000bb1
+fromData,6,2a2bb7000db1
+toData,6,2a2bb7000cb1
 
 com/gemstone/gemfire/internal/admin/remote/RegionStatisticsResponse,2
-fromData,17,2a2bb7000b2a2bb8000cc00005b50008b1
-toData,14,2a2bb700092ab400082bb8000ab1
+fromData,17,2a2bb7000c2a2bb8000dc00005b50008b1
+toData,14,2a2bb7000a2ab400082bb8000bb1
 
 com/gemstone/gemfire/internal/admin/remote/RegionSubRegionSizeRequest,2
 fromData,6,2a2bb70010b1
@@ -717,88 +713,92 @@ fromData,45,2a2bb9001a0100b500032a2bb9001a0100b500052a2bb8001bb500072a2bb8001bb5
 toData,45,2b2ab40003b9001803002b2ab40005b9001803002ab400072bb800192ab4000a2bb800192ab4000c2bb80019b1
 
 com/gemstone/gemfire/internal/admin/remote/RemoveHealthListenerRequest,2
-fromData,16,2a2bb7000c2a2bb9000d0100b50003b1
-toData,16,2a2bb7000a2b2ab40003b9000b0200b1
+fromData,16,2a2bb7000d2a2bb9000e0100b50003b1
+toData,16,2a2bb7000b2b2ab40003b9000c0200b1
 
 com/gemstone/gemfire/internal/admin/remote/RemoveHealthListenerResponse,2
-fromData,6,2a2bb70007b1
-toData,6,2a2bb70006b1
+fromData,6,2a2bb70008b1
+toData,6,2a2bb70007b1
 
 com/gemstone/gemfire/internal/admin/remote/ResetHealthStatusRequest,2
-fromData,16,2a2bb7000c2a2bb9000d0100b50003b1
-toData,16,2a2bb7000a2b2ab40003b9000b0200b1
+fromData,16,2a2bb7000d2a2bb9000e0100b50003b1
+toData,16,2a2bb7000b2b2ab40003b9000c0200b1
 
 com/gemstone/gemfire/internal/admin/remote/ResetHealthStatusResponse,2
-fromData,6,2a2bb70009b1
-toData,6,2a2bb70008b1
+fromData,6,2a2bb7000ab1
+toData,6,2a2bb70009b1
 
 com/gemstone/gemfire/internal/admin/remote/RevokePersistentIDRequest,2
-fromData,25,2a2bb7001b2abb001c59b7001db500022ab400022bb8001eb1
-toData,14,2a2bb7001f2ab400022bb80020b1
+fromData,25,2a2bb7001c2abb001d59b7001eb500022ab400022bb8001fb1
+toData,14,2a2bb700202ab400022bb80021b1
 
 com/gemstone/gemfire/internal/admin/remote/RootRegionRequest,2
-fromData,6,2a2bb7000ab1
-toData,6,2a2bb70009b1
+fromData,6,2a2bb7000bb1
+toData,6,2a2bb7000ab1
 
 com/gemstone/gemfire/internal/admin/remote/RootRegionResponse,2
-fromData,34,2a2bb700202a2bb80021c00017c00017b500182a2bb80021c00017c00017b50019b1
-toData,22,2a2bb7001e2ab400182bb8001f2ab400192bb8001fb1
+fromData,34,2a2bb700222a2bb80023c00018c00018b500192a2bb80023c00018c00018b5001ab1
+toData,22,2a2bb700202ab400192bb800212ab4001a2bb80021b1
+
+com/gemstone/gemfire/internal/admin/remote/ShutdownAllGatewayHubsRequest,2
+fromData,16,2a2bb700072a2bb900080100b50005b1
+toData,16,2a2bb700092b2ab40005b9000a0200b1
 
 com/gemstone/gemfire/internal/admin/remote/ShutdownAllRequest,2
-fromData,6,2a2bb70043b1
-toData,6,2a2bb70044b1
+fromData,6,2a2bb70044b1
+toData,6,2a2bb70045b1
 
 com/gemstone/gemfire/internal/admin/remote/ShutdownAllResponse,2
-fromData,16,2a2bb700062a2bb900070100b50002b1
-toData,16,2a2bb700042b2ab40002b900050200b1
+fromData,16,2a2bb700072a2bb900080100b50002b1
+toData,16,2a2bb700052b2ab40002b900060200b1
 
 com/gemstone/gemfire/internal/admin/remote/SnapshotResultMessage,2
-fromData,27,2a2bb7000d2a2bb8000ec0000fb500062a2bb900100100b50007b1
-toData,24,2a2bb7000a2ab400062bb8000b2b2ab40007b9000c0200b1
+fromData,27,2a2bb7000e2a2bb8000fc00010b500062a2bb900110100b50007b1
+toData,24,2a2bb7000b2ab400062bb8000c2b2ab40007b9000d0200b1
 
 com/gemstone/gemfire/internal/admin/remote/StatAlertsManagerAssignMessage,2
-fromData,30,2a2bb7000f2a2bb900100100b500052a2bb80011c00012c00012b50002b1
-toData,24,2a2bb7000c2b2ab40005b9000d03002ab400022bb8000eb1
+fromData,30,2a2bb700102a2bb900110100b500052a2bb80012c00013c00013b50002b1
+toData,24,2a2bb7000d2b2ab40005b9000e03002ab400022bb8000fb1
 
 com/gemstone/gemfire/internal/admin/remote/StatListenerMessage,2
-fromData,86,2a2bb700102a2bb900110100b500042a2bb900120100b500052a2ab40005bc0ab500062a2ab40005bc07b50007033d1c2ab40005a200212ab400061c2bb9001201004f2ab400071c2bb90013010052840201a7ffddb1
-toData,66,2a2bb7000c2b2ab40004b9000d03002b2ab40005b9000e0200033d1c2ab40005a200212b2ab400061c2eb9000e02002b2ab400071c31b9000f0300840201a7ffddb1
+fromData,86,2a2bb700112a2bb900120100b500042a2bb900130100b500052a2ab40005bc0ab500062a2ab40005bc07b50007033d1c2ab40005a200212ab400061c2bb9001301004f2ab400071c2bb90014010052840201a7ffddb1
+toData,66,2a2bb7000d2b2ab40004b9000e03002b2ab40005b9000f0200033d1c2ab40005a200212b2ab400061c2eb9000f02002b2ab400071c31b900100300840201a7ffddb1
 
 com/gemstone/gemfire/internal/admin/remote/StoreSysCfgRequest,2
-fromData,17,2a2bb7000c2a2bb8000dc0000eb50003b1
-toData,14,2a2bb7000a2ab400032bb8000bb1
+fromData,17,2a2bb7000d2a2bb8000ec0000fb50003b1
+toData,14,2a2bb7000b2ab400032bb8000cb1
 
 com/gemstone/gemfire/internal/admin/remote/StoreSysCfgResponse,2
-fromData,6,2a2bb7000db1
-toData,6,2a2bb7000cb1
+fromData,6,2a2bb7000eb1
+toData,6,2a2bb7000db1
 
 com/gemstone/gemfire/internal/admin/remote/SubRegionRequest,2
-fromData,6,2a2bb7000cb1
-toData,6,2a2bb7000bb1
+fromData,6,2a2bb7000db1
+toData,6,2a2bb7000cb1
 
 com/gemstone/gemfire/internal/admin/remote/SubRegionResponse,2
-fromData,34,2a2bb7001d2a2bb8001ec00012c00012b500132a2bb8001ec00012c00012b50014b1
-toData,22,2a2bb7001b2ab400132bb8001c2ab400142bb8001cb1
+fromData,34,2a2bb7001f2a2bb80020c00013c00013b500142a2bb80020c00013c00013b50015b1
+toData,22,2a2bb7001d2ab400142bb8001e2ab400152bb8001eb1
 
 com/gemstone/gemfire/internal/admin/remote/TailLogRequest,2
-fromData,6,2a2bb7000ab1
-toData,6,2a2bb70009b1
+fromData,6,2a2bb7000bb1
+toData,6,2a2bb7000ab1
 
 com/gemstone/gemfire/internal/admin/remote/TailLogResponse,2
-fromData,22,2a2bb700192a2bb8001ab5000d2a2bb8001ab5000ab1
-toData,22,2a2bb700172ab4000d2bb800182ab4000a2bb80018b1
+fromData,22,2a2bb7001a2a2bb8001bb5000d2a2bb8001bb5000ab1
+toData,22,2a2bb700182ab4000d2bb800192ab4000a2bb80019b1
 
 com/gemstone/gemfire/internal/admin/remote/UpdateAlertDefinitionMessage,2
 fromData,30,2a2bb700092a2bb9000a0100b500042a2bb8000bc0000cc0000cb50003b1
 toData,24,2a2bb700062b2ab40004b9000702002ab400032bb80008b1
 
 com/gemstone/gemfire/internal/admin/remote/VersionInfoRequest,2
-fromData,6,2a2bb7000ab1
-toData,6,2a2bb70009b1
+fromData,6,2a2bb7000bb1
+toData,6,2a2bb7000ab1
 
 com/gemstone/gemfire/internal/admin/remote/VersionInfoResponse,2
-fromData,14,2a2bb700092a2bb8000ab50006b1
-toData,14,2a2bb700072ab400062bb80008b1
+fromData,14,2a2bb7000a2a2bb8000bb50006b1
+toData,14,2a2bb700082ab400062bb80009b1
 
 com/gemstone/gemfire/internal/admin/statalerts/BaseDecoratorImpl,2
 fromData,12,2a2bb8002bc0002cb50002b1
@@ -809,20 +809,20 @@ fromData,31,2a2bb900100100b500022a2bb900100100b500032a2bb900100100b50004b1
 toData,31,2b2ab40002b9000f02002b2ab40003b9000f02002b2ab40004b9000f0200b1
 
 com/gemstone/gemfire/internal/admin/statalerts/FunctionDecoratorImpl,2
-fromData,14,2a2bb700202a2bb80021b50002b1
-toData,14,2a2bb7001e2ab400022bb8001fb1
+fromData,14,2a2bb700212a2bb80022b50002b1
+toData,14,2a2bb7001f2ab400022bb80020b1
 
 com/gemstone/gemfire/internal/admin/statalerts/GaugeThresholdDecoratorImpl,2
-fromData,28,2a2bb700232a2bb80024c00025b500032a2bb80024c00025b50004b1
-toData,22,2a2bb700212ab400032bb800222ab400042bb80022b1
+fromData,28,2a2bb700252a2bb80026c00027b500032a2bb80026c00027b50004b1
+toData,22,2a2bb700232ab400032bb800242ab400042bb80024b1
 
 com/gemstone/gemfire/internal/admin/statalerts/MultiAttrDefinitionImpl,2
 fromData,31,2a2bb8002fb500032a2bb80030b500072a2bb80031c00032c00032b50009b1
 toData,25,2ab400032bb8002c2ab400072bb8002d2ab400092bb8002eb1
 
 com/gemstone/gemfire/internal/admin/statalerts/NumberThresholdDecoratorImpl,2
-fromData,25,2a2bb700242a2bb80025c00026b500032a2bb80027b50004b1
-toData,22,2a2bb700212ab400032bb800222ab400042bb80023b1
+fromData,25,2a2bb700262a2bb80027c00028b500032a2bb80029b50004b1
+toData,22,2a2bb700232ab400032bb800242ab400042bb80025b1
 
 com/gemstone/gemfire/internal/admin/statalerts/SingleAttrDefinitionImpl,2
 fromData,28,2a2bb80031b500032a2bb80032b500072a2bb80033c00020b50002b1
@@ -833,24 +833,16 @@ fromData,1,b1
 toData,1,b1
 
 com/gemstone/gemfire/internal/cache/AbstractRegion,2
-toData,6,2a2bb80188b1
-fromData,8,bb018959b7018abf
+toData,6,2a2bb8017db1
+fromData,8,bb017e59b7017fbf
 
 com/gemstone/gemfire/internal/cache/AbstractUpdateOperation$AbstractUpdateMessage,2
 fromData,16,2a2bb700192a2bb9001a0100b5000db1
 toData,16,2a2bb7001b2b2ab4000db9001c0300b1
 
 com/gemstone/gemfire/internal/cache/AddCacheServerProfileMessage,2
-fromData,16,2a2bb7002f2a2bb900300100b50007b1
-toData,16,2a2bb7002d2b2ab40007b9002e0200b1
-
-com/gemstone/gemfire/internal/cache/ClientRegionEventImpl,2
-fromData,14,2a2bb700132a2bb80014b60003b1
-toData,14,2a2bb700112ab6000d2bb80012b1
-
-com/gemstone/gemfire/internal/cache/CacheServerAdvisor$CacheServerProfile,2
-fromData,53,2a2bb700102a2bb80011b500042a2bb900120100b500062abb001359b70014b500052ab400052bb800152a2bb900160100b60017b1
-toData,42,2a2bb7000a2ab400042bb8000b2b2ab40006b9000c02002ab400052bb8000d2b2ab6000eb9000f0300b1
+fromData,16,2a2bb700302a2bb900310100b50008b1
+toData,16,2a2bb7002e2b2ab40008b9002f0200b1
 
 com/gemstone/gemfire/internal/cache/BucketAdvisor$BucketProfile,2
 fromData,36,2a2bb700132a2bb900140100b500042a2bb900140100b500062a2bb900140100b50012b1
@@ -869,131 +861,138 @@ fromData,93,2a2bb7000a2a2bb8000bb6000cb500032a2bb8000db6000eb500042a2bb8000fb600
 toData,91,2a2bb700132ab40003b800142bb800152ab40004b800162bb800172ab40005b800182bb800192b2ab40009b9001a02002ab400099e00262ab400074d2cbe3e03360415041da200152c1504323a0519052bb8001b840401a7ffebb1
 
 com/gemstone/gemfire/internal/cache/CacheDistributionAdvisor$CacheProfile,2
-fromData,94,2a2bb700622bb9006301003d2a1cb6001b2a1cb700649900162abb006559b70066b500262ab400262bb800672a1cb7006899000e2a2bb80069c0006ab5000d2a1cb7006b99000e2a2bb80069c0006ab5000e2a2bb80069c0006cb50016b1
-toData,81,2a2bb7005b2b2ab6001ab9005c02002ab40026c6000b2ab400262bb8005d2ab4000db9002c01009a000c2a2ab4000d2bb7005e2ab4000eb9002c01009a000c2a2ab4000e2bb7005e2ab400162bb8005fb1
+fromData,94,2a2bb700662bb9006701003d2a1cb6001c2a1cb700689900162abb006959b7006ab500272ab400272bb8006b2a1cb7006c99000e2a2bb8006dc0006eb5000e2a1cb7006f99000e2a2bb8006dc0006eb5000f2a2bb8006dc00070b50017b1
+toData,81,2a2bb7005e2b2ab6001bb9005f02002ab40027c6000b2ab400272bb800602ab4000eb9002d01009a000c2a2ab4000e2bb700612ab4000fb9002d01009a000c2a2ab4000f2bb700612ab400172bb80062b1
+
+com/gemstone/gemfire/internal/cache/CacheServerAdvisor$CacheServerProfile,2
+fromData,53,2a2bb700112a2bb80012b500042a2bb900130100b500062abb001459b70015b500052ab400052bb800162a2bb900170100b60018b1
+toData,42,2a2bb7000b2ab400042bb8000c2b2ab40006b9000d02002ab400052bb8000e2b2ab6000fb900100300b1
+
+com/gemstone/gemfire/internal/cache/ClientRegionEventImpl,2
+fromData,14,2a2bb700142a2bb80015b60003b1
+toData,14,2a2bb700122ab6000d2bb80013b1
 
 com/gemstone/gemfire/internal/cache/CloseCacheMessage,2
-fromData,16,2a2bb700172a2bb900180100b50002b1
-toData,16,2a2bb700192b2ab40002b9001a0200b1
+fromData,16,2a2bb700182a2bb900190100b50002b1
+toData,16,2a2bb7001a2b2ab40002b9001b0200b1
 
 com/gemstone/gemfire/internal/cache/ControllerAdvisor$ControllerProfile,2
 fromData,6,2a2bb70007b1
 toData,6,2a2bb70006b1
 
 com/gemstone/gemfire/internal/cache/CreateRegionProcessor$CreateRegionMessage,2
-fromData,45,2a2bb7008b2a2bb8008cb500092a2bb8008dc00055b500432a2bb9008e0100b500032a2bb9008f0100b50064b1
-toData,42,2a2bb700902ab400092bb800912ab400432bb800922b2ab40003b9009302002b2ab40064b900940200b1
+fromData,45,2a2bb7008c2a2bb8008db5000a2a2bb8008ec00055b500432a2bb9008f0100b500032a2bb900900100b50064b1
+toData,42,2a2bb700922ab4000a2bb800932ab400432bb800942b2ab40003b9009502002b2ab40064b900960200b1
 
 com/gemstone/gemfire/internal/cache/CreateRegionProcessor$CreateRegionReplyMessage,2
-fromData,161,2a2bb700052bb90006010099000e2a2bb80007c00008b500092bb9000a01003d1c9a000b2a01b5000ba700352abb000c591cb7000db5000b033e1d1ca20022bb000e59b7000f3a0419042bb800102ab4000b1904b6001157840301a7ffdf2bb90006010099000c2a2b03b80012b500132bb9000601009900162abb001459b70015b500162ab400162bb800102a2bb900060100b500172a2bb900180100b50004b1
-toData,191,2a2bb700192b2ab40009c6000704a7000403b9001a02002ab40009c6000b2ab400092bb8001b2ab4000bc7000d2b03b9001c0200a700322ab4000bb6001d3d2b1cb9001c0200033e1d1ca2001c2ab4000b1db6001ec0000e3a0419042bb8001f840301a7ffe52ab40013c600192b04b9001a02002b2ab40013c0002003b80021a7000a2b03b9001a02002ab40016c600152b04b9001a02002ab400162bb8001fa7000a2b03b9001a02002b2ab40017b9001a02002b2ab40004b900220300b1
+fromData,161,2a2bb700062bb90007010099000e2a2bb80008c00009b5000a2bb9000b01003d1c9a000b2a01b5000ca700352abb000d591cb7000eb5000c033e1d1ca20022bb000f59b700103a0419042bb800112ab4000c1904b6001257840301a7ffdf2bb90007010099000c2a2b03b80013b500142bb9000701009900162abb001559b70016b500172ab400172bb800112a2bb900070100b500182a2bb900190100b50004b1
+toData,191,2a2bb7001a2b2ab4000ac6000704a7000403b9001b02002ab4000ac6000b2ab4000a2bb8001c2ab4000cc7000d2b03b9001d0200a700322ab4000cb6001e3d2b1cb9001d0200033e1d1ca2001c2ab4000c1db6001fc0000f3a0419042bb80020840301a7ffe52ab40014c600192b04b9001b02002b2ab40014c0002103b80022a7000a2b03b9001b02002ab40017c600152b04b9001b02002ab400172bb80020a7000a2b03b9001b02002b2ab40018b9001b02002b2ab40004b900230300b1
 
 com/gemstone/gemfire/internal/cache/DestroyOperation$DestroyMessage,2
-fromData,45,2a2bb700342a2bb80035c00036b500022a2bb80035b5001a2bb800374d2cb6003899000b2a2bb80039b50005b1
-toData,118,2a2bb7003a2ab400022bb8003b2ab4001a2bb8003b2ab40003b6003cc000084d2cc1003d99002e2cb6003e4e2db6003f990018b200402bb800412ab40003b600422bb80043a7000ab200442bb80041a700262cb60045990018b200402bb800412ab40003b600422bb80043a7000ab200442bb80041b1
+fromData,45,2a2bb700352a2bb80036c00037b500022a2bb80036b5001a2bb800384d2cb6003999000b2a2bb8003ab50005b1
+toData,118,2a2bb7003b2ab400022bb8003c2ab4001a2bb8003c2ab40003b6003dc000084d2cc1003e99002e2cb6003f4e2db60040990018b200412bb800422ab40003b600432bb80044a7000ab200452bb80042a700262cb60046990018b200412bb800422ab40003b600432bb80044a7000ab200452bb80042b1
 
 com/gemstone/gemfire/internal/cache/DestroyOperation$DestroyWithContextMessage,2
-fromData,14,2a2bb7000f2a2bb80010b50008b1
-toData,14,2a2bb700112ab400082bb80012b1
+fromData,14,2a2bb700102a2bb80011b50008b1
+toData,14,2a2bb700122ab400082bb80013b1
 
 com/gemstone/gemfire/internal/cache/DestroyPartitionedRegionMessage,2
-fromData,76,2a2bb700432a2bb80044b500062a2bb900450100b80046b500082a2bb900470100b5000a2bb9004701003d2a1cbc0ab5000c033e1d1ca200152ab4000c1d2bb9004701004f840301a7ffecb1
-toData,77,2a2bb700482ab400062bb800492b2ab40008b4004ab9004b02002b2ab4000ab9004c02002b2ab4000cbeb9004c0200033d1c2ab4000cbea200152b2ab4000c1c2eb9004c0200840201a7ffe8b1
+fromData,76,2a2bb700452a2bb80046b500062a2bb900470100b80048b500082a2bb900490100b5000a2bb9004901003d2a1cbc0ab5000d033e1d1ca200152ab4000d1d2bb9004901004f840301a7ffecb1
+toData,77,2a2bb7004a2ab400062bb8004b2b2ab40008b4004cb9004d02002b2ab4000ab9004e02002b2ab4000dbeb9004e0200033d1c2ab4000dbea200152b2ab4000d1c2eb9004e0200840201a7ffe8b1
 
 com/gemstone/gemfire/internal/cache/DestroyRegionOperation$DestroyRegionMessage,2
-fromData,41,2a2bb700562a2bb80057c00058b500082a2bb80059b500112a2bb8005ab5001e2a2bb8005bb50028b1
-toData,38,2a2bb7005c2ab400082bb8005d2ab400112bb8005e2ab4001e2bb8005f2ab400282bb80060b1
+fromData,41,2a2bb700582a2bb80059c0005ab500082a2bb8005bb500112a2bb8005cb5001f2a2bb8005db50029b1
+toData,38,2a2bb7005e2ab400082bb8005f2ab400112bb800602ab4001f2bb800612ab400292bb80062b1
 
 com/gemstone/gemfire/internal/cache/DestroyRegionOperation$DestroyRegionWithContextMessage,2
-fromData,14,2a2bb7000d2a2bb8000eb50006b1
-toData,14,2a2bb7000f2ab400062bb80010b1
+fromData,14,2a2bb7000e2a2bb8000fb50006b1
+toData,14,2a2bb700102ab400062bb80011b1
 
 com/gemstone/gemfire/internal/cache/DistTXCommitMessage,2
-fromData,14,2a2bb7002e2a2bb8002fb50003b1
-toData,14,2a2bb700302ab400032bb80031b1
+fromData,14,2a2bb7002f2a2bb80030b50003b1
+toData,14,2a2bb700312ab400032bb80032b1
 
 com/gemstone/gemfire/internal/cache/DistTXCommitMessage$DistTXCommitReplyMessage,2
-fromData,17,2a2bb700192a2bb8001ac0001bb50004b1
-toData,14,2a2bb700172ab400042bb80018b1
+fromData,17,2a2bb7001a2a2bb8001bc0001cb50004b1
+toData,14,2a2bb700182ab400042bb80019b1
 
 com/gemstone/gemfire/internal/cache/DistTXPrecommitMessage,2
-fromData,14,2a2bb700312a2bb80032b5000ab1
-toData,14,2a2bb7002f2ab4000a2bb80030b1
+fromData,14,2a2bb700322a2bb80033b5000bb1
+toData,14,2a2bb700302ab4000b2bb80031b1
 
 com/gemstone/gemfire/internal/cache/DistTXPrecommitMessage$DistTXPrecommitReplyMessage,2
-fromData,17,2a2bb700192a2bb8001ac0001bb50004b1
-toData,14,2a2bb700172ab400042bb80018b1
+fromData,17,2a2bb7001a2a2bb8001bc0001cb50004b1
+toData,14,2a2bb700182ab400042bb80019b1
 
 com/gemstone/gemfire/internal/cache/DistTXPrecommitMessage$DistTxPrecommitResponse,2
-fromData,17,2a2bb80007b500032a2bb80008b50004b1
-toData,17,2ab400032bb800052ab400042bb80006b1
+fromData,17,2a2bb80008b500032a2bb80009b50004b1
+toData,17,2ab400032bb800062ab400042bb80007b1
 
 com/gemstone/gemfire/internal/cache/DistTXRollbackMessage,2
-fromData,6,2a2bb70015b1
-toData,6,2a2bb70016b1
+fromData,6,2a2bb70016b1
+toData,6,2a2bb70017b1
 
 com/gemstone/gemfire/internal/cache/DistTXRollbackMessage$DistTXRollbackReplyMessage,2
-fromData,14,2a2bb700192a2bb8001ab50004b1
-toData,14,2a2bb700172ab400042bb80018b1
-
+fromData,14,2a2bb7001a2a2bb8001bb50004b1
+toData,14,2a2bb700182ab400042bb80019b1
 
 com/gemstone/gemfire/internal/cache/DistributedCacheOperation$CacheOperationMessage,2
-fromData,360,2bb800904d2bb9009101003e0336042cb20092b600939b000b2bb90091010036042a1db500942a1d2bb600952a2bb80096b500212a2bb900970100b80098b500072a1d1100807e99000704a7000403b500022a1d10087e99000704a7000403b500521d1102007e99000b2a2bb80099b500832a1d1104007e99000704a7000403b500052a1d10407e99000704a7000403b5001b2ab4001b9900382bb900970100360515059a000b2a03b5001ca7001b150504a0000b2a04b5001ca7000dbb009a59129bb7009cbf2a2bb8009db5001d1d1101007e99000704a700040336052a1d1108007e99000704a7000403b5009e15059900162abb009f59b700a0b5000c2ab4000c2bb800a11d1110007e99001c1d1120007e99000704a700040336062a15062bb800a2b5000815041104007e9900392a04b5000d2ac100a399002d2ac000a315041102007e99000704a7000403b600a42ac000a315041101007e99000704a7000403b600a5b1
-toData,219,033d033e2a1cb600a63d2a1db600a73e2b1cb900a802002bb800a93a041904b20092b600939b000a2b1db900a802002ab4000b9e000d2b2ab4000bb900aa02002ab400212bb800ab2b2ab40007b400acb900ad02002ab40083c6000b2ab400832bb800ae2ab4001b9900542b2ab4001c99000704a7000403b900ad02002ab4001cb800af36052ab4001c9a001f2ab4001dc1001e990015013a062ab4001dc0001ec0001e3a07a7000c2ab4001d3a06013a071505190619072bb800b02ab4000cc6000b2ab4000c2bb800b12ab40008c6000b2ab400082bb800b1b1
+fromData,360,2bb800924d2bb9009301003e0336042cb20094b600959b000b2bb90093010036042a1db500962a1d2bb600972a2bb80098b500212a2bb900990100b8009ab500072a1d1100807e99000704a7000403b500022a1d10087e99000704a7000403b500541d1102007e99000b2a2bb8009cb500852a1d1104007e99000704a7000403b500052a1d10407e99000704a7000403b5001b2ab4001b9900382bb900990100360515059a000b2a03b5001ca7001b150504a0000b2a04b5001ca7000dbb009d59129eb7009fbf2a2bb800a0b5001d1d1101007e99000704a700040336052a1d1108007e99000704a7000403b500a115059900162abb00a259b700a3b5000c2ab4000c2bb800a41d1110007e99001c1d1120007e99000704a700040336062a15062bb800a5b5000815041104007e9900392a04b5000d2ac100a699002d2ac000a615041102007e99000704a7000403b600a72ac000a615041101007e99000704a7000403b600a8b1
+toData,219,033d033e2a1cb600a93d2a1db600aa3e2b1cb900ab02002bb800ac3a041904b20094b600959b000a2b1db900ab02002ab4000b9e000d2b2ab4000bb900ad02002ab400212bb800ae2b2ab40007b400afb900b002002ab40085c6000b2ab400852bb800b12ab4001b9900542b2ab4001c99000704a7000403b900b002002ab4001cb800b236052ab4001c9a001f2ab4001dc1001e990015013a062ab4001dc0001ec0001e3a07a7000c2ab4001d3a06013a071505190619072bb800b32ab4000cc6000b2ab4000c2bb800b42ab40008c6000b2ab400082bb800b4b1
 
 com/gemstone/gemfire/internal/cache/DistributedClearOperation$ClearRegionMessage,2
-fromData,66,2a2bb7001f2ab800202bb90021010032b500022a2bb80022c00023b500052a2bb80022c00024b500162bb80025b20026b600279b000e2a2bb80022c00028b50010b1
-toData,56,2a2bb700292b2ab40002b60015b9002a02002ab400052bb8002b2ab400162bb8002b2bb8002cb20026b600279b000b2ab400102bb8002bb1
+fromData,66,2a2bb700212ab800222bb90023010032b500022a2bb80024c00025b500062a2bb80024c00026b500172bb80027b20028b600299b000e2a2bb80024c0002ab50011b1
+toData,56,2a2bb7002b2b2ab40002b60016b9002c02002ab400062bb8002d2ab400172bb8002d2bb8002eb20028b600299b000b2ab400112bb8002db1
 
 com/gemstone/gemfire/internal/cache/DistributedClearOperation$ClearRegionWithContextMessage,2
-fromData,14,2a2bb7000d2a2bb8000eb50006b1
-toData,14,2a2bb7000f2ab400062bb80010b1
+fromData,14,2a2bb7000e2a2bb8000fb50006b1
+toData,14,2a2bb700102ab400062bb80011b1
 
 com/gemstone/gemfire/internal/cache/DistributedPutAllOperation$EntryVersionsList,2
-fromData,284,2bb9002201003d1c077e07a0000704a70004033e1c10207e1020a0000704a70004033604b80011b20012b900130200990018b80011122304bd001559031cb8001653b9002403001d9900d32bb80025883605b80011b20012b90013020099001cb80011b20012122604bd001559031505b8002753b900170400bb0028591505b700023a0603360715071505a200902bb90022010036081508aa0000000000007d0000000000000003000000200000002900000037000000562a01b6000757a700572a15042bb80029b6000757a7004915042bb800293a0919061909b6001cb9002a0200572a1909b6000757a7002a15042bb800293a092bb8002588360a19091906150ab9002b0200c0002cb600212a1909b6000757840701a7ff6fb1
-toData,299,033d033e2ab600089e003d1c07803d043e2ab6000b3a041904b9000c01009900271904b9000d0100c0000a3a051905c600131905c1001099000e1c1020803da70006a7ffd5b80011b20012b90013020099001fb80011b20012121405bd001559032a5359041cb8001653b9001704002b1cb9001802001d9900b32ab60008852bb80019bb001a592ab60008b7001b3a040336052ab6000b3a061906b9000c010099008a1906b9000d0100c0000a3a071907c7000d2b03b900180200a7006c1907b6001c3a081908c700132b04b90018020019072bb8001da7005019041908b6001e360915099a00242b05b90018020084050115053609190419081509b6001f5719072bb8001da700212b06b90018020019072b03b6002019071908b6002115090464852bb80019a7ff72b1
+fromData,284,2bb9002301003d1c077e07a0000704a70004033e1c10207e1020a0000704a70004033604b80012b20013b900140200990018b80012122404bd001659031cb8001753b9002503001d9900d32bb80026883605b80012b20013b90014020099001cb80012b20013122704bd001659031505b8002853b900180400bb0029591505b700023a0603360715071505a200902bb90023010036081508aa0000000000007d0000000000000003000000200000002900000037000000562a01b6000757a700572a15042bb8002ab6000757a7004915042bb8002a3a0919061909b6001db9002b0200572a1909b6000757a7002a15042bb8002a3a092bb8002688360a19091906150ab9002c0200c0002db600222a1909b6000757840701a7ff6fb1
+toData,299,033d033e2ab600089e003d1c07803d043e2ab6000b3a041904b9000c01009900271904b9000d0100c0000a3a051905c600131905c1001199000e1c1020803da70006a7ffd5b80012b20013b90014020099001fb80012b20013121505bd001659032a5359041cb8001753b9001804002b1cb9001902001d9900b32ab60008852bb8001abb001b592ab60008b7001c3a040336052ab6000b3a061906b9000c010099008a1906b9000d0100c0000a3a071907c7000d2b03b900190200a7006c1907b6001d3a081908c700132b04b90019020019072bb8001ea7005019041908b6001f360915099a00242b05b90019020084050115053609190419081509b600205719072bb8001ea700212b06b90019020019072b03b6002119071908b6002215090464852bb8001aa7ff72b1
 
 com/gemstone/gemfire/internal/cache/DistributedPutAllOperation$PutAllEntryData,1
-toData,236,2ab400094e2ab4000b3a042d2bb8003c1904c1003d9a00081904c700192b03b9003e02001904c0003dc0003d2bb8003fa700341904c1004099001f1904c000403a052b04b9003e02001905b9004101002bb8003fa700102b04b9003e020019042bb800422b2ab40011b40043b9003e02002ab4000536052ab40025c6000a150507809136052ab40016c6001d15051008809136052ab40016c1004499000b150510208091360515051080809136052b1505b9003e02002ab40025c6000b2ab400252bb8003c2ab40016c6000b2ab400162bb800452ab6002799000b2ab400132bb800452ab400072bb80046b1
+toData,236,2ab4000a4e2ab4000c3a042d2bb8003d1904c1003e9a00081904c700192b03b9003f02001904c0003ec0003e2bb80040a700341904c1004199001f1904c000413a052b04b9003f02001905b9004201002bb80040a700102b04b9003f020019042bb800432b2ab40012b40044b9003f02002ab4000636052ab40026c6000a150507809136052ab40017c6001d15051008809136052ab40017c1004599000b150510208091360515051080809136052b1505b9003f02002ab40026c6000b2ab400262bb8003d2ab40017c6000b2ab400172bb800462ab6002899000b2ab400142bb800462ab400082bb80047b1
 
 com/gemstone/gemfire/internal/cache/DistributedPutAllOperation$PutAllMessage,2
-fromData,197,2a2bb7003c2a2bb8003dc0003eb500052a2bb8003f88b500152a2ab40015bd0040b500062ab400159e00722bb800414dbb004259b700434e03360415042ab40015a200202ab400061504bb0040592b2ab4000515042c2db7004453840401a7ffdd2bb9004501003604150499002f2bb800463a0503360615062ab40015a2001d2ab4000615063219051506b60047c00048b5002f840601a7ffe02ab400491140007e99000e2a2bb8003dc0004ab5000b2a2ab400491180007e99000704a7000403b5001ab1
-toData,181,2a2bb7004b2ab400052bb8004c2ab40015852bb8004d2ab400159e008bbb004e592ab40015b7004f4d033e2ab400060332b40050c10025360403360515052ab40015a200531d9a00122ab40006150532b4002fc60005043e2ab40006150532b4002f3a062c1906b60051572ab4000615053201b5002f2ab400061505322b1504b600522ab400061505321906b5002f840501a7ffaa2b1db9005302001d9900082c2bb800542ab4000bc6000b2ab4000b2bb8004cb1
+fromData,197,2a2bb7003e2a2bb8003fc00040b500062a2bb8004188b500162a2ab40016bd0042b500072ab400169e00722bb800434dbb004459b700454e03360415042ab40016a200202ab400071504bb0042592b2ab4000615042c2db7004653840401a7ffdd2bb9004701003604150499002f2bb800483a0503360615062ab40016a2001d2ab4000715063219051506b60049c0004ab50030840601a7ffe02ab4004b1140007e99000e2a2bb8003fc0004db5000c2a2ab4004b1180007e99000704a7000403b5001bb1
+toData,181,2a2bb7004e2ab400062bb8004f2ab40016852bb800502ab400169e008bbb0051592ab40016b700524d033e2ab400070332b40053c10026360403360515052ab40016a200531d9a00122ab40007150532b40030c60005043e2ab40007150532b400303a062c1906b60054572ab4000715053201b500302ab400071505322b1504b600552ab400071505321906b50030840501a7ffaa2b1db9005602001d9900082c2bb800572ab4000cc6000b2ab4000c2bb8004fb1
 
 com/gemstone/gemfire/internal/cache/DistributedRegionFunctionStreamingMessage,2
-fromData,171,2a2bb7005f2bb9006001003d1c047e9900142a2bb900610100b500072ab40007b800621c077e99000d2a2bb900610100b500041c057e99000e2a2bb80063c00064b500052bb800634e2dc100659900252a03b5000c2a2dc00065b80066b500062ab40006c7001b2a2dc00065b5004ba700102a2dc00067b500062a04b5000c2a2bb80063c00068b500082a2bb80069b5000a2a2bb8006ab500092a1c10407e99000704a7000403b5000bb1
-toData,173,2a2bb7006b033d2ab400079900081c0480933d2ab40004029f00081c0780933d2ab40005c600081c0580933d2ab4000b9900091c104080933d2b1cb9006c02002ab4000799000d2b2ab40007b9006d02002ab40004029f000d2b2ab40004b9006d02002ab40005c6000b2ab400052bb8006e2ab4000c99000e2ab400062bb8006ea700102ab40006b9005601002bb8006e2ab400082bb8006e2ab4000ac0006f2bb800702ab400092bb80071b1
+fromData,171,2a2bb700612bb9006201003d1c047e9900142a2bb900640100b500082ab40008b800651c077e99000d2a2bb900640100b500051c057e99000e2a2bb80066c00067b500062bb800664e2dc100689900252a03b5000d2a2dc00068b80069b500072ab40007c7001b2a2dc00068b5004ca700102a2dc0006ab500072a04b5000d2a2bb80066c0006bb500092a2bb8006cb5000b2a2bb8006db5000a2a1c10407e99000704a7000403b5000cb1
+toData,173,2a2bb7006f033d2ab400089900081c0480933d2ab40005029f00081c0780933d2ab40006c600081c0580933d2ab4000c9900091c104080933d2b1cb9007002002ab4000899000d2b2ab40008b9007102002ab40005029f000d2b2ab40005b9007102002ab40006c6000b2ab400062bb800722ab4000d99000e2ab400072bb80072a700102ab40007b9005701002bb800722ab400092bb800722ab4000bc000732bb800742ab4000a2bb80075b1
 
 com/gemstone/gemfire/internal/cache/DistributedRemoveAllOperation$RemoveAllEntryData,1
-toData,146,2ab400094e2d2bb8003e2b2ab4000fb4003fb9004002002ab4000536042ab40021c6000a150407809136042ab40014c6001d15041008809136042ab40014c1004199000b150410208091360415041080809136042b1504b9004002002ab40021c6000b2ab400212bb8003e2ab40014c6000b2ab400142bb800422ab6002399000b2ab400112bb800422ab400072bb80043b1
+toData,146,2ab4000a4e2d2bb8003f2b2ab40010b40040b9004102002ab4000636042ab40022c6000a150407809136042ab40015c6001d15041008809136042ab40015c1004299000b150410208091360415041080809136042b1504b9004102002ab40022c6000b2ab400222bb8003f2ab40015c6000b2ab400152bb800432ab6002499000b2ab400122bb800432ab400082bb80044b1
 
 com/gemstone/gemfire/internal/cache/DistributedRemoveAllOperation$RemoveAllMessage,2
-fromData,197,2a2bb700372a2bb80038c00039b500032a2bb8003a88b500132a2ab40013bd003bb500042ab400139e00722bb8003c4dbb003d59b7003e4e03360415042ab40013a200202ab400041504bb003b592b2ab4000315042c2db7003f53840401a7ffdd2bb9004001003604150499002f2bb800413a0503360615062ab40013a2001d2ab4000415063219051506b60042c00043b5002b840601a7ffe02ab400441140007e99000e2a2bb80038c00045b500092a2ab400441180007e99000704a7000403b50018b1
-toData,181,2a2bb700462ab400032bb800472ab40013852bb800482ab400139e008bbb0049592ab40013b7004a4d033e2ab400040332b4004bc10026360403360515052ab40013a200531d9a00122ab40004150532b4002bc60005043e2ab40004150532b4002b3a062c1906b6004c572ab4000415053201b5002b2ab400041505322b1504b6004d2ab400041505321906b5002b840501a7ffaa2b1db9004e02001d9900082c2bb8004f2ab40009c6000b2ab400092bb80047b1
+fromData,197,2a2bb700382a2bb80039c0003ab500032a2bb8003b88b500132a2ab40013bd003cb500042ab400139e00722bb8003d4dbb003e59b7003f4e03360415042ab40013a200202ab400041504bb003c592b2ab4000315042c2db7004053840401a7ffdd2bb9004101003604150499002f2bb800423a0503360615062ab40013a2001d2ab4000415063219051506b60043c00044b5002b840601a7ffe02ab400451140007e99000e2a2bb80039c00047b500092a2ab400451180007e99000704a7000403b50018b1
+toData,181,2a2bb700482ab400032bb800492ab40013852bb8004a2ab400139e008bbb004b592ab40013b7004c4d033e2ab400040332b4004dc100263604033

<TRUNCATED>


[06/50] [abbrv] incubator-geode git commit: GEODE-450: fix race in unit test

Posted by ds...@apache.org.
GEODE-450: fix race in unit test

The test now waits 30 seconds instead of 3.
Also when it fails it will print info about when it
thought the entry that is not expiring is scheduled to expire.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a23c33c7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a23c33c7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a23c33c7

Branch: refs/heads/develop
Commit: a23c33c71228f3104abb81d73fad8bf031600657
Parents: f8935b3
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Tue Oct 20 14:33:07 2015 -0700
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Tue Oct 20 14:33:07 2015 -0700

----------------------------------------------------------------------
 .../gemfire/cache30/MultiVMRegionTestCase.java        | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a23c33c7/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
index b6ba865..e9a8e29 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
@@ -4090,10 +4090,20 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                   return region.getEntry(key) == null;
                 }
                 public String description() {
-                  return "Entry for key " + key + " never expired (since it still exists)";
+                  LocalRegion lr = (LocalRegion) region;
+                  String expiryInfo = "";
+                  try {
+                    EntryExpiryTask eet = lr.getEntryExpiryTask(key);
+                    if (eet != null) {
+                      expiryInfo = "expirationTime= " + eet.getExpirationTime() + " now=" + eet.getNow();
+                    }
+                  } catch (EntryNotFoundException ex) {
+                    expiryInfo ="EntryNotFoundException when getting expiry task";
+                  }
+                  return "Entry for key " + key + " never expired (since it still exists) " + expiryInfo;
                 }
               };
-              DistributedTestCase.waitForCriterion(waitForUpdate, 3000, 1, true);
+              DistributedTestCase.waitForCriterion(waitForUpdate, 30000, 1, true);
             }
             assertNull(region.getEntry(key));
           }


[25/50] [abbrv] incubator-geode git commit: GEODE-392: Removing catch Throwable from CacheTestCase.closeCache

Posted by ds...@apache.org.
GEODE-392: Removing catch Throwable from CacheTestCase.closeCache

I'm fairly certain that GEODE-392 was caused by cache.close throwing an
exception in this method. Later on, we delete the disk store files, and
then end up calling cache.close again in DistributedTestCase. The
failure we are seeing is because the disk store files are deleted but
the cache is still open.

Unfortunately, this catch throwable was swallowing all errors, so we
were not seeing failures that happened when closing the cache.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/25bce964
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/25bce964
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/25bce964

Branch: refs/heads/develop
Commit: 25bce9646cc6f6d44212d0067d2d44bde0ad2268
Parents: 07d55bd
Author: Dan Smith <up...@apache.org>
Authored: Tue Oct 20 11:17:47 2015 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Wed Oct 21 09:01:59 2015 -0700

----------------------------------------------------------------------
 .../gemstone/gemfire/cache30/CacheTestCase.java | 36 ++++++--------------
 1 file changed, 11 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/25bce964/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheTestCase.java
index 951c985..856f6b3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheTestCase.java
@@ -16,6 +16,8 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Properties;
 
+import org.apache.logging.log4j.Logger;
+
 import com.gemstone.gemfire.InternalGemFireError;
 import com.gemstone.gemfire.SystemFailure;
 import com.gemstone.gemfire.cache.AttributesFactory;
@@ -45,6 +47,7 @@ import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
+import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.org.jgroups.Event;
 import com.gemstone.org.jgroups.JChannel;
 import com.gemstone.org.jgroups.stack.Protocol;
@@ -62,6 +65,7 @@ import dunit.VM;
  * @since 3.0
  */
 public abstract class CacheTestCase extends DistributedTestCase {
+  private static final Logger logger = LogService.getLogger();
 
   /** The Cache from which regions are obtained 
    * 
@@ -402,19 +406,8 @@ public abstract class CacheTestCase extends DistributedTestCase {
               }
             }
           }
-          try {
-            cache.close();
-          }
-          catch (VirtualMachineError e) {
-            SystemFailure.initiateFailure(e);
-            throw e;
-          }
-          catch (Throwable t) {
-          }
-          finally {
-          }
+          cache.close();
         }
-        // @todo darrel: destroy DiskStore files
       }
       finally {
         cache = null;
@@ -460,21 +453,14 @@ public abstract class CacheTestCase extends DistributedTestCase {
       try {
         closeCache();
       }
-      catch (VirtualMachineError e) {
-        SystemFailure.initiateFailure(e);
-        throw e;
-      }
-      catch (Throwable t) {
-        getLogWriter().error("Error in closing the cache ", t);
-        
+      finally {
+        try {
+          cleanDiskDirs();
+        } catch(Exception e) {
+          getLogWriter().error("Error cleaning disk dirs", e);
+        }
       }
     }
-
-    try {
-      cleanDiskDirs();
-    } catch(IOException e) {
-      getLogWriter().error("Error cleaning disk dirs", e);
-    }
   }
 
   /**


[44/50] [abbrv] incubator-geode git commit: GEODE-376: fix race in waiting for serializer

Posted by ds...@apache.org.
GEODE-376: fix race in waiting for serializer

This also fixes GEODE-400, GEODE-455, GEODE-457,
GEODE-470, GEODE-472, and GEODE-476.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f0bd8b04
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f0bd8b04
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f0bd8b04

Branch: refs/heads/develop
Commit: f0bd8b04afe226f5878b0f27636f507b330b8d35
Parents: 4a42443
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Fri Oct 23 09:58:27 2015 -0700
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Fri Oct 23 11:17:57 2015 -0700

----------------------------------------------------------------------
 .../internal/InternalDataSerializer.java        |  6 ++--
 .../gemfire/cache30/MultiVMRegionTestCase.java  | 38 ++++++++++++--------
 2 files changed, 27 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0bd8b04/gemfire-core/src/main/java/com/gemstone/gemfire/internal/InternalDataSerializer.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/InternalDataSerializer.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/InternalDataSerializer.java
index 27ba141..ca1fe68 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/InternalDataSerializer.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/InternalDataSerializer.java
@@ -3384,14 +3384,16 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
    * updates.  If the serialized bytes arrive at a VM before the
    * registration message does, the deserializer will wait an amount
    * of time for the registration message to arrive.
+   * Made public for unit test access.
    * @since 5.7
    */
-  static class GetMarker extends Marker {
+  public static class GetMarker extends Marker {
     /**
      * Number of milliseconds to wait. Also used by InternalInstantiator.
      * Note that some tests set this to a small amount to speed up failures.
+     * Made public for unit test access.
      */
-    static int WAIT_MS = Integer.getInteger("gemfire.InternalDataSerializer.WAIT_MS", 60 * 1000);
+    public static int WAIT_MS = Integer.getInteger("gemfire.InternalDataSerializer.WAIT_MS", 60 * 1000);
 
     /**
      * Returns the serializer associated with this marker.  If the

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0bd8b04/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
index b6b4c10..06e8166 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
@@ -5410,8 +5410,6 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
 
     final String name = this.getUniqueName();
 
-    disconnectAllFromDS(); // possible fix for GEODE-376
-
     SerializableRunnable create =
       new CacheSerializableRunnable("Create Region") {
           public void run2() throws CacheException {
@@ -5465,21 +5463,31 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     SerializableRunnable get = new CacheSerializableRunnable("Get int") {
         public void run2() throws CacheException {
           Region region = getRootRegion().getSubregion(name);
-//          if (region.getAttributes().getScope().isDistributedNoAck()) {
-            // wait a while for the serializer to be registered
-            long end = System.currentTimeMillis() + 30000;
-            while (InternalDataSerializer.getSerializer((byte)120) == null) {
-              assertTrue("This test sometimes fails due to timing issues",
-                  System.currentTimeMillis() <= end);
-              try {
-                Thread.sleep(1000);
+          // wait a while for the serializer to be registered
+          // A race condition exists in the product in which
+          // this thread can be stuck waiting in getSerializer
+          // for 60 seconds. So it only calls getSerializer once
+          // causing it to fail intermittently (see GEODE-376).
+          // To workaround this the test wets WAIT_MS to 1 ms.
+          // So the getSerializer will only block for 1 ms.
+          // This allows the WaitCriterion to make multiple calls
+          // of getSerializer and the subsequent calls will find
+          // the DataSerializer.
+          final int savVal = InternalDataSerializer.GetMarker.WAIT_MS;
+          InternalDataSerializer.GetMarker.WAIT_MS = 1;
+          try {
+            WaitCriterion ev = new WaitCriterion() {
+              public boolean done() {
+                return InternalDataSerializer.getSerializer((byte)120) != null;
               }
-              catch (InterruptedException e) {
-                // no need to keep interrupt bit here
-                throw new CacheException("Test interrupted") { };
+              public String description() {
+                return "DataSerializer with id 120 was never registered";
               }
-            }
-//          }
+            };
+            DistributedTestCase.waitForCriterion(ev, 30 * 1000, 10, true);
+          } finally {
+            InternalDataSerializer.GetMarker.WAIT_MS = savVal;
+          }
           IntWrapper value = (IntWrapper) region.get(key);
           assertNotNull(InternalDataSerializer.getSerializer((byte)120));
           assertNotNull(value);


[03/50] [abbrv] incubator-geode git commit: GEODE-407, GEODE-349: Fixing race in testDatastoreCommitsWithPutAllAndRI

Posted by ds...@apache.org.
GEODE-407, GEODE-349: Fixing race in testDatastoreCommitsWithPutAllAndRI

This test performed an operation on the server and then asserted that
an update happened on the client. But clients are notified
asynchronously, so the client needs to wait until the update arrives.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f5a44dcc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f5a44dcc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f5a44dcc

Branch: refs/heads/develop
Commit: f5a44dcc89c92c2bba37819af137d2fa5a8e2d79
Parents: 26fbf65
Author: Dan Smith <up...@apache.org>
Authored: Mon Oct 19 16:25:46 2015 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Mon Oct 19 16:27:12 2015 -0700

----------------------------------------------------------------------
 .../cache/ClientServerTransactionDUnitTest.java | 22 ++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f5a44dcc/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
index 1bf0953..bb47574 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
@@ -917,8 +917,6 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
     });
   }
   
-  
-  
   public void testDatastoreCommitsWithPutAllAndRI() throws Exception {
 	    Host host = Host.getHost(0);
 	    VM accessor = host.getVM(0);
@@ -959,12 +957,24 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
 	    
 	    client.invoke(new SerializableCallable() {
 	      public Object call() throws Exception {
-	        Region<CustId, Customer> custRegion = getCache().getRegion(CUSTOMER);
+	        final Region<CustId, Customer> custRegion = getCache().getRegion(CUSTOMER);
 //	        Region<OrderId, Order> orderRegion = getCache().getRegion(ORDER);
 //	        Region<CustId,Customer> refRegion = getCache().getRegion(D_REFERENCE);
-	        ClientListener cl = (ClientListener) custRegion.getAttributes().getCacheListeners()[0];
-	        assertTrue(cl.invoked);
-	        assertEquals("it should be 1 but its:"+cl.invokeCount,1,cl.invokeCount);
+	        final ClientListener cl = (ClientListener) custRegion.getAttributes().getCacheListeners()[0];
+	        waitForCriterion(new WaitCriterion() {
+                  
+                  @Override
+                  public boolean done() {
+                    return cl.invoked;
+                  }
+                  
+                  @Override
+                  public String description() {
+                    return "Listener was not invoked in 30 seconds";
+                  }
+                }, 30000, 100, true);
+	        
+	        assertEquals(1,cl.invokeCount);
 	        return null;
 	      }
 	    });


[49/50] [abbrv] incubator-geode git commit: Merge remote-tracking branch 'origin/develop' into feature/GEODE-409

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/build.gradle
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/cache/DataPolicy.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionFactory.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/AbstractIndex.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/IndexManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/HostStatSampler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/InternalDataSerializer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/control/OffHeapMemoryMonitor.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStats.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionRebalanceOp.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheServerCreation.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlGenerator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParser.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/internal/redis/RegionProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/management/cli/ConverterHint.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionCreateFunction.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/i18n/CliStrings.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/web/controllers/ShellCommandsController.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SignalledFlushObserverJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/SortedListForAsyncQueueJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/StructSetOrResultsSet.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/CopyOnReadIndexDUnitTest.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/CopyOnReadIndexDUnitTest.java
index a6e7d92,d9d3ef4..18e4f3e
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/CopyOnReadIndexDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/CopyOnReadIndexDUnitTest.java
@@@ -1,10 -1,26 +1,19 @@@
- /*=========================================================================
-  * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
-  * This product is protected by U.S. and international copyright
-  * and intellectual property laws. Pivotal products are covered by
-  * one or more patents listed at http://www.pivotal.io/patents.
-  *=========================================================================
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
   */
 -/*
 - * IndexTest.java
 - * JUnit based test
 - *
 - * Created on March 9, 2005, 3:30 PM
 - */
 -
  package com.gemstone.gemfire.cache.query.internal.index;
  
  import java.util.HashMap;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/RangeIndexAPIJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheTestCase.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml40DUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/codeAnalysis/AnalyzeSerializablesJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherLocalJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteFileJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorLauncherRemoteJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherLocalJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/ServerLauncherRemoteJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/deadlock/GemFireDeadlockDetectorDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXDebugDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXPersistentDebugDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/FDDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/NetSearchMessagingDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionRedundancyZoneDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoteTransactionDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/control/RebalanceOperationDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStatsDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/OnGroupsFunctionExecutionDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OutOfOffHeapMemoryDUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/internal/process/ProcessStreamReaderTestCase.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-core/src/test/java/com/gemstone/gemfire/redis/SortedSetsJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-rebalancer/src/main/java/com/gemstone/gemfire/cache/util/AutoBalancer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f0b81325/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerJUnitTest.java
----------------------------------------------------------------------


[37/50] [abbrv] incubator-geode git commit: Adding some debug code to log registered serializer's id

Posted by ds...@apache.org.
Adding some debug code to log registered serializer's id


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/1e6cc66d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/1e6cc66d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/1e6cc66d

Branch: refs/heads/develop
Commit: 1e6cc66d430efe6182624a3c95fc50e771107cc0
Parents: e87e3b7
Author: Jens Deppe <jd...@gopivotal.com>
Authored: Thu Oct 22 15:42:28 2015 -0700
Committer: Jens Deppe <jd...@gopivotal.com>
Committed: Thu Oct 22 15:43:28 2015 -0700

----------------------------------------------------------------------
 .../java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java   | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1e6cc66d/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
index 14ec5f3..b6b4c10 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
@@ -5450,6 +5450,8 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           IntWrapper.IntWrapperSerializer serializer =
             (IntWrapper.IntWrapperSerializer)
             DataSerializer.register(c);
+          getLogWriter().info("Registered serializer id:" + serializer.getId()
+              + " class:" + c.getName());
 
           Region region = getRootRegion().getSubregion(name);
           region.put(key, new IntWrapper(intValue));


[16/50] [abbrv] incubator-geode git commit: GEODE-429: Remove api for setting HdfsStore in Attributes

Posted by ds...@apache.org.
GEODE-429: Remove api for setting HdfsStore in Attributes


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/07d55bda
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/07d55bda
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/07d55bda

Branch: refs/heads/develop
Commit: 07d55bda1c1c9d641ca16b3b6804994ecb53bf9d
Parents: 8fb5edd
Author: Ashvin Agrawal <as...@apache.org>
Authored: Tue Oct 20 09:28:06 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:23 2015 -0700

----------------------------------------------------------------------
 .../gemfire/cache/AttributesFactory.java        | 31 --------------------
 .../java/com/gemstone/gemfire/cache/Cache.java  |  2 --
 .../gemstone/gemfire/cache/GemFireCache.java    | 10 -------
 .../admin/remote/RemoteRegionAttributes.java    |  2 +-
 .../internal/cache/xmlcache/CacheCreation.java  |  6 ----
 5 files changed, 1 insertion(+), 50 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/07d55bda/gemfire-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
index 7acd72a..406e596 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/AttributesFactory.java
@@ -20,7 +20,6 @@ import com.gemstone.gemfire.GemFireIOException;
 import com.gemstone.gemfire.cache.client.ClientCache;
 import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
 import com.gemstone.gemfire.cache.client.PoolManager;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
 import com.gemstone.gemfire.compression.Compressor;
 import com.gemstone.gemfire.internal.cache.AbstractRegion;
 import com.gemstone.gemfire.internal.cache.CustomEvictionAttributesImpl;
@@ -456,7 +455,6 @@ public class AttributesFactory<K,V> {
     this.regionAttributes.multicastEnabled = regionAttributes.getMulticastEnabled();
     this.regionAttributes.gatewaySenderIds = new CopyOnWriteArraySet<String>(regionAttributes.getGatewaySenderIds());
     this.regionAttributes.asyncEventQueueIds = new CopyOnWriteArraySet<String>(regionAttributes.getAsyncEventQueueIds());
-    this.regionAttributes.hdfsStoreName = regionAttributes.getHDFSStoreName();
     this.regionAttributes.isLockGrantor = regionAttributes.isLockGrantor(); // fix for bug 47067
     if (regionAttributes instanceof UserSpecifiedRegionAttributes) {
       this.regionAttributes.setIndexes(((UserSpecifiedRegionAttributes<K,V>) regionAttributes).getIndexes());
@@ -483,10 +481,6 @@ public class AttributesFactory<K,V> {
     }
     
     this.regionAttributes.compressor = regionAttributes.getCompressor();
-    this.regionAttributes.hdfsWriteOnly = regionAttributes.getHDFSWriteOnly();
-    if (regionAttributes instanceof UserSpecifiedRegionAttributes) {
-      this.regionAttributes.setHasHDFSWriteOnly(((UserSpecifiedRegionAttributes<K,V>) regionAttributes).hasHDFSWriteOnly());
-    }
     this.regionAttributes.offHeap = regionAttributes.getOffHeap();
   }
 
@@ -1288,31 +1282,6 @@ public class AttributesFactory<K,V> {
   }
   
   /**
-   * Sets the HDFSStore name attribute.
-   * This causes the region to use the {@link HDFSStore}.
-   * @param name the name of the HDFSstore
-   */
-  public void setHDFSStoreName(String name) {
-    //TODO:HDFS throw an exception if the region is already configured for a disk store and 
-    // vice versa
-    this.regionAttributes.hdfsStoreName = name;
-    this.regionAttributes.setHasHDFSStoreName(true);
-  }
-  
-  /**
-   * Sets the HDFS write only attribute. if the region
-   * is configured to be write only to HDFS, events that have 
-   * been evicted from memory cannot be read back from HDFS.
-   * Events are written to HDFS in the order in which they occurred.
-   */
-  public void setHDFSWriteOnly(boolean writeOnly) {
-    //TODO:HDFS throw an exception if the region is already configured for a disk store and 
-    // vice versa
-    this.regionAttributes.hdfsWriteOnly = writeOnly;
-    this.regionAttributes.setHasHDFSWriteOnly(true);
-  }
-  
-  /**
    * Sets this region's compressor for compressing entry values.
    * @since 8.0
    * @param compressor a compressor.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/07d55bda/gemfire-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/Cache.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
index c6495d0..63e8041 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/Cache.java
@@ -16,7 +16,6 @@ import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueue;
 import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
 import com.gemstone.gemfire.cache.client.ClientCache;
 import com.gemstone.gemfire.cache.client.Pool;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache.snapshot.CacheSnapshotService;
 import com.gemstone.gemfire.cache.util.GatewayConflictResolver;
@@ -27,7 +26,6 @@ import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.i18n.LogWriterI18n;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
 
 
 /** 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/07d55bda/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
index b948c5d..18455c7 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/GemFireCache.java
@@ -18,8 +18,6 @@ import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.client.ClientCache;
 import com.gemstone.gemfire.cache.client.ClientCacheFactory;
 import com.gemstone.gemfire.cache.control.ResourceManager;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
 import com.gemstone.gemfire.cache.lucene.LuceneService;
 import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;
 import com.gemstone.gemfire.distributed.DistributedSystem;
@@ -259,12 +257,4 @@ public interface GemFireCache extends RegionService {
    * @since 8.5
    */
   public LuceneService getLuceneService();
-	
-  /**
-   * Returns the HDFSStore by name or <code>null</code> if no HDFSStore is
-   * found.
-   * 
-   * @param name the name of the HDFSStore to find.
-   */
-  public HDFSStore findHDFSStore(String name);
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/07d55bda/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
index 0aa40a7..5c08516 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteRegionAttributes.java
@@ -152,7 +152,7 @@ public class RemoteRegionAttributes implements RegionAttributes,
     this.isDiskSynchronous = attr.isDiskSynchronous();
     this.gatewaySendersDescs = getDescs(attr.getGatewaySenderIds().toArray());
     this.asyncEventQueueDescs = getDescs(attr.getAsyncEventQueueIds().toArray());
-	this.hdfsStoreName = attr.getHDFSStoreName();
+  	this.hdfsStoreName = attr.getHDFSStoreName();
     this.hdfsWriteOnly = attr.getHDFSWriteOnly();
     this.compressorDesc = getDesc(attr.getCompressor());
     this.offHeap = attr.getOffHeap();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/07d55bda/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
index e4bea7f..13eea93 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheCreation.java
@@ -83,7 +83,6 @@ import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.i18n.LogWriterI18n;
 import com.gemstone.gemfire.internal.Assert;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
 import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
 import com.gemstone.gemfire.cache.hdfs.internal.HDFSIntegrationUtil;
 import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreCreation;
@@ -1378,11 +1377,6 @@ public class CacheCreation implements InternalCache, Extensible<Cache> {
   }
   
   @Override
-  public HDFSStore findHDFSStore(String storeName) {
-    return (HDFSStore)this.hdfsStores.get(storeName);
-  }
-
-  @Override
   public Collection<HDFSStoreImpl> getHDFSStores() {
     return this.hdfsStores.values();
   }


[26/50] [abbrv] incubator-geode git commit: [GEODE-409] - Fixing CacheXml41DUnitTest.testBridgeServers test by using InetSocketAddress ephemeral port set to 0 and changing the cacheServer comparison to ignore port check if it's configured to 0.

Posted by ds...@apache.org.
[GEODE-409] - Fixing CacheXml41DUnitTest.testBridgeServers test by using
InetSocketAddress ephemeral port set to 0 and changing the cacheServer comparison
to ignore port check if it's configured to 0.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/e792e29d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/e792e29d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/e792e29d

Branch: refs/heads/develop
Commit: e792e29d1d3a5d5414a9fc976182d6ec23aa0fc1
Parents: 25bce96
Author: William Markito <wm...@pivotal.io>
Authored: Wed Oct 21 13:30:10 2015 -0700
Committer: William Markito <wm...@pivotal.io>
Committed: Wed Oct 21 13:32:50 2015 -0700

----------------------------------------------------------------------
 .../cache/xmlcache/CacheServerCreation.java     | 24 ++++++++++++++------
 .../gemfire/cache30/CacheXml40DUnitTest.java    | 16 +++++++++----
 2 files changed, 28 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e792e29d/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheServerCreation.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheServerCreation.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheServerCreation.java
index d961d14..d555798 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheServerCreation.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/CacheServerCreation.java
@@ -7,11 +7,6 @@
  */
 package com.gemstone.gemfire.internal.cache.xmlcache;
 
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Set;
-
-import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.ClientSession;
 import com.gemstone.gemfire.cache.InterestRegistrationListener;
 import com.gemstone.gemfire.cache.server.CacheServer;
@@ -21,6 +16,10 @@ import com.gemstone.gemfire.internal.cache.AbstractCacheServer;
 import com.gemstone.gemfire.internal.cache.InternalCache;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+
 /**
  * Represents a {@link CacheServer} that is created declaratively.
  *
@@ -166,8 +165,7 @@ public class CacheServerCreation extends AbstractCacheServer {
   public boolean sameAs(CacheServer other) {
     ClientSubscriptionConfig cscThis = this.getClientSubscriptionConfig();
     ClientSubscriptionConfig cscOther = other.getClientSubscriptionConfig();
-    boolean result = 
-        this.getPort() == other.getPort() &&
+    boolean result = isCacheServerPortEquals(other) &&
         this.getSocketBufferSize() == other.getSocketBufferSize() &&
         this.getMaximumTimeBetweenPings() == other.getMaximumTimeBetweenPings() &&
         this.getNotifyBySubscription() == other.getNotifyBySubscription() &&
@@ -187,6 +185,18 @@ public class CacheServerCreation extends AbstractCacheServer {
     return result;
   }
 
+  /**
+   * Compare configured cacheServer port against the running cacheServer port.
+   * If the current cacheServer port is set to 0 a random ephemeral
+   * port will be used so there is no need to compare returning <code>true</code>.
+   * If a port is specified, return the proper comparison.
+   * @param other CacheServer
+   * @return
+   */
+  private boolean isCacheServerPortEquals(CacheServer other) {
+    return (this.getPort() == 0) ? true : this.getPort() == other.getPort();
+  }
+
   @Override
   public String toString()
   {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e792e29d/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml40DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml40DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml40DUnitTest.java
index acaf91a..49b6149 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml40DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml40DUnitTest.java
@@ -7,10 +7,14 @@
  */
 package com.gemstone.gemfire.cache30;
 
-import com.gemstone.gemfire.cache.*;
+import com.gemstone.gemfire.cache.CacheException;
+import com.gemstone.gemfire.cache.MirrorType;
+import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.server.CacheServer;
-import com.gemstone.gemfire.internal.AvailablePortHelper;
-import com.gemstone.gemfire.internal.cache.xmlcache.*;
+import com.gemstone.gemfire.internal.cache.xmlcache.CacheCreation;
+import com.gemstone.gemfire.internal.cache.xmlcache.CacheTransactionManagerCreation;
+import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
+import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 
 /**
  * Tests the declarative caching functionality introduced in GemFire
@@ -58,8 +62,9 @@ public class CacheXml40DUnitTest extends CacheXml30DUnitTest {
 
     CacheServer bridge1 = cache.addCacheServer();
     setBridgeAttributes(bridge1);
+
     CacheServer bridge2 = cache.addCacheServer();
-    bridge2.setPort(AvailablePortHelper.getRandomAvailableTCPPort());
+    setBridgeAttributes(bridge2);
 
     testXml(cache);
   }
@@ -70,7 +75,8 @@ public class CacheXml40DUnitTest extends CacheXml30DUnitTest {
    */
   public void setBridgeAttributes(CacheServer bridge1)
   {
-    bridge1.setPort(AvailablePortHelper.getRandomAvailableTCPPort());
+    //@see http://docs.oracle.com/javase/7/docs/api/java/net/InetSocketAddress.html#InetSocketAddress(int)
+    bridge1.setPort(0);
   }
 
   /**


[43/50] [abbrv] incubator-geode git commit: GEODE-392: Using CacheTestCase.cache in DistTXDebugDUnitTest

Posted by ds...@apache.org.
GEODE-392: Using CacheTestCase.cache in DistTXDebugDUnitTest

This test was extending CacheTestCase, but it had its own static
cache. That means that in certain cases CacheTestCase's code to close
the cache nicely before removing the disk stores files wasn't being
invoked, if CacheTestCase had a static reference to a closed cache from
a previous test.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/ca4991d8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/ca4991d8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/ca4991d8

Branch: refs/heads/develop
Commit: ca4991d89296db3bbba2adcfede517311971cd6e
Parents: 20c39d7
Author: Dan Smith <up...@apache.org>
Authored: Fri Oct 23 10:52:13 2015 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Fri Oct 23 11:01:51 2015 -0700

----------------------------------------------------------------------
 .../gemfire/disttx/DistTXDebugDUnitTest.java    | 13 +------------
 .../disttx/DistTXPersistentDebugDUnitTest.java  | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ca4991d8/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXDebugDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXDebugDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXDebugDUnitTest.java
index 668f1e6..84dd2dc 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXDebugDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXDebugDUnitTest.java
@@ -37,7 +37,6 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
   VM dataStore1 = null;
   VM dataStore2 = null;
   VM dataStore3 = null;
-  protected static Cache cache = null;
 
   public DistTXDebugDUnitTest(String name) {
     super(name);
@@ -62,20 +61,10 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
       }
     });
     InternalResourceManager.setResourceObserver(null);
-    if (cache != null) {
-      cache.close();
-    }
   }
 
   public static void createCacheInVm() {
-    Properties props = new Properties();
-//    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "fine");
-    // CacheFactory cf = new CacheFactory(props);
-    // new TxDUnit("temp").getCache(cf);
-    // new TxDUnit("temp").getCache();
-
-    InternalDistributedSystem ds = new DistTXDebugDUnitTest("temp").getSystem(props);
-    cache = CacheFactory.create(ds);
+    new DistTXDebugDUnitTest("temp").getCache();
   }
 
   protected void createCacheInAllVms() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ca4991d8/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXPersistentDebugDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXPersistentDebugDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXPersistentDebugDUnitTest.java
index 48f933c..1824caa 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXPersistentDebugDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXPersistentDebugDUnitTest.java
@@ -8,11 +8,31 @@ import com.gemstone.gemfire.cache.DiskStore;
 import com.gemstone.gemfire.cache.PartitionAttributesFactory;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.distributed.DistributedMemberDUnitTest;
+import com.gemstone.gemfire.distributed.DistributedSystemDUnitTest;
+import com.gemstone.gemfire.distributed.LocatorDUnitTest;
+import com.gemstone.gemfire.distributed.RoleDUnitTest;
+import com.gemstone.gemfire.distributed.SystemAdminDUnitTest;
+import com.gemstone.gemfire.distributed.internal.Bug40751DUnitTest;
+import com.gemstone.gemfire.distributed.internal.ConsoleDistributionManagerDUnitTest;
+import com.gemstone.gemfire.distributed.internal.DistributionAdvisorDUnitTest;
+import com.gemstone.gemfire.distributed.internal.DistributionManagerDUnitTest;
+import com.gemstone.gemfire.distributed.internal.GemFireTimeSyncServiceDUnitTest;
+import com.gemstone.gemfire.distributed.internal.ProductUseLogDUnitTest;
+import com.gemstone.gemfire.distributed.internal.deadlock.GemFireDeadlockDetectorDUnitTest;
+import com.gemstone.gemfire.distributed.internal.streaming.StreamingOperationManyDUnitTest;
+import com.gemstone.gemfire.distributed.internal.streaming.StreamingOperationOneDUnitTest;
 import com.gemstone.gemfire.internal.cache.TXManagerImpl;
 import com.gemstone.gemfire.internal.cache.execute.data.CustId;
 import com.gemstone.gemfire.internal.cache.execute.data.Customer;
+import com.gemstone.gemfire.pdx.DistributedSystemIdDUnitTest;
+import com.gemstone.gemfire.pdx.JSONPdxClientServerDUnitTest;
+import com.gemstone.gemfire.pdx.PdxDeserializationDUnitTest;
+import com.gemstone.gemfire.pdx.PdxSerializableDUnitTest;
 
 import dunit.SerializableCallable;
+import junit.framework.Test;
+import junit.framework.TestSuite;
 
 public class DistTXPersistentDebugDUnitTest extends DistTXDebugDUnitTest {
 


[27/50] [abbrv] incubator-geode git commit: Previous commit message should be GEODE-449 instead of GEODE-409

Posted by ds...@apache.org.
Previous commit message should be GEODE-449 instead of GEODE-409


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/937134b6
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/937134b6
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/937134b6

Branch: refs/heads/develop
Commit: 937134b6b2e4b1c7b0a5bcb4a05630409aa970d8
Parents: e792e29
Author: William Markito <wm...@pivotal.io>
Authored: Wed Oct 21 13:40:03 2015 -0700
Committer: William Markito <wm...@pivotal.io>
Committed: Wed Oct 21 13:40:03 2015 -0700

----------------------------------------------------------------------

----------------------------------------------------------------------



[15/50] [abbrv] incubator-geode git commit: GEODE-429: Remove HDFS RegionShortcuts

Posted by ds...@apache.org.
GEODE-429: Remove HDFS RegionShortcuts


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/b3f838ea
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/b3f838ea
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/b3f838ea

Branch: refs/heads/develop
Commit: b3f838ea6a0b0eb150dcb92b7f6e46e5ee9db1e4
Parents: ef5d9e2
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 11:55:53 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:22 2015 -0700

----------------------------------------------------------------------
 .../gemstone/gemfire/cache/RegionShortcut.java  | 50 --------------------
 .../internal/cache/GemFireCacheImpl.java        | 42 ----------------
 .../hdfs/internal/HDFSConfigJUnitTest.java      |  8 ++--
 .../hdfs/internal/HDFSEntriesSetJUnitTest.java  |  2 +-
 .../internal/hoplog/BaseHoplogTestCase.java     |  2 +-
 ...FSQueueRegionOperationsOffHeapJUnitTest.java |  2 +-
 .../cache/HDFSRegionOperationsJUnitTest.java    |  2 +-
 .../HDFSRegionOperationsOffHeapJUnitTest.java   |  2 +-
 .../HDFSRegionMBeanAttributeJUnitTest.java      |  2 +-
 9 files changed, 10 insertions(+), 102 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
index ae3cbdb..5000032 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/RegionShortcut.java
@@ -226,54 +226,4 @@ public enum RegionShortcut {
    * The actual RegionAttributes for a REPLICATE_PROXY region set the {@link DataPolicy} to {@link DataPolicy#EMPTY} and {@link Scope} to {@link Scope#DISTRIBUTED_ACK}.
    */
   REPLICATE_PROXY,  
-  
-  /**
-   * A PARTITION_HDFS has local state that is partitioned across each peer member 
-   * that created the region. 
-   * In addition its state is written to HDFS.
-   * The random access to the data in HDFS is also enabled. 
-   * The actual RegionAttributes for a PARTITION_HDFS region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION}.
-   * The HDFS event queue's property random-access is set to true. 
-   * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
-   * with {@link EvictionAction#OVERFLOW_TO_DISK}.
-   */
-  PARTITION_HDFS,  
-  
-  /**
-   * A PARTITION_REDUNDANT_HDFS has local state that is partitioned across each peer member 
-   * that created the region. 
-   * In addition its state is written to HDFS and recovered from HDFS when the region is 
-   * created. The random access to the data in HDFS is also enabled. 
-   * In addition an extra copy of the data is kept in memory.
-   * The actual RegionAttributes for a PARTITION_REDUNDANT_HDFS region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION} 
-   * and the redundant-copies to 1. The HDFS event queue's property random-access is set to true.
-   * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
-   * with {@link EvictionAction#OVERFLOW_TO_DISK}.
-   */
-  PARTITION_REDUNDANT_HDFS,  
-  
-  /**
-   * A PARTITION_WRITEONLY_HDFS_STORE has local state that is partitioned across each peer member 
-   * that created the region. 
-   * In addition its state is written to HDFS and recovered from HDFS when the region is 
-   * created. The random access to the data in HDFS is disabled. 
-   * The actual RegionAttributes for a PARTITION_WRITEONLY_HDFS_STORE region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION}. 
-   * The HDFS event queue's property write only is set as true. 
-   * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
-   * with {@link EvictionAction#OVERFLOW_TO_DISK}.
-   */
-  PARTITION_WRITEONLY_HDFS_STORE,  
-  
-  /**
-   * A PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE has local state that is partitioned across each peer member 
-   * that created the region. 
-   * In addition its state is written to HDFS and recovered from HDFS when the region is 
-   * created. The random access to the data in HDFS is disabled. 
-   * In addition an extra copy of the data is kept in memory.
-   * The actual RegionAttributes for a PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE region set the {@link DataPolicy} to {@link DataPolicy#HDFS_PARTITION} 
-   * and the redundant-copies to 1. The HDFS event queue's property write only is set as true.
-   * The {@link EvictionAttributes} are set to {@link EvictionAlgorithm#LRU_HEAP}
-   * with {@link EvictionAction#OVERFLOW_TO_DISK}.
-   */
-  PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
index 4bf0f42..0d4961b 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java
@@ -4916,48 +4916,6 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
         c.setRegionAttributes(pra.toString(), af.create());
         break;
       }
-      case PARTITION_HDFS: {
-    	  AttributesFactory af = new AttributesFactory();
-          af.setDataPolicy(DataPolicy.HDFS_PARTITION);
-          PartitionAttributesFactory paf = new PartitionAttributesFactory();
-          af.setPartitionAttributes(paf.create());
-          af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
-          af.setHDFSWriteOnly(false);
-          c.setRegionAttributes(pra.toString(), af.create());
-          break;
-        }
-      case PARTITION_REDUNDANT_HDFS: {
-    	  AttributesFactory af = new AttributesFactory();
-          af.setDataPolicy(DataPolicy.HDFS_PARTITION);
-          PartitionAttributesFactory paf = new PartitionAttributesFactory();
-          paf.setRedundantCopies(1);
-          af.setPartitionAttributes(paf.create());
-          af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
-          af.setHDFSWriteOnly(false);
-          c.setRegionAttributes(pra.toString(), af.create());
-          break;
-        }
-      case PARTITION_WRITEONLY_HDFS_STORE: {
-        AttributesFactory af = new AttributesFactory();
-          af.setDataPolicy(DataPolicy.HDFS_PARTITION);
-          PartitionAttributesFactory paf = new PartitionAttributesFactory();
-          af.setPartitionAttributes(paf.create());
-          af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
-          af.setHDFSWriteOnly(true);
-          c.setRegionAttributes(pra.toString(), af.create());
-          break;
-        }
-      case PARTITION_REDUNDANT_WRITEONLY_HDFS_STORE: {
-        AttributesFactory af = new AttributesFactory();
-          af.setDataPolicy(DataPolicy.HDFS_PARTITION);
-          PartitionAttributesFactory paf = new PartitionAttributesFactory();
-          paf.setRedundantCopies(1);
-          af.setPartitionAttributes(paf.create());
-          af.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK));
-          af.setHDFSWriteOnly(true);
-          c.setRegionAttributes(pra.toString(), af.create());
-          break;
-        }
       default:
         throw new IllegalStateException("unhandled enum " + pra);
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
index a1c9eb1..b0c6520 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
@@ -71,7 +71,7 @@ public class HDFSConfigJUnitTest extends TestCase {
       try {
         HDFSStoreFactory hsf = this.c.createHDFSStoreFactory();
         HDFSStore store = hsf.create("myHDFSStore");
-        RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+        RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
         Region r1 = rf1.setHDFSStoreName("myHDFSStore").create("r1");
        
         r1.put("k1", "v1");
@@ -89,7 +89,7 @@ public class HDFSConfigJUnitTest extends TestCase {
         hsf = this.c.createHDFSStoreFactory();
         hsf.create("myHDFSStore");
         
-        r1 = this.c.createRegionFactory(RegionShortcut.PARTITION_WRITEONLY_HDFS_STORE).setHDFSStoreName("myHDFSStore")
+        r1 = this.c.createRegionFactory(RegionShortcut.PARTITION).setHDFSStoreName("myHDFSStore")
               .create("r1");
        
         r1.put("k1", "v1");
@@ -126,7 +126,7 @@ public class HDFSConfigJUnitTest extends TestCase {
         hsf.create("myHDFSStore");
         
         
-        r1 = this.c.createRegionFactory(RegionShortcut.PARTITION_WRITEONLY_HDFS_STORE).setHDFSStoreName("myHDFSStore")
+        r1 = this.c.createRegionFactory(RegionShortcut.PARTITION).setHDFSStoreName("myHDFSStore")
             .setHDFSWriteOnly(true).create("r1");
        
         r1.put("k1", "v1");
@@ -467,7 +467,7 @@ public class HDFSConfigJUnitTest extends TestCase {
       float percentage = 100 * (float) blockCacheSize / (float) heapSize;
       hsf.setBlockCacheSize(percentage);
       HDFSStoreImpl store = (HDFSStoreImpl) hsf.create("myHDFSStore");
-      RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+      RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
       //Create a region that evicts everything
       LocalRegion r1 = (LocalRegion) rf1.setHDFSStoreName("myHDFSStore").setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1)).create("r1");
      

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
index 75dfa93..f864176 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSEntriesSetJUnitTest.java
@@ -72,7 +72,7 @@ public class HDFSEntriesSetJUnitTest extends TestCase {
     PartitionAttributesFactory paf = new PartitionAttributesFactory();
     paf.setTotalNumBuckets(1);
     
-    RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION);
     region = (PartitionedRegion) rf.setHDFSStoreName("test").setPartitionAttributes(paf.create()).create("test");
     
     // prime the region so buckets get created

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
index eb713c0..b35f756 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/hoplog/BaseHoplogTestCase.java
@@ -89,7 +89,7 @@ public abstract class BaseHoplogTestCase extends TestCase {
     configureHdfsStoreFactory();
     hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
 
-    regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
     regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
     region = regionfactory.create(getName());
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
index f28c138..4565568 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
@@ -33,7 +33,7 @@ public class HDFSQueueRegionOperationsOffHeapJUnitTest extends HDFSQueueRegionOp
   }
   @Override
   protected Region<Integer, String> createRegion(String regionName) {
-    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
     PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
     rf.setPartitionAttributes(prAttr);
     rf.setOffHeap(true);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
index 50b213a..b24ee5d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
@@ -105,7 +105,7 @@ public class HDFSRegionOperationsJUnitTest extends TestCase {
   }
 
   protected Region<Integer, String> createRegion(String regionName) {
-    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
     PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
     rf.setPartitionAttributes(prAttr);
     rf.setHDFSStoreName(hdfsStore.getName());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
index 421cd28..f9c96a2 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
@@ -55,7 +55,7 @@ public class HDFSRegionOperationsOffHeapJUnitTest extends HDFSRegionOperationsJU
   }
   @Override
   protected Region<Integer, String> createRegion(String regionName) {
-    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
     PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
     rf.setPartitionAttributes(prAttr);
     rf.setOffHeap(true);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b3f838ea/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
index 38145d1..c563d5a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/HDFSRegionMBeanAttributeJUnitTest.java
@@ -76,7 +76,7 @@ public class HDFSRegionMBeanAttributeJUnitTest extends TestCase {
     configureHdfsStoreFactory();
     hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
 
-    RegionFactory<Object, Object> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
+    RegionFactory<Object, Object> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
     regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
 
     // regionfactory.setCompressionCodec("Some");


[29/50] [abbrv] incubator-geode git commit: GEODE-408: Fixed race condition in tests

Posted by ds...@apache.org.
GEODE-408: Fixed race condition in tests

- For local functions stats might be checked before they are actually
  updated.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/ec307d2c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/ec307d2c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/ec307d2c

Branch: refs/heads/develop
Commit: ec307d2cb3e6c21fe04abf8a000189e5dd2fcafe
Parents: 5c7bbd0
Author: Jens Deppe <jd...@gopivotal.com>
Authored: Wed Oct 21 14:01:40 2015 -0700
Committer: Jens Deppe <jd...@gopivotal.com>
Committed: Wed Oct 21 16:02:33 2015 -0700

----------------------------------------------------------------------
 .../cache/execute/FunctionServiceStats.java     |  3 +-
 .../execute/FunctionServiceStatsDUnitTest.java  | 43 ++++++++++++++++++--
 2 files changed, 40 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ec307d2c/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStats.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStats.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStats.java
index 26ebf5e..f491f7d 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStats.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStats.java
@@ -13,7 +13,6 @@ import com.gemstone.gemfire.StatisticsFactory;
 import com.gemstone.gemfire.StatisticsType;
 import com.gemstone.gemfire.StatisticsTypeFactory;
 import com.gemstone.gemfire.distributed.internal.DistributionStats;
-import com.gemstone.gemfire.internal.NanoTimer;
 import com.gemstone.gemfire.internal.StatisticsTypeFactoryImpl;
 
 public class FunctionServiceStats {
@@ -406,7 +405,7 @@ public class FunctionServiceStats {
       // Increment function execution with haveResult = true complete processing time
       this._stats.incLong(_functionExecutionsHasResultCompletedProcessingTimeId, elapsed);
     }
-    
+
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ec307d2c/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStatsDUnitTest.java
index 20b1e00..3cf714d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStatsDUnitTest.java
@@ -155,7 +155,26 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
       return Boolean.TRUE;
     }
   };
-  
+
+  /*
+   * This helper method prevents race conditions in local functions. Typically, when
+   * calling ResultCollector.getResult() one might expect the function to have completed.
+   * For local functions this is true, however, at this point the function stats may
+   * not have been updated yet thus any code which checks stats after calling getResult()
+   * may get wrong data.
+   */
+  private void waitNoFunctionsRunning(FunctionServiceStats stats) {
+    int count = 100;
+    while (stats.getFunctionExecutionsRunning() > 0 && count > 0) {
+      count--;
+      try {
+        Thread.sleep(50);
+      } catch (InterruptedException ex) {
+        // Ignored
+      }
+    }
+  }
+
   /*
    * 1-client 3-Servers 
    * Function : TEST_FUNCTION2 
@@ -250,6 +269,8 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
         InternalDistributedSystem iDS = (InternalDistributedSystem)cache.getDistributedSystem();
         FunctionServiceStats functionServiceStats = iDS
             .getFunctionServiceStats();
+        waitNoFunctionsRunning(functionServiceStats);
+
         assertEquals(noOfExecutionCalls_Aggregate, functionServiceStats
             .getFunctionExecutionCalls());
         assertEquals(noOfExecutionsCompleted_Aggregate, functionServiceStats
@@ -286,7 +307,9 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
             .getDistributedSystem();
         FunctionServiceStats functionServiceStats = iDS
             .getFunctionServiceStats();
-        //functions are executed 3 times 
+        waitNoFunctionsRunning(functionServiceStats);
+
+        //functions are executed 3 times
         noOfExecutionCalls_Aggregate +=3;
         assertTrue(functionServiceStats
             .getFunctionExecutionCalls() >= noOfExecutionCalls_Aggregate);
@@ -507,7 +530,8 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
             .getDistributedSystem();
         FunctionServiceStats functionServiceStats = iDS
             .getFunctionServiceStats();
-        
+        waitNoFunctionsRunning(functionServiceStats);
+
         assertEquals(noOfExecutionCalls_Aggregate, functionServiceStats
             .getFunctionExecutionCalls());
         assertEquals(noOfExecutionsCompleted_Aggregate, functionServiceStats
@@ -609,6 +633,8 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
             .getDistributedSystem();
         FunctionServiceStats functionServiceStats = iDS
             .getFunctionServiceStats();
+        waitNoFunctionsRunning(functionServiceStats);
+
         assertEquals(noOfExecutionCalls_Aggregate, functionServiceStats
             .getFunctionExecutionCalls());
         assertEquals(noOfExecutionsCompleted_Aggregate, functionServiceStats
@@ -646,6 +672,8 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
             .getDistributedSystem();
         FunctionServiceStats functionServiceStats = iDS
             .getFunctionServiceStats();
+        waitNoFunctionsRunning(functionServiceStats);
+
         // functions are executed 2 times
         noOfExecutionCalls_Aggregate += 2;
         assertEquals(noOfExecutionCalls_Aggregate, functionServiceStats
@@ -827,6 +855,8 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
         InternalDistributedSystem iDS = ((InternalDistributedSystem)getCache()
             .getDistributedSystem());
         FunctionServiceStats functionServiceStats = iDS.getFunctionServiceStats();
+        waitNoFunctionsRunning(functionServiceStats);
+
         assertEquals(noOfExecutionCalls_Aggregate, functionServiceStats
             .getFunctionExecutionCalls());
         assertEquals(noOfExecutionsCompleted_Aggregate, functionServiceStats
@@ -860,6 +890,8 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
             .getDistributedSystem());
         //3 Function Executions took place 
         FunctionServiceStats functionServiceStats = iDS.getFunctionServiceStats();
+        waitNoFunctionsRunning(functionServiceStats);
+
         noOfExecutionCalls_Aggregate += 3;
         noOfExecutionsCompleted_Aggregate += 3;
         assertEquals(noOfExecutionCalls_Aggregate, functionServiceStats
@@ -1119,7 +1151,8 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
         "checkFunctionExecutionStatsForMember1") {
       public Object call() throws Exception {
         FunctionServiceStats functionServiceStats = ds.getFunctionServiceStats();
-        
+        waitNoFunctionsRunning(functionServiceStats);
+
         assertEquals(noOfExecutionCalls_Aggregate, functionServiceStats
             .getFunctionExecutionCalls());
         assertEquals(noOfExecutionsCompleted_Aggregate, functionServiceStats
@@ -1140,6 +1173,8 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
         "checkFunctionExecutionStatsForOtherMember") {
       public Object call() throws Exception {
         FunctionServiceStats functionServiceStats = ds.getFunctionServiceStats();
+        waitNoFunctionsRunning(functionServiceStats);
+
         // One function Execution took place on there members
         //noOfExecutionCalls_Aggregate++;
         //noOfExecutionsCompleted_Aggregate++;


[48/50] [abbrv] incubator-geode git commit: removed debug logging accidentally checked in

Posted by ds...@apache.org.
removed debug logging accidentally checked in


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/daa0725d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/daa0725d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/daa0725d

Branch: refs/heads/develop
Commit: daa0725d8e108032ba39c8064271123813ee7655
Parents: 2aec6a5
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Mon Oct 26 09:58:34 2015 -0700
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Mon Oct 26 09:58:34 2015 -0700

----------------------------------------------------------------------
 .../java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/daa0725d/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
index 515a690..494efaf 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
@@ -1649,7 +1649,6 @@ RETRY_LOOP:
                 retry = true;
                 continue RETRY_LOOP;
               }
-              logger.info("DARREL: destroy " + re.getKey() + " event.isOriginRemote()=" + event.isOriginRemote() + " event.getOperation().isExpiration()=" + event.getOperation().isExpiration() + " re.isInUseByTransaction()=" + re.isInUseByTransaction());
               if (!event.isOriginRemote() && event.getOperation().isExpiration()) {
                 // If this expiration started locally then only do it if the RE is not being used by a tx.
                 if (re.isInUseByTransaction()) {


[18/50] [abbrv] incubator-geode git commit: GEODE-429: Remove HDFS persistence DataPolicy

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java
deleted file mode 100644
index 2f0cb3f..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/RegionWithHDFSPersistenceBasicDUnitTest.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.File;
-
-import com.gemstone.gemfire.cache.AttributesFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-
-import dunit.SerializableCallable;
-
-@SuppressWarnings({ "serial", "rawtypes", "deprecation" })
-public class RegionWithHDFSPersistenceBasicDUnitTest extends
-    RegionWithHDFSBasicDUnitTest {
-
-  public RegionWithHDFSPersistenceBasicDUnitTest(String name) {
-    super(name);
-  }
-
-  @Override
-  protected SerializableCallable getCreateRegionCallable(final int totalnumOfBuckets,
-      final int batchSizeMB, final int maximumEntries, final String folderPath,
-      final String uniqueName, final int batchInterval, final boolean queuePersistent,
-      final boolean writeonly, final long timeForRollover, final long maxFileSize) {
-    SerializableCallable createRegion = new SerializableCallable() {
-      public Object call() throws Exception {
-        AttributesFactory af = new AttributesFactory();
-        af.setDataPolicy(DataPolicy.HDFS_PERSISTENT_PARTITION);
-        PartitionAttributesFactory paf = new PartitionAttributesFactory();
-        paf.setTotalNumBuckets(totalnumOfBuckets);
-        paf.setRedundantCopies(1);
-        
-        af.setHDFSStoreName(uniqueName);
-        
-        af.setPartitionAttributes(paf.create());
-        HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
-        // Going two level up to avoid home directories getting created in
-        // VM-specific directory. This avoids failures in those tests where
-        // datastores are restarted and bucket ownership changes between VMs.
-        homeDir = new File(tmpDir + "/../../" + folderPath).getCanonicalPath();
-        hsf.setHomeDir(homeDir);
-        hsf.setBatchSize(batchSizeMB);
-        hsf.setBufferPersistent(queuePersistent);
-        hsf.setMaxMemory(3);
-        hsf.setBatchInterval(batchInterval);
-        if (timeForRollover != -1) {
-          hsf.setWriteOnlyFileRolloverInterval((int)timeForRollover);
-          System.setProperty("gemfire.HDFSRegionDirector.FILE_ROLLOVER_TASK_INTERVAL_SECONDS", "1");
-        }
-        if (maxFileSize != -1) {
-          hsf.setWriteOnlyFileRolloverSize((int) maxFileSize);
-        }
-        hsf.create(uniqueName);
-        
-        af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maximumEntries, EvictionAction.LOCAL_DESTROY));
-        
-        af.setHDFSWriteOnly(writeonly);
-        Region r = createRootRegion(uniqueName, af.create());
-        ((LocalRegion)r).setIsTest();
-        
-        return 0;
-      }
-    };
-    return createRegion;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java
deleted file mode 100644
index 5e2ba4f..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsJUnitTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.internal.cache;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-/**
- * Tests regions operations when entries are not yet persisted
- * in HDFS but are in HDFSAsyncQueue
- * @author sbawaska
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSQueueRegionOperationsJUnitTest extends
-    HDFSRegionOperationsJUnitTest {
-
-  @Override
-  protected int getBatchTimeInterval() {
-    return 50*1000;
-  }
-
-  @Override
-  protected void sleep(String regionPath) {
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
deleted file mode 100644
index 24cd1dc..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSQueueRegionOperationsOffHeapJUnitTest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.internal.cache;
-
-import java.util.Properties;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.PartitionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSQueueRegionOperationsOffHeapJUnitTest extends HDFSQueueRegionOperationsJUnitTest {
-  static {
-    System.setProperty("gemfire.trackOffHeapRefCounts", "true");
-  }
-  
-  @Override
-  public void tearDown() throws Exception {
-    super.tearDown();
-    OffHeapTestUtil.checkOrphans();
-  }
-  @Override
-  protected Region<Integer, String> createRegion(String regionName) {
-    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
-    PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
-    rf.setPartitionAttributes(prAttr);
-    rf.setOffHeap(true);
-//    rf.setHDFSStoreName(hdfsStore.getName());
-    Region<Integer, String> r = rf.create(regionName);
-//    addListener(r);
-    
-    ((PartitionedRegion) r).setQueryHDFS(true);
-    return r;
-  }
-  @Override
-  protected Properties getDSProps() {
-    Properties props = super.getDSProps();
-    props.setProperty("off-heap-memory-size", "50m");
-    return props;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
deleted file mode 100644
index d96e31b..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsJUnitTest.java
+++ /dev/null
@@ -1,542 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.internal.cache;
-
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.Random;
-import java.util.Set;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.fs.Path;
-import org.junit.FixMethodOrder;
-import org.junit.experimental.categories.Category;
-import org.junit.runners.MethodSorters;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.DataPolicy;
-import com.gemstone.gemfire.cache.DiskStore;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAlgorithm;
-import com.gemstone.gemfire.cache.PartitionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionAttributes;
-import com.gemstone.gemfire.cache.RegionDestroyedException;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
-import com.gemstone.gemfire.cache.hdfs.internal.cardinality.HyperLogLog;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceType;
-import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-/**
- * Tests that region operations work as expected when data is in HDFS.
- * This test explicitly clears in-memory ConcurrentHashMap that back
- * AbstractRegionMap before validating region operations.
- * 
- * @author sbawaska
- */
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSRegionOperationsJUnitTest extends TestCase {
-
-  protected Cache cache;
-  protected HDFSStore hdfsStore;
-
-  public void setUp() throws Exception {
-    Properties props = getDSProps();
-    cache = new CacheFactory(props).create();
-    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
-    String storeName = getName()+"-store";
-    HDFSStoreFactory hsf = cache.createHDFSStoreFactory();
-    hsf.setHomeDir(getName()+"-test");
-    hsf.setBatchInterval(getBatchTimeInterval());
-    hdfsStore = hsf.create(storeName);
-  }
-
-  protected Properties getDSProps() {
-    Properties props = new Properties();
-    props.put("mcast-port", "0");
-    props.put("locators", "");
-    props.put("log-level", "config");
-    return props;
-  }
-
-  public void tearDown() throws Exception {
-    for (Region r : cache.rootRegions()) {
-      if (r != null) {
-        r.close();
-      }
-    }
-
-    if (cache.getRegion(getName()) != null) {
-      cache.getRegion(getName()).destroyRegion();
-    }
-    DiskStore ds = cache.findDiskStore(null);
-    if (ds != null) {
-      ds.destroy();
-    }
-    
-    ((HDFSStoreImpl)hdfsStore).getFileSystem().delete(new Path(hdfsStore.getHomeDir()), true);
-  }
-
-  protected int getBatchTimeInterval() {
-    return 1000;
-  }
-
-  protected Region<Integer, String> createRegion(String regionName) {
-    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
-    PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
-    rf.setPartitionAttributes(prAttr);
-//    rf.setHDFSStoreName(hdfsStore.getName());
-    Region<Integer, String> r = rf.create(regionName);
-    
-    ((PartitionedRegion) r).setQueryHDFS(true);
-    return r;
-  }
-
-  protected void clearBackingCHM(Region<Integer, String> r) {
-    PartitionedRegion pr = (PartitionedRegion)r;
-    for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
-      assertTrue(br.getRegionMap() instanceof HDFSRegionMap);
-      ((AbstractRegionMap)br.getRegionMap())._getMap().clear();
-      // wait here to make sure that the queue has been flushed
-    }
-    sleep(pr.getFullPath());
-  }
-
-  protected void sleep(String regionPath) {
-    String qname = HDFSStoreFactoryImpl.getEventQueueName(regionPath);
-    GemFireCacheImpl.getExisting().waitForSenderQueueFlush(qname, true, 30);
-  }
-
-  public void test010PUTDMLSupport() {
-    Region<Integer, String> r = createRegion(getName());
-    SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
-    assertEquals(0, stats.getRead().getCount());
-    for (int i=0; i<100; i++) {
-      r.put(i, "value"+i);
-    }
-    assertEquals(100, stats.getRead().getCount());
-    sleep(r.getFullPath());
-    clearBackingCHM(r);
-    LocalRegion lr = (LocalRegion) r;
-    for (int i=0; i<200; i++) {
-      EntryEventImpl ev = lr.newPutEntryEvent(i, "value"+i, null);
-      lr.validatedPut(ev, System.currentTimeMillis());
-    }
-    // verify that read count on HDFS does not change
-    assertEquals(100, stats.getRead().getCount());
-    sleep(r.getFullPath());
-    clearBackingCHM(r);
-    for (int i=0; i<200; i++) {
-      assertEquals("value"+i, r.get(i));
-    }
-    if (getBatchTimeInterval() > 1000) {
-      // reads from async queue
-      assertEquals(100, stats.getRead().getCount());
-    } else {
-      assertEquals(300, stats.getRead().getCount());
-    }
-  }
-
-  public void test020GetOperationalData() throws Exception {
-    Region<Integer, String> r = createRegion(getName());
-    SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
-    assertEquals(0, stats.getRead().getCount());
-    for (int i=0; i<100; i++) {
-      r.put(i, "value"+i);
-    }
-    int expectedReadsFromHDFS = 100;
-    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
-    sleep(r.getFullPath());
-    clearBackingCHM(r);
-    LocalRegion lr = (LocalRegion) r;
-    for (int i=0; i<200; i++) {
-      if (i < 100) {
-        assertEquals("value"+i, r.get(i));
-      } else {
-        assertNull(r.get(i));
-      }
-    }
-    if (getBatchTimeInterval() > 1000) {
-      // reads from async queue
-      expectedReadsFromHDFS = 200; // initial 100 + 100 for misses
-    } else {
-      expectedReadsFromHDFS = 300; // initial 100 + 200 for reads
-    }
-    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
-    for (int i=0; i<200; i++) {
-      assertNull(lr.get(i, null, true, false, false, null, null, false, false/*allowReadFromHDFS*/));
-    }
-    // no increase in HDFS reads
-    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
-    
-    /**MergeGemXDHDFSToGFE Have not merged this API as this api is not called by any code*/ 
-    //   test the dataView API
-    //for (int i=0; i<200; i++) {
-    //  assertNull(lr.getDataView().getLocally(i, null, i%10, lr, true, true, null, null, false, false/*allowReadFromHDFS*/));
-    //}
-    // no increase in HDFS reads
-    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
-  }
-  
-  public void test030RemoveOperationalData() throws Exception {
-    Region<Integer, String> r = createRegion(getName());
-    SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
-    assertEquals(0, stats.getRead().getCount());
-    for (int i=0; i<100; i++) {
-      r.put(i, "value"+i);
-    }
-    int expectedReadsFromHDFS = 100;
-    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
-    sleep(r.getFullPath());
-    PartitionedRegion lr = (PartitionedRegion) r;
-    for(int i =0; i < 50; i++) {
-      lr.getBucketRegion(i).customEvictDestroy(i);
-    }
-    for (int i=0; i<200; i++) {
-      if (i < 100) {
-        assertEquals("value"+i, r.get(i));
-      } else {
-        assertNull(r.get(i));
-      }
-    }
-    if (getBatchTimeInterval() > 1000) {
-      // reads from async queue
-      expectedReadsFromHDFS = 200; // initial 100 + 100 for misses
-    } else {
-      expectedReadsFromHDFS = 250; // initial 100 + 200 for reads + 50 for 
-    }
-    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
-    for (int i=0; i<50; i++) {
-      assertNull(lr.get(i, null, true, false, false, null,  null, false, false/*allowReadFromHDFS*/));
-    }
-    for (int i=50; i<100; i++) {
-      assertEquals("value"+i, lr.get(i, null, true, false, false, null,null, false, false/*allowReadFromHDFS*/));
-    }
-    for (int i=100; i<200; i++) {
-      assertNull(lr.get(i, null, true, false, false, null,  null, false, false/*allowReadFromHDFS*/));
-    }
-    // no increase in HDFS reads
-    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
-  }
-
-  public void _test040NoAutoEviction() throws Exception {
-    if (!cache.isClosed()) {
-      tearDown();
-      cache.close();
-      System.setProperty("gemfire.disableAutoEviction", "true");
-      setUp();
-    }
-    Region<Integer, String> r = createRegion(getName());
-    System.setProperty("gemfire.disableAutoEviction", "false");
-    for (int i =0; i<5; i++) {
-      r.put(i, "value"+i);
-    }
-    PartitionedRegion pr = (PartitionedRegion) r;
-    BucketRegion br = pr.getBucketRegion(1);
-    assertNotNull(br.getAttributes().getEvictionAttributes());
-    assertEquals(EvictionAlgorithm.NONE, br.getAttributes().getEvictionAttributes().getAlgorithm());
-
-    GemFireCacheImpl cache = (GemFireCacheImpl) r.getCache();
-    assertEquals(0.0f, cache.getResourceManager().getEvictionHeapPercentage());
-  }
-
-  public void test050LRURegionAttributesForPR() {
-    RegionFactory<Integer, String> rf = cache.createRegionFactory();
-//    rf.setHDFSStoreName(hdfsStore.getName());
-    rf.setDataPolicy(DataPolicy.HDFS_PARTITION);
-    verifyLRURegionAttributesForPR(rf.create(getName()));
-  }
-
-  public void test060LRURegionAttributesForRegionShortcutPR() {
-    verifyLRURegionAttributesForPR(createRegion(getName()));
-  }
-
-  private void verifyLRURegionAttributesForPR(Region r) {
-    for (int i =0; i<200; i++) {
-      r.put(i, "value"+i);
-    }
-    RegionAttributes<Integer, String> ra = r.getAttributes();
-    assertNotNull(ra.getEvictionAttributes());
-    // default eviction action for region shortcut
-    assertEquals(EvictionAction.OVERFLOW_TO_DISK, ra.getEvictionAttributes().getAction());
-
-    GemFireCacheImpl cache = (GemFireCacheImpl) r.getCache();
-    assertEquals(80.0f, cache.getResourceManager().getEvictionHeapPercentage());
-    DiskStore ds = cache.findDiskStore(null);
-    assertNotNull(ds);
-    Set s = cache.getResourceManager().getResourceListeners(ResourceType.HEAP_MEMORY);
-    Iterator it = s.iterator();
-    boolean regionFound = false;
-    while (it.hasNext()) {
-      Object o = it.next();
-      if (o instanceof PartitionedRegion) {
-        PartitionedRegion pr = (PartitionedRegion) o;
-        if (getName().equals(pr.getName())) {
-          regionFound = true;
-        } else {
-          continue;
-        }
-        for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
-          assertNotNull(br.getAttributes().getEvictionAttributes());
-          assertEquals(EvictionAlgorithm.LRU_HEAP, br.getAttributes().getEvictionAttributes().getAlgorithm());
-          assertEquals(EvictionAction.OVERFLOW_TO_DISK, br.getAttributes().getEvictionAttributes().getAction());
-        }
-      }
-    }
-    assertTrue(regionFound);
-
-  }
-
-  public void test070SizeEstimate() {
-    Region<Integer, String> r = createRegion(getName());
-    int size = 226;
-    Random rand = new Random();
-    for (int i=0; i<size; i++) {
-      r.put(rand.nextInt(), "value"+i);
-    }
-    // size before flush
-    LocalRegion lr = (LocalRegion) r;
-    long estimate = lr.sizeEstimate();
-    double err = Math.abs(estimate - size) / (double) size;
-    // on a busy system flush might start before we call estimateSize, so rather than equality,
-    // test for error margin. fixes bug 49381
-    assertTrue("size:"+size+" estimate:"+estimate, err < 0.02 * 10); // each bucket can have an error of 0.02
-
-    // size after flush
-    sleep(r.getFullPath());
-    estimate = lr.sizeEstimate();
-    err = Math.abs(estimate - size) / (double) size;
-    assertTrue("size:"+size+" estimate:"+estimate, err < 0.02 * 10); // each bucket can have an error of 0.02
-  }
-
-  public void test080PutGet() throws InterruptedException {
-    Region<Integer, String> r = createRegion(getName());
-    for (int i=0; i<100; i++) {
-      r.put(i, "value"+i);
-    }
-    clearBackingCHM(r);
-    for (int i=0; i<100; i++) {
-      assertEquals("value"+i, r.get(i));
-    }
-    
-    //Do a put while there are entries in the map
-    r.put(0, "value"+0);
-    
-    r.destroy(1, "value"+1);
-  }
-
-  public void test090Delete() {
-    Region<Integer, String> r = createRegion(getName());
-    for (int i=0; i<11; i++) {
-      r.put(i, "value"+i);
-    }
-    clearBackingCHM(r);
-    int delKey = 9;
-    r.destroy(delKey);
-    assertNull(r.get(delKey));
-    assertFalse(r.containsKey(delKey));
-  }
-
-  public void test100Invalidate() {
-    Region<Integer, String> r = createRegion(getName());
-    for (int i=0; i<100; i++) {
-      r.put(i, "value"+i);
-    }
-    clearBackingCHM(r);
-    int invKey = 9;
-    r.invalidate(invKey);
-    assertNull(r.get(invKey));
-    assertTrue(r.containsKey(invKey));
-  }
-
-  public void test110Size() {
-    Region<Integer, String> r = createRegion(getName());
-    for (int i=0; i<100; i++) {
-      r.put(i, "value"+i);
-    }
-    clearBackingCHM(r);
-    assertEquals(100, r.size());
-    r.destroy(45);
-    assertEquals(99, r.size());
-    r.invalidate(55);
-    r.invalidate(65);
-    assertEquals(99, r.size());
-  }
-
-  public void test120KeyIterator() {
-    Region<Integer, String> r = createRegion(getName());
-    for (int i=0; i<100; i++) {
-      r.put(i, "value"+i);
-    }
-    clearBackingCHM(r);
-    Set<Integer> keys = r.keySet();
-    int c = 0;
-    for (int i : keys) {
-//      assertEquals(c, i);
-      c++;
-    }
-    assertEquals(100, c);
-    assertEquals(100, keys.size());
-    int delKey = 88;
-    r.destroy(delKey);
-    r.invalidate(39);
-    keys = r.keySet();
-    c = 0;
-    for (int i : keys) {
-      if (c == delKey) {
-        c++;
-      }
-//      assertEquals(c, i);
-      c++;
-    }
-    assertEquals(99, keys.size());
-  }
-
-  public void test130EntriesIterator() {
-    Region<Integer, String> r = createRegion(getName());
-    for (int i=0; i<100; i++) {
-      r.put(i, "value"+i);
-    }
-    clearBackingCHM(r);
-    Set<Entry<Integer, String>> entries = r.entrySet();
-    int c = 0;
-    for (Entry<Integer, String> e : entries) {
-//      assertEquals(c, (int) e.getKey());
-      assertEquals("value"+e.getKey(), e.getValue());
-      c++;
-    }
-    assertEquals(100, c);
-    assertEquals(100, entries.size());
-    int delKey = 88;
-    r.destroy(delKey);
-    int invKey = 39;
-    r.invalidate(invKey);
-    entries = r.entrySet();
-    c = 0;
-    for (Entry<Integer, String> e : entries) {
-      if (c == delKey) {
-        c++;
-      } else if (e.getKey() == invKey) {
-//        assertEquals(c, (int) e.getKey());
-        assertNull(e.getValue());
-      } else {
-//        assertEquals(c, (int) e.getKey());
-        assertEquals("value"+e.getKey(), e.getValue());
-      }
-      c++;
-    }
-    assertEquals(99, entries.size());
-  }
-
-  public void test140ContainsKey() {
-    Region<Integer, String> r = createRegion(getName());
-    for (int i=0; i<100; i++) {
-      r.put(i, "value"+i);
-    }
-    clearBackingCHM(r);
-    assertTrue(r.containsKey(80));
-    r.destroy(80);
-    assertFalse(r.containsKey(80));
-    r.invalidate(64);
-    assertTrue(r.containsKey(64));
-  }
-
-  public void test150ContainsValue() {
-    Region<Integer, String> r = createRegion(getName());
-    for (int i=0; i<100; i++) {
-      r.put(i, "value"+i);
-    }
-    clearBackingCHM(r);
-    assertTrue(r.containsValue("value45"));
-    r.destroy(45);
-    assertFalse(r.containsValue("value45"));
-    r.invalidate(64);
-    assertFalse(r.containsValue("value64"));
-  }
-
-  public void test160DestroyRegion() {
-    Region<Integer, String> r = createRegion(getName());
-    for (int i=0; i<100; i++) {
-      r.put(i, "value"+i);
-    }
-    clearBackingCHM(r);
-    r.destroyRegion();
-    try {
-      r.get(3);
-      fail("expected exception not thrown");
-    } catch (RegionDestroyedException expected) {
-    }
-  }
-
-  public void test170PutIfAbsent() {
-    Region<Integer, String> r = createRegion(getName());
-    r.put(1, "value1");
-    clearBackingCHM(r);
-    assertEquals("value1", r.putIfAbsent(1, "value2"));
-  }
-
-  public void test180Replace() {
-    Region<Integer, String> r = createRegion(getName());
-    assertNull(r.replace(1, "value"));
-    r.put(1, "value1");
-    clearBackingCHM(r);
-    assertEquals("value1", r.replace(1, "value2"));
-  }
-
-  public void test190ReplaceKVV() {
-    Region<Integer, String> r = createRegion(getName());
-    assertFalse(r.replace(1, "oldValue", "newValue"));
-    r.put(1, "value1");
-    clearBackingCHM(r);
-    assertTrue(r.replace(1, "value1", "value2"));
-  }
-
-  public void test200Accuracy() throws IOException {
-    double sum=0.0;
-    int iter = 10;
-    for (int t=0; t<iter; t++) {
-      Random r = new Random();
-      HashSet<Integer> vals = new HashSet<Integer>();
-      HyperLogLog hll = new HyperLogLog(0.03);
-      //HyperLogLog hll = new HyperLogLog(0.1);
-      double accuracy = 0.0;
-      for (int i = 0; i < 2 * 1000000; i++) {
-        int val = r.nextInt();
-        vals.add(val);
-        hll.offer(val);
-      }
-      long size = vals.size();
-      long est = hll.cardinality();
-      
-      accuracy = 100.0 * (size - est) / est;
-      System.out.printf("Accuracy is %f hll size is %d\n", accuracy, hll.getBytes().length);
-      sum+=Math.abs(accuracy);
-    }
-    double avgAccuracy = sum/(iter*1.0);
-    System.out.println("Avg accuracy is:"+avgAccuracy);
-    assertTrue(avgAccuracy < 6);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1b4fd2fe/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
deleted file mode 100644
index de2aae3..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HDFSRegionOperationsOffHeapJUnitTest.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-package com.gemstone.gemfire.internal.cache;
-
-import java.util.Iterator;
-import java.util.Properties;
-
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.PartitionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.internal.util.concurrent.CustomEntryConcurrentHashMap;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest
-;
-
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSRegionOperationsOffHeapJUnitTest extends HDFSRegionOperationsJUnitTest {
-  static {
-    System.setProperty("gemfire.trackOffHeapRefCounts", "true");
-    System.setProperty("gemfire.trackOffHeapFreedRefCounts", "true");
-  }
-
-  @Override
-  protected void clearBackingCHM(Region<Integer, String> r) {
-    PartitionedRegion pr = (PartitionedRegion)r;
-    for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
-      assertTrue(br.getRegionMap() instanceof HDFSRegionMap);
-      CustomEntryConcurrentHashMap chm = ((AbstractRegionMap)br.getRegionMap())._getMap();
-      Iterator it = chm.keySet().iterator();
-      while (it.hasNext()) {
-        Object key = it.next();
-        OffHeapRegionEntry re = (OffHeapRegionEntry) chm.remove(key);
-        assert re != null;
-        re.release();
-      }
-      // wait here to make sure that the queue has been flushed
-    }
-    sleep(pr.getFullPath());
-  }
-
-  @Override
-  public void tearDown() throws Exception {
-    
-    OffHeapTestUtil.checkOrphans();
-    super.tearDown();
-  }
-  @Override
-  protected Region<Integer, String> createRegion(String regionName) {
-    RegionFactory<Integer, String> rf = cache.createRegionFactory(RegionShortcut.PARTITION);
-    PartitionAttributes prAttr = new PartitionAttributesFactory().setTotalNumBuckets(10).create();
-    rf.setPartitionAttributes(prAttr);
-    rf.setOffHeap(true);
-//    rf.setHDFSStoreName(hdfsStore.getName());
-    Region<Integer, String> r = rf.create(regionName);
-//    addListener(r);
-    
-    ((PartitionedRegion) r).setQueryHDFS(true);
-    return r;
-  }
-  @Override
-  protected Properties getDSProps() {
-    Properties props = super.getDSProps();
-    props.setProperty("off-heap-memory-size", "50m");
-    return props;
-  }
-  
-  
-
-}


[35/50] [abbrv] incubator-geode git commit: GEODE-464: Fix Auto-Rebalancer test race condition

Posted by ds...@apache.org.
GEODE-464: Fix Auto-Rebalancer test race condition

Remove dependency on static instance of Cache in AutoBalancer. Also, as
identified in f801d1c, the tests and constructors needed to be refactored.
This allows easier injection of dependencies and deterministic test execution.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/b4902570
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/b4902570
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/b4902570

Branch: refs/heads/develop
Commit: b49025701685008a5c73b3c01b864da01ea3195d
Parents: 37f77a9
Author: Ashvin Agrawal <as...@apache.org>
Authored: Thu Oct 22 14:07:40 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Thu Oct 22 14:39:44 2015 -0700

----------------------------------------------------------------------
 .../gemfire/cache/util/AutoBalancer.java        | 158 ++++++++-------
 ...erAuditorInvocationIntegrationJUnitTest.java |  80 --------
 .../util/AutoBalancerIntegrationJUnitTest.java  | 180 ++++-------------
 .../cache/util/AutoBalancerJUnitTest.java       | 202 +++++++++++--------
 4 files changed, 242 insertions(+), 378 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b4902570/gemfire-rebalancer/src/main/java/com/gemstone/gemfire/cache/util/AutoBalancer.java
----------------------------------------------------------------------
diff --git a/gemfire-rebalancer/src/main/java/com/gemstone/gemfire/cache/util/AutoBalancer.java b/gemfire-rebalancer/src/main/java/com/gemstone/gemfire/cache/util/AutoBalancer.java
index 00ebc5f..bcc5608 100644
--- a/gemfire-rebalancer/src/main/java/com/gemstone/gemfire/cache/util/AutoBalancer.java
+++ b/gemfire-rebalancer/src/main/java/com/gemstone/gemfire/cache/util/AutoBalancer.java
@@ -17,6 +17,7 @@ import org.quartz.CronExpression;
 import org.springframework.scheduling.support.CronSequenceGenerator;
 
 import com.gemstone.gemfire.GemFireConfigException;
+import com.gemstone.gemfire.cache.CacheClosedException;
 import com.gemstone.gemfire.cache.Declarable;
 import com.gemstone.gemfire.cache.GemFireCache;
 import com.gemstone.gemfire.cache.control.RebalanceOperation;
@@ -51,7 +52,7 @@ import com.gemstone.gemfire.internal.logging.LogService;
  * <P>
  * {@link AutoBalancer} can be controlled using the following configurations
  * <OL>
- * <LI> {@link AutoBalancer#SCHEDULE}
+ * <LI>{@link AutoBalancer#SCHEDULE}
  * <LI>TBD THRESHOLDS
  * 
  * @author Ashvin Agrawal
@@ -119,14 +120,25 @@ public class AutoBalancer implements Declarable {
 
   public static final Object AUTO_BALANCER_LOCK = "__AUTO_B_LOCK";
 
-  private AuditScheduler scheduler = new CronScheduler();
-  private OOBAuditor auditor = new SizeBasedOOBAuditor();
-  private TimeProvider clock = new SystemClockTimeProvider();
-  private CacheOperationFacade cacheFacade = new GeodeCacheFacade();
-  private AtomicBoolean isLockAcquired = new AtomicBoolean(false);
+  private final AuditScheduler scheduler;
+  private final OOBAuditor auditor;
+  private final TimeProvider clock;
+  private final CacheOperationFacade cacheFacade;
 
   private static final Logger logger = LogService.getLogger();
 
+  public AutoBalancer() {
+    this(null, null, null, null);
+  }
+
+  public AutoBalancer(AuditScheduler scheduler, OOBAuditor auditor, TimeProvider clock,
+      CacheOperationFacade cacheFacade) {
+    this.cacheFacade = cacheFacade == null ? new GeodeCacheFacade() : cacheFacade;
+    this.scheduler = scheduler == null ? new CronScheduler() : scheduler;
+    this.auditor = auditor == null ? new SizeBasedOOBAuditor(this.cacheFacade) : auditor;
+    this.clock = clock == null ? new SystemClockTimeProvider() : clock;
+  }
+
   @Override
   public void init(Properties props) {
     if (logger.isDebugEnabled()) {
@@ -215,10 +227,16 @@ public class AutoBalancer implements Declarable {
    * <LI>updates auto-balance stat
    * <LI>release lock
    */
-  class SizeBasedOOBAuditor implements OOBAuditor {
+  static class SizeBasedOOBAuditor implements OOBAuditor {
     private int sizeThreshold = DEFAULT_SIZE_THRESHOLD_PERCENT;
     private int sizeMinimum = DEFAULT_MINIMUM_SIZE;
 
+    final CacheOperationFacade cache;
+
+    public SizeBasedOOBAuditor(CacheOperationFacade cache) {
+      this.cache = cache;
+    }
+
     @Override
     public void init(Properties props) {
       if (logger.isDebugEnabled()) {
@@ -243,24 +261,16 @@ public class AutoBalancer implements Declarable {
 
     @Override
     public void execute() {
-      if (!isLockAcquired.get()) {
-        synchronized (isLockAcquired) {
-          if (!isLockAcquired.get()) {
-            boolean result = cacheFacade.acquireAutoBalanceLock();
-            if (result) {
-              isLockAcquired.set(true);
-            } else {
-              if (logger.isDebugEnabled()) {
-                logger.debug("Another member owns auto-balance lock. Skip this attempt to rebalance the cluster");
-              }
-              return;
-            }
-          }
+      boolean result = cache.acquireAutoBalanceLock();
+      if (!result) {
+        if (logger.isDebugEnabled()) {
+          logger.debug("Another member owns auto-balance lock. Skip this attempt to rebalance the cluster");
         }
+        return;
       }
 
-      cacheFacade.incrementAttemptCounter();
-      boolean result = needsRebalancing();
+      cache.incrementAttemptCounter();
+      result = needsRebalancing();
       if (!result) {
         if (logger.isDebugEnabled()) {
           logger.debug("Rebalancing is not needed");
@@ -268,7 +278,7 @@ public class AutoBalancer implements Declarable {
         return;
       }
 
-      cacheFacade.rebalance();
+      cache.rebalance();
     }
 
     /**
@@ -284,13 +294,13 @@ public class AutoBalancer implements Declarable {
      */
     boolean needsRebalancing() {
       // test cluster level status
-      long transferSize = cacheFacade.getTotalTransferSize();
+      long transferSize = cache.getTotalTransferSize();
       if (transferSize <= sizeMinimum) {
         return false;
       }
 
-      Map<PartitionedRegion, InternalPRInfo> details = cacheFacade.getRegionMemberDetails();
-      long totalSize = cacheFacade.getTotalDataSize(details);
+      Map<PartitionedRegion, InternalPRInfo> details = cache.getRegionMemberDetails();
+      long totalSize = cache.getTotalDataSize(details);
 
       if (totalSize > 0) {
         int transferPercent = (int) ((100.0 * transferSize) / totalSize);
@@ -318,6 +328,18 @@ public class AutoBalancer implements Declarable {
    * auto-balancing
    */
   static class GeodeCacheFacade implements CacheOperationFacade {
+    private final AtomicBoolean isLockAcquired = new AtomicBoolean(false);
+
+    private GemFireCacheImpl cache;
+
+    public GeodeCacheFacade() {
+      this(null);
+    }
+
+    public GeodeCacheFacade(GemFireCacheImpl cache) {
+      this.cache = cache;
+    }
+
     @Override
     public Map<PartitionedRegion, InternalPRInfo> getRegionMemberDetails() {
       GemFireCacheImpl cache = getCache();
@@ -354,12 +376,12 @@ public class AutoBalancer implements Declarable {
         RebalanceOperation operation = getCache().getResourceManager().createRebalanceFactory().simulate();
         RebalanceResults result = operation.getResults();
         if (logger.isDebugEnabled()) {
-          logger.debug("Rebalance estimate: RebalanceResultsImpl [TotalBucketCreateBytes="
-              + result.getTotalBucketCreateBytes() + ", TotalBucketCreatesCompleted="
-              + result.getTotalBucketCreatesCompleted() + ", TotalBucketTransferBytes="
-              + result.getTotalBucketTransferBytes() + ", TotalBucketTransfersCompleted="
-              + result.getTotalBucketTransfersCompleted() + ", TotalPrimaryTransfersCompleted="
-              + result.getTotalPrimaryTransfersCompleted() + "]");
+          logger.debug(
+              "Rebalance estimate: RebalanceResultsImpl [TotalBucketCreateBytes=" + result.getTotalBucketCreateBytes()
+                  + ", TotalBucketCreatesCompleted=" + result.getTotalBucketCreatesCompleted()
+                  + ", TotalBucketTransferBytes=" + result.getTotalBucketTransferBytes()
+                  + ", TotalBucketTransfersCompleted=" + result.getTotalBucketTransfersCompleted()
+                  + ", TotalPrimaryTransfersCompleted=" + result.getTotalPrimaryTransfersCompleted() + "]");
         }
         return result.getTotalBucketTransferBytes();
       } catch (CancellationException e) {
@@ -390,9 +412,8 @@ public class AutoBalancer implements Declarable {
             + result.getTotalBucketCreatesCompleted() + ", TotalBucketTransferBytes="
             + result.getTotalBucketTransferBytes() + ", TotalBucketTransferTime=" + result.getTotalBucketTransferTime()
             + ", TotalBucketTransfersCompleted=" + +result.getTotalBucketTransfersCompleted()
-            + ", TotalPrimaryTransferTime=" + result.getTotalPrimaryTransferTime()
-            + ", TotalPrimaryTransfersCompleted=" + result.getTotalPrimaryTransfersCompleted() + ", TotalTime="
-            + result.getTotalTime() + "]");
+            + ", TotalPrimaryTransferTime=" + result.getTotalPrimaryTransferTime() + ", TotalPrimaryTransfersCompleted="
+            + result.getTotalPrimaryTransfersCompleted() + ", TotalTime=" + result.getTotalTime() + "]");
       } catch (CancellationException e) {
         logger.info("Error rebalancing the cluster", e);
       } catch (InterruptedException e) {
@@ -401,22 +422,44 @@ public class AutoBalancer implements Declarable {
     }
 
     GemFireCacheImpl getCache() {
-      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
       if (cache == null) {
-        throw new IllegalStateException("Missing cache instance.");
+        synchronized (this) {
+          if (cache == null) {
+            cache = GemFireCacheImpl.getInstance();
+            if (cache == null) {
+              throw new IllegalStateException("Missing cache instance.");
+            }
+          }
+        }
+      }
+      if (cache.isClosed()) {
+        throw new CacheClosedException();
       }
       return cache;
     }
 
     @Override
     public boolean acquireAutoBalanceLock() {
-      DistributedLockService dls = getDLS();
+      if (!isLockAcquired.get()) {
+        synchronized (isLockAcquired) {
+          if (!isLockAcquired.get()) {
+            DistributedLockService dls = getDLS();
 
-      boolean result = dls.lock(AUTO_BALANCER_LOCK, 0, -1);
-      if (logger.isDebugEnabled()) {
-        logger.debug("Grabbed AutoBalancer lock? " + result);
+            boolean result = dls.lock(AUTO_BALANCER_LOCK, 0, -1);
+            if (result) {
+              isLockAcquired.set(true);
+              if (logger.isDebugEnabled()) {
+                logger.debug("Grabbed AutoBalancer lock");
+              }
+            } else {
+              if (logger.isDebugEnabled()) {
+                logger.debug("Another member owns auto-balance lock. Skip this attempt to rebalance the cluster");
+              }
+            }
+          }
+        }
       }
-      return result;
+      return isLockAcquired.get();
     }
 
     @Override
@@ -471,41 +514,10 @@ public class AutoBalancer implements Declarable {
     long getTotalTransferSize();
   }
 
-  /**
-   * Test hook to inject custom triggers
-   */
-  void setScheduler(AuditScheduler trigger) {
-    logger.info("Setting custom AuditScheduler");
-    this.scheduler = trigger;
-  }
-
-  /**
-   * Test hook to inject custom auditors
-   */
-  void setOOBAuditor(OOBAuditor auditor) {
-    logger.info("Setting custom Auditor");
-    this.auditor = auditor;
-  }
-
   OOBAuditor getOOBAuditor() {
     return auditor;
   }
 
-  /**
-   * Test hook to inject a clock
-   */
-  void setTimeProvider(TimeProvider clock) {
-    logger.info("Setting custom TimeProvider");
-    this.clock = clock;
-  }
-
-  /**
-   * Test hook to inject a Cache operation facade
-   */
-  public void setCacheOperationFacade(CacheOperationFacade facade) {
-    this.cacheFacade = facade;
-  }
-
   public CacheOperationFacade getCacheOperationFacade() {
     return this.cacheFacade;
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b4902570/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerAuditorInvocationIntegrationJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerAuditorInvocationIntegrationJUnitTest.java b/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerAuditorInvocationIntegrationJUnitTest.java
deleted file mode 100755
index bd6a3ff..0000000
--- a/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerAuditorInvocationIntegrationJUnitTest.java
+++ /dev/null
@@ -1,80 +0,0 @@
-package com.gemstone.gemfire.cache.util;
-
-import static org.junit.Assert.*;
-
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.lib.concurrent.Synchroniser;
-import org.jmock.lib.legacy.ClassImposteriser;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.util.AutoBalancer.OOBAuditor;
-import com.gemstone.gemfire.cache.util.AutoBalancer.TimeProvider;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-/**
- * IntegrationTest for AuditorInvocation in AutoBalancer. 
- * 
- * <p>AutoBalancer should:<br>
- * 1) be refactored to extract out all inner-classes and inner-interfaces<br>
- * 2) have constructor changed to accept every collaborator as an argument<br>
- * 3) then this test can correctly use mocking without any real threads to wait on
- * 
- * <p>Extracted from AutoBalancerJUnitTest
- */
-@Category(IntegrationTest.class)
-public class AutoBalancerAuditorInvocationIntegrationJUnitTest {
-
-  Mockery mockContext;
-
-  @Before
-  public void setupMock() {
-    mockContext = new Mockery() {
-      {
-        setImposteriser(ClassImposteriser.INSTANCE);
-        setThreadingPolicy(new Synchroniser());
-      }
-    };
-  }
-
-  @After
-  public void validateMock() {
-    mockContext.assertIsSatisfied();
-    mockContext = null;
-  }
-
-  @Test
-  public void testAuditorInvocation() throws InterruptedException {
-    int count = 0;
-
-    final OOBAuditor mockAuditor = mockContext.mock(OOBAuditor.class);
-    final TimeProvider mockClock = mockContext.mock(TimeProvider.class);
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockAuditor).init(with(any(Properties.class)));
-        exactly(2).of(mockAuditor).execute();
-        allowing(mockClock).currentTimeMillis();
-        will(returnValue(950L));
-      }
-    });
-
-    Properties props = AutoBalancerJUnitTest.getBasicConfig();
-
-    assertEquals(0, count);
-    AutoBalancer autoR = new AutoBalancer();
-    autoR.setOOBAuditor(mockAuditor);
-    autoR.setTimeProvider(mockClock);
-
-    // the trigger should get invoked after 50 milliseconds
-    autoR.init(props);
-    
-    // TODO: this sleep should NOT be here -- use Awaitility to await a condition instead or use mocking to avoid this altogether
-    TimeUnit.MILLISECONDS.sleep(120); // removal causes failure in validateMock
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b4902570/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerIntegrationJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerIntegrationJUnitTest.java b/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerIntegrationJUnitTest.java
index 38b7bf9..cff9d69 100755
--- a/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerIntegrationJUnitTest.java
+++ b/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerIntegrationJUnitTest.java
@@ -1,22 +1,18 @@
 package com.gemstone.gemfire.cache.util;
 
-import static com.jayway.awaitility.Awaitility.*;
-import static java.util.concurrent.TimeUnit.*;
-import static org.junit.Assert.*;
-import static org.hamcrest.Matchers.*;
+import static com.jayway.awaitility.Awaitility.await;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayInputStream;
-import java.util.HashSet;
-import java.util.Map;
 import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.jmock.Expectations;
-import org.jmock.Mockery;
-import org.jmock.api.Invocation;
-import org.jmock.lib.action.CustomAction;
-import org.jmock.lib.concurrent.Synchroniser;
-import org.jmock.lib.legacy.ClassImposteriser;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -31,38 +27,20 @@ import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.distributed.internal.locks.DLockService;
 import com.gemstone.gemfire.internal.HostStatSampler;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.PRHARedundancyProvider;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.cache.partitioned.InternalPRInfo;
-import com.gemstone.gemfire.internal.cache.partitioned.LoadProbe;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
- * IntegrationTests for AutoBalancer that include usage of Cache, StatSampler 
- * and DistributedLockService. Some collaborators may be mocked while others
- * are real.
- * 
- * <p>Extracted from AutoBalancerJUnitTest
+ * IntegrationTests for AutoBalancer that include usage of Cache, StatSampler
+ * and DistributedLockService.
  */
 @Category(IntegrationTest.class)
 public class AutoBalancerIntegrationJUnitTest {
-  
+
   private static final int TIMEOUT_SECONDS = 5;
 
   private GemFireCacheImpl cache;
-  private Mockery mockContext;
 
   @Before
-  public void setupMock() {
-    mockContext = new Mockery() {
-      {
-        setImposteriser(ClassImposteriser.INSTANCE);
-        setThreadingPolicy(new Synchroniser());
-      }
-    };
-  }
-  
-  @Before
   public void setUpCacheAndDLS() {
     cache = createBasicCache();
   }
@@ -75,7 +53,7 @@ public class AutoBalancerIntegrationJUnitTest {
 
     if (cache != null && !cache.isClosed()) {
       try {
-        final HostStatSampler statSampler = ((InternalDistributedSystem)cache.getDistributedSystem()).getStatSampler();
+        final HostStatSampler statSampler = ((InternalDistributedSystem) cache.getDistributedSystem()).getStatSampler();
         cache.close();
         // wait for the stat sampler to stand down
         await().atMost(TIMEOUT_SECONDS, SECONDS).until(isAlive(statSampler), equalTo(false));
@@ -84,80 +62,41 @@ public class AutoBalancerIntegrationJUnitTest {
       }
     }
   }
-  
-  @After
-  public void validateMock() {
-    mockContext.assertIsSatisfied();
-    mockContext = null;
-  }
 
   @Test
   public void testAutoRebalaceStatsOnLockSuccess() throws InterruptedException {
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCacheFacade).acquireAutoBalanceLock();
-        will(returnValue(true));
-        oneOf(mockCacheFacade).incrementAttemptCounter();
-        will(new CustomAction("increment stat") {
-          public Object invoke(Invocation invocation) throws Throwable {
-            new GeodeCacheFacade().incrementAttemptCounter();
-            return null;
-          }
-        });
-        allowing(mockCacheFacade);
-      }
-    });
-
     assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
     AutoBalancer balancer = new AutoBalancer();
-    balancer.setCacheOperationFacade(mockCacheFacade);
     balancer.getOOBAuditor().execute();
-    
     assertEquals(1, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
   }
 
   @Test
   public void testAutoRebalaceStatsOnLockFailure() throws InterruptedException {
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCacheFacade).acquireAutoBalanceLock();
-        will(returnValue(false));
-      }
-    });
-
+    acquireLockInDifferentThread(1);
     assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
     AutoBalancer balancer = new AutoBalancer();
-    balancer.setCacheOperationFacade(mockCacheFacade);
     balancer.getOOBAuditor().execute();
-
     assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
   }
-  
+
   @Test
   public void testAutoBalanceStatUpdate() {
     assertEquals(0, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
     new GeodeCacheFacade().incrementAttemptCounter();
-    
     assertEquals(1, cache.getResourceManager().getStats().getAutoRebalanceAttempts());
   }
-  
+
   @Test
   public void testLockSuccess() throws InterruptedException {
-    final AtomicBoolean acquiredAutoBalanceLock = new AtomicBoolean(true);
-    
-    Thread thread = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        CacheOperationFacade cacheFacade = new GeodeCacheFacade();
-        acquiredAutoBalanceLock.set(cacheFacade.acquireAutoBalanceLock());
-      }
-    });
-    thread.start();
-    
-    await().atMost(TIMEOUT_SECONDS, SECONDS).untilTrue(acquiredAutoBalanceLock);
-    
+    acquireLockInDifferentThread(1);
+    DistributedLockService dls = new GeodeCacheFacade().getDLS();
+    assertFalse(dls.lock(AutoBalancer.AUTO_BALANCER_LOCK, 0, -1));
+  }
+
+  @Test
+  public void canReacquireLock() throws InterruptedException {
+    acquireLockInDifferentThread(2);
     DistributedLockService dls = new GeodeCacheFacade().getDLS();
     assertFalse(dls.lock(AutoBalancer.AUTO_BALANCER_LOCK, 0, -1));
   }
@@ -168,7 +107,7 @@ public class AutoBalancerIntegrationJUnitTest {
     assertTrue(dls.lock(AutoBalancer.AUTO_BALANCER_LOCK, 0, -1));
 
     final AtomicBoolean success = new AtomicBoolean(true);
-    
+
     Thread thread = new Thread(new Runnable() {
       @Override
       public void run() {
@@ -178,7 +117,7 @@ public class AutoBalancerIntegrationJUnitTest {
     });
     thread.start();
     thread.join();
-    
+
     assertFalse(success.get());
   }
 
@@ -213,57 +152,6 @@ public class AutoBalancerIntegrationJUnitTest {
     cache.loadCacheXml(new ByteArrayInputStream(configStr.getBytes()));
   }
 
-  @Test
-  public void testFacadeCollectMemberDetails2Regions() {
-    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class);
-
-    final PartitionedRegion mockR1 = mockContext.mock(PartitionedRegion.class, "r1");
-    final PartitionedRegion mockR2 = mockContext.mock(PartitionedRegion.class, "r2");
-    final HashSet<PartitionedRegion> regions = new HashSet<>();
-    regions.add(mockR1);
-    regions.add(mockR2);
-
-    final PRHARedundancyProvider mockRedundancyProviderR1 = mockContext.mock(PRHARedundancyProvider.class, "prhaR1");
-    final InternalPRInfo mockR1PRInfo = mockContext.mock(InternalPRInfo.class, "prInforR1");
-
-    final PRHARedundancyProvider mockRedundancyProviderR2 = mockContext.mock(PRHARedundancyProvider.class, "prhaR2");
-    final InternalPRInfo mockR2PRInfo = mockContext.mock(InternalPRInfo.class, "prInforR2");
-
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCache).getPartitionedRegions();
-        will(returnValue(regions));
-        exactly(2).of(mockCache).getResourceManager();
-        will(returnValue(cache.getResourceManager()));
-        allowing(mockR1).getFullPath();
-        oneOf(mockR1).getRedundancyProvider();
-        will(returnValue(mockRedundancyProviderR1));
-        allowing(mockR2).getFullPath();
-        oneOf(mockR2).getRedundancyProvider();
-        will(returnValue(mockRedundancyProviderR2));
-
-        oneOf(mockRedundancyProviderR1).buildPartitionedRegionInfo(with(true), with(any(LoadProbe.class)));
-        will(returnValue(mockR1PRInfo));
-
-        oneOf(mockRedundancyProviderR2).buildPartitionedRegionInfo(with(true), with(any(LoadProbe.class)));
-        will(returnValue(mockR2PRInfo));
-      }
-    });
-
-    GeodeCacheFacade facade = new GeodeCacheFacade() {
-      @Override
-      GemFireCacheImpl getCache() {
-        return mockCache;
-      }
-    };
-
-    Map<PartitionedRegion, InternalPRInfo> map = facade.getRegionMemberDetails();
-    assertNotNull(map);
-    assertEquals(2, map.size());
-    assertEquals(map.get(mockR1), mockR1PRInfo);
-    assertEquals(map.get(mockR2), mockR2PRInfo);
-  }
-
   private GemFireCacheImpl createBasicCache() {
     return (GemFireCacheImpl) new CacheFactory().set("mcast-port", "0").create();
   }
@@ -276,4 +164,22 @@ public class AutoBalancerIntegrationJUnitTest {
       }
     };
   }
+
+  private void acquireLockInDifferentThread(final int num) throws InterruptedException {
+    final CountDownLatch latch = new CountDownLatch(num);
+    Thread thread = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        CacheOperationFacade cacheFacade = new GeodeCacheFacade();
+        for (int i = 0; i < num; i++) {
+          boolean result = cacheFacade.acquireAutoBalanceLock();
+          if (result) {
+            latch.countDown();
+          }
+        }
+      }
+    });
+    thread.start();
+    assertTrue(latch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b4902570/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerJUnitTest.java b/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerJUnitTest.java
index 1eca3c2..5aa0b8d 100644
--- a/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerJUnitTest.java
+++ b/gemfire-rebalancer/src/test/java/com/gemstone/gemfire/cache/util/AutoBalancerJUnitTest.java
@@ -1,15 +1,23 @@
 package com.gemstone.gemfire.cache.util;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Properties;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 
 import org.jmock.Expectations;
 import org.jmock.Mockery;
 import org.jmock.Sequence;
+import org.jmock.api.Invocation;
+import org.jmock.lib.action.CustomAction;
 import org.jmock.lib.concurrent.Synchroniser;
 import org.jmock.lib.legacy.ClassImposteriser;
 import org.junit.After;
@@ -27,10 +35,13 @@ import com.gemstone.gemfire.cache.util.AutoBalancer.CacheOperationFacade;
 import com.gemstone.gemfire.cache.util.AutoBalancer.GeodeCacheFacade;
 import com.gemstone.gemfire.cache.util.AutoBalancer.OOBAuditor;
 import com.gemstone.gemfire.cache.util.AutoBalancer.SizeBasedOOBAuditor;
+import com.gemstone.gemfire.cache.util.AutoBalancer.TimeProvider;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.PRHARedundancyProvider;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.partitioned.InternalPRInfo;
+import com.gemstone.gemfire.internal.cache.partitioned.LoadProbe;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
 /**
@@ -40,6 +51,11 @@ import com.gemstone.gemfire.test.junit.categories.UnitTest;
 public class AutoBalancerJUnitTest {
   Mockery mockContext;
 
+  CacheOperationFacade mockCacheFacade;
+  OOBAuditor mockAuditor;
+  AuditScheduler mockScheduler;
+  TimeProvider mockClock;
+
   @Before
   public void setupMock() {
     mockContext = new Mockery() {
@@ -48,6 +64,11 @@ public class AutoBalancerJUnitTest {
         setThreadingPolicy(new Synchroniser());
       }
     };
+
+    mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
+    mockAuditor = mockContext.mock(OOBAuditor.class);
+    mockScheduler = mockContext.mock(AuditScheduler.class);
+    mockClock = mockContext.mock(TimeProvider.class);
   }
 
   @After
@@ -58,7 +79,6 @@ public class AutoBalancerJUnitTest {
 
   @Test
   public void testLockStatExecuteInSequence() throws InterruptedException {
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
     final Sequence sequence = mockContext.sequence("sequence");
     mockContext.checking(new Expectations() {
       {
@@ -73,33 +93,12 @@ public class AutoBalancerJUnitTest {
       }
     });
 
-    AutoBalancer balancer = new AutoBalancer();
-    balancer.setCacheOperationFacade(mockCacheFacade);
-    balancer.getOOBAuditor().execute();
-  }
-
-  @Test
-  public void testReusePreAcquiredLock() throws InterruptedException {
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCacheFacade).acquireAutoBalanceLock();
-        will(returnValue(true));
-        exactly(2).of(mockCacheFacade).incrementAttemptCounter();
-        exactly(2).of(mockCacheFacade).getTotalTransferSize();
-        will(returnValue(0L));
-      }
-    });
-
-    AutoBalancer balancer = new AutoBalancer();
-    balancer.setCacheOperationFacade(mockCacheFacade);
-    balancer.getOOBAuditor().execute();
+    AutoBalancer balancer = new AutoBalancer(null, null, null, mockCacheFacade);
     balancer.getOOBAuditor().execute();
   }
 
   @Test
   public void testAcquireLockAfterReleasedRemotely() throws InterruptedException {
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
     final Sequence sequence = mockContext.sequence("sequence");
     mockContext.checking(new Expectations() {
       {
@@ -115,15 +114,13 @@ public class AutoBalancerJUnitTest {
       }
     });
 
-    AutoBalancer balancer = new AutoBalancer();
-    balancer.setCacheOperationFacade(mockCacheFacade);
+    AutoBalancer balancer = new AutoBalancer(null, null, null, mockCacheFacade);
     balancer.getOOBAuditor().execute();
     balancer.getOOBAuditor().execute();
   }
 
   @Test
   public void testFailExecuteIfLockedElsewhere() throws InterruptedException {
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
     mockContext.checking(new Expectations() {
       {
         oneOf(mockCacheFacade).acquireAutoBalanceLock();
@@ -132,40 +129,7 @@ public class AutoBalancerJUnitTest {
       }
     });
 
-    Thread thread = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        AutoBalancer balancer = new AutoBalancer();
-        balancer.setCacheOperationFacade(mockCacheFacade);
-        balancer.getOOBAuditor().execute();
-      }
-    });
-    thread.start();
-    thread.join();
-  }
-
-  @Test
-  public void testFailExecuteIfBalanced() throws InterruptedException {
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
-    mockContext.checking(new Expectations() {
-      {
-        oneOf(mockCacheFacade).acquireAutoBalanceLock();
-        will(returnValue(true));
-        never(mockCacheFacade).rebalance();
-        oneOf(mockCacheFacade).incrementAttemptCounter();
-      }
-    });
-
-    AutoBalancer balancer = new AutoBalancer();
-    balancer.setCacheOperationFacade(mockCacheFacade);
-
-    SizeBasedOOBAuditor auditor = balancer.new SizeBasedOOBAuditor() {
-      @Override
-      boolean needsRebalancing() {
-        return false;
-      }
-    };
-    balancer.setOOBAuditor(auditor);
+    AutoBalancer balancer = new AutoBalancer(null, null, null, mockCacheFacade);
     balancer.getOOBAuditor().execute();
   }
 
@@ -181,7 +145,6 @@ public class AutoBalancerJUnitTest {
     final long totalSize = 1000L;
 
     final Map<PartitionedRegion, InternalPRInfo> details = new HashMap<>();
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
     mockContext.checking(new Expectations() {
       {
         allowing(mockCacheFacade).getRegionMemberDetails();
@@ -200,8 +163,7 @@ public class AutoBalancerJUnitTest {
       }
     });
 
-    AutoBalancer balancer = new AutoBalancer();
-    balancer.setCacheOperationFacade(mockCacheFacade);
+    AutoBalancer balancer = new AutoBalancer(null, null, null, mockCacheFacade);
     Properties config = getBasicConfig();
     config.put(AutoBalancer.MINIMUM_SIZE, "10");
     balancer.init(config);
@@ -218,7 +180,6 @@ public class AutoBalancerJUnitTest {
   public void testOOBWhenAboveThresholdButBelowMin() {
     final long totalSize = 1000L;
 
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
     mockContext.checking(new Expectations() {
       {
         // first run
@@ -233,8 +194,7 @@ public class AutoBalancerJUnitTest {
       }
     });
 
-    AutoBalancer balancer = new AutoBalancer();
-    balancer.setCacheOperationFacade(mockCacheFacade);
+    AutoBalancer balancer = new AutoBalancer(null, null, null, mockCacheFacade);
     Properties config = getBasicConfig();
     config.put(AutoBalancer.MINIMUM_SIZE, "" + (totalSize * 5));
     balancer.init(config);
@@ -252,7 +212,6 @@ public class AutoBalancerJUnitTest {
     final long totalSize = 1000L;
 
     final Map<PartitionedRegion, InternalPRInfo> details = new HashMap<>();
-    final CacheOperationFacade mockCacheFacade = mockContext.mock(CacheOperationFacade.class);
     mockContext.checking(new Expectations() {
       {
         allowing(mockCacheFacade).getRegionMemberDetails();
@@ -274,8 +233,7 @@ public class AutoBalancerJUnitTest {
       }
     });
 
-    AutoBalancer balancer = new AutoBalancer();
-    balancer.setCacheOperationFacade(mockCacheFacade);
+    AutoBalancer balancer = new AutoBalancer(null, null, null, mockCacheFacade);
     Properties config = getBasicConfig();
     config.put(AutoBalancer.MINIMUM_SIZE, "10");
     balancer.init(config);
@@ -355,8 +313,6 @@ public class AutoBalancerJUnitTest {
     props.put(AutoBalancer.SCHEDULE, someSchedule);
     props.put(AutoBalancer.SIZE_THRESHOLD_PERCENT, 17);
 
-    final AuditScheduler mockScheduler = mockContext.mock(AuditScheduler.class);
-    final OOBAuditor mockAuditor = mockContext.mock(OOBAuditor.class);
     mockContext.checking(new Expectations() {
       {
         oneOf(mockScheduler).init(someSchedule);
@@ -364,10 +320,7 @@ public class AutoBalancerJUnitTest {
       }
     });
 
-    AutoBalancer autoR = new AutoBalancer();
-    autoR.setScheduler(mockScheduler);
-    autoR.setOOBAuditor(mockAuditor);
-
+    AutoBalancer autoR = new AutoBalancer(mockScheduler, mockAuditor, null, null);
     autoR.init(props);
   }
 
@@ -404,6 +357,8 @@ public class AutoBalancerJUnitTest {
 
     mockContext.checking(new Expectations() {
       {
+        oneOf(mockCache).isClosed();
+        will(returnValue(false));
         oneOf(mockCache).getResourceManager();
         will(returnValue(mockRM));
         oneOf(mockRM).createRebalanceFactory();
@@ -424,12 +379,7 @@ public class AutoBalancerJUnitTest {
       }
     });
 
-    GeodeCacheFacade facade = new GeodeCacheFacade() {
-      @Override
-      GemFireCacheImpl getCache() {
-        return mockCache;
-      }
-    };
+    GeodeCacheFacade facade = new GeodeCacheFacade(mockCache);
 
     return facade;
   }
@@ -446,22 +396,71 @@ public class AutoBalancerJUnitTest {
     final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class);
     mockContext.checking(new Expectations() {
       {
+        oneOf(mockCache).isClosed();
+        will(returnValue(false));
         oneOf(mockCache).getPartitionedRegions();
         will(returnValue(new HashSet<PartitionedRegion>()));
       }
     });
 
-    GeodeCacheFacade facade = new GeodeCacheFacade() {
-      @Override
-      GemFireCacheImpl getCache() {
-        return mockCache;
-      }
-    };
+    GeodeCacheFacade facade = new GeodeCacheFacade(mockCache);
 
     assertEquals(0, facade.getRegionMemberDetails().size());
   }
 
   @Test
+  public void testFacadeCollectMemberDetails2Regions() {
+    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class);
+    final InternalResourceManager mockRM = mockContext.mock(InternalResourceManager.class);
+    final LoadProbe mockProbe = mockContext.mock(LoadProbe.class);
+
+    final PartitionedRegion mockR1 = mockContext.mock(PartitionedRegion.class, "r1");
+    final PartitionedRegion mockR2 = mockContext.mock(PartitionedRegion.class, "r2");
+    final HashSet<PartitionedRegion> regions = new HashSet<>();
+    regions.add(mockR1);
+    regions.add(mockR2);
+
+    final PRHARedundancyProvider mockRedundancyProviderR1 = mockContext.mock(PRHARedundancyProvider.class, "prhaR1");
+    final InternalPRInfo mockR1PRInfo = mockContext.mock(InternalPRInfo.class, "prInforR1");
+
+    final PRHARedundancyProvider mockRedundancyProviderR2 = mockContext.mock(PRHARedundancyProvider.class, "prhaR2");
+    final InternalPRInfo mockR2PRInfo = mockContext.mock(InternalPRInfo.class, "prInforR2");
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockCache).isClosed();
+        will(returnValue(false));
+        oneOf(mockCache).getPartitionedRegions();
+        will(returnValue(regions));
+        exactly(2).of(mockCache).getResourceManager();
+        will(returnValue(mockRM));
+        exactly(2).of(mockRM).getLoadProbe();
+        will(returnValue(mockProbe));
+        allowing(mockR1).getFullPath();
+        oneOf(mockR1).getRedundancyProvider();
+        will(returnValue(mockRedundancyProviderR1));
+        allowing(mockR2).getFullPath();
+        oneOf(mockR2).getRedundancyProvider();
+        will(returnValue(mockRedundancyProviderR2));
+
+        oneOf(mockRedundancyProviderR1).buildPartitionedRegionInfo(with(true), with(any(LoadProbe.class)));
+        will(returnValue(mockR1PRInfo));
+
+        oneOf(mockRedundancyProviderR2).buildPartitionedRegionInfo(with(true), with(any(LoadProbe.class)));
+        will(returnValue(mockR2PRInfo));
+      }
+    });
+
+    GeodeCacheFacade facade = new GeodeCacheFacade(mockCache);
+
+    Map<PartitionedRegion, InternalPRInfo> map = facade.getRegionMemberDetails();
+    assertNotNull(map);
+    assertEquals(2, map.size());
+    assertEquals(map.get(mockR1), mockR1PRInfo);
+    assertEquals(map.get(mockR2), mockR2PRInfo);
+  }
+
+  @Test
   public void testFacadeTotalBytes2Regions() {
     final PartitionedRegion mockR1 = mockContext.mock(PartitionedRegion.class, "r1");
     final PartitionedRegion mockR2 = mockContext.mock(PartitionedRegion.class, "r2");
@@ -514,6 +513,33 @@ public class AutoBalancerJUnitTest {
     assertEquals(123 + 74 + 3475, facade.getTotalDataSize(details));
   }
 
+  @Test
+  public void testAuditorInvocation() throws InterruptedException {
+    final CountDownLatch latch = new CountDownLatch(3);
+
+    mockContext.checking(new Expectations() {
+      {
+        oneOf(mockAuditor).init(with(any(Properties.class)));
+        exactly(2).of(mockAuditor).execute();
+        allowing(mockClock).currentTimeMillis();
+        will(new CustomAction("returnTime") {
+          @Override
+          public Object invoke(Invocation invocation) throws Throwable {
+            latch.countDown();
+            return 990L;
+          }
+        });
+      }
+    });
+
+    Properties props = AutoBalancerJUnitTest.getBasicConfig();
+
+    assertEquals(3, latch.getCount());
+    AutoBalancer autoR = new AutoBalancer(null, mockAuditor, mockClock, null);
+    autoR.init(props);
+    assertTrue(latch.await(1, TimeUnit.SECONDS));
+  }
+
   static Properties getBasicConfig() {
     Properties props = new Properties();
     // every second schedule



[02/50] [abbrv] incubator-geode git commit: Fixes GEODE-444 by closing client cache before closing server cache.

Posted by ds...@apache.org.
Fixes GEODE-444 by closing client cache before closing server cache.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/26fbf659
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/26fbf659
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/26fbf659

Branch: refs/heads/develop
Commit: 26fbf659c21e5a3584bbbdaa33060af68266a2bc
Parents: 5da17d1
Author: eshu <es...@pivotal.io>
Authored: Mon Oct 19 16:21:07 2015 -0700
Committer: eshu <es...@pivotal.io>
Committed: Mon Oct 19 16:26:18 2015 -0700

----------------------------------------------------------------------
 .../internal/cache/PartitionedRegionSingleHopDUnitTest.java  | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/26fbf659/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
index 39618bb..278c477 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
@@ -106,14 +106,14 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 
   public void tearDown2() throws Exception {
     try {
-
+      /* fixes GEODE-444, really close client cache first by using super.tearDown2();
       // close the clients first
       member0.invoke(PartitionedRegionSingleHopDUnitTest.class, "closeCache");
       member1.invoke(PartitionedRegionSingleHopDUnitTest.class, "closeCache");
       member2.invoke(PartitionedRegionSingleHopDUnitTest.class, "closeCache");
       member3.invoke(PartitionedRegionSingleHopDUnitTest.class, "closeCache");
       closeCache();
-
+      */
       super.tearDown2();
 
       member0 = null;
@@ -755,7 +755,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     pause(5000);
     assertFalse(cms.isRefreshMetadataTestOnly());
   }
-  
+
   public void testServerLocationRemovalThroughPing() {
     Integer port0 = (Integer)member0.invoke(
         PartitionedRegionSingleHopDUnitTest.class, "createServer",
@@ -842,7 +842,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
 //    assertEquals(4/*numBuckets*/, prMetaData.getBucketServerLocationsMap_TEST_ONLY().size());    
   }
-  
+
   public void testMetadataFetchOnlyThroughputAll() {
     Integer port0 = (Integer)member0.invoke(
         PartitionedRegionSingleHopDUnitTest.class, "createServer",


[14/50] [abbrv] incubator-geode git commit: GEODE-429: Remove HdfsStore parser in cache xml

Posted by ds...@apache.org.
GEODE-429: Remove HdfsStore parser in cache xml


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/12318e9c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/12318e9c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/12318e9c

Branch: refs/heads/develop
Commit: 12318e9cf862795e46540fdf72836fd8cbba262d
Parents: 7f25197
Author: Ashvin Agrawal <as...@apache.org>
Authored: Mon Oct 19 14:36:25 2015 -0700
Committer: Ashvin Agrawal <as...@apache.org>
Committed: Wed Oct 21 08:55:22 2015 -0700

----------------------------------------------------------------------
 .../hdfs/internal/HDFSConfigJUnitTest.java      | 524 -------------------
 1 file changed, 524 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/12318e9c/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
deleted file mode 100644
index 26e6c73..0000000
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/hdfs/internal/HDFSConfigJUnitTest.java
+++ /dev/null
@@ -1,524 +0,0 @@
- /*=========================================================================
-   * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
-   * This product is protected by U.S. and international copyright
-   * and intellectual property laws. Pivotal products are covered by
-   * one or more patents listed at http://www.pivotal.io/patents.
-   *=========================================================================
-   */
-
-package com.gemstone.gemfire.cache.hdfs.internal;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import junit.framework.TestCase;
-
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.CacheXmlException;
-import com.gemstone.gemfire.cache.DiskStoreFactory;
-import com.gemstone.gemfire.cache.EvictionAttributes;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
-import com.gemstone.gemfire.cache.hdfs.HDFSStore;
-import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer;
-import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HoplogConfig;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.control.HeapMemoryMonitor;
-import com.gemstone.gemfire.test.junit.categories.HoplogTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.junit.experimental.categories.Category;
-
-/**
- * A test class for testing the configuration option for HDFS 
- * 
- * @author Hemant Bhanawat
- * @author Ashvin Agrawal
- */
-@Category({IntegrationTest.class, HoplogTest.class})
-public class HDFSConfigJUnitTest extends TestCase {
-  private GemFireCacheImpl c;
-
-  public HDFSConfigJUnitTest() {
-    super();
-  }
-
-  @Override
-  public void setUp() {
-    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
-    this.c = createCache();
-    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
-  }
-
-  @Override
-  public void tearDown() {
-    this.c.close();
-  }
-    
-    public void testHDFSStoreCreation() throws Exception {
-      this.c.close();
-      this.c = createCache();
-      try {
-        HDFSStoreFactory hsf = this.c.createHDFSStoreFactory();
-        HDFSStore store = hsf.create("myHDFSStore");
-        RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
-//        rf1.setHDFSStoreName("myHDFSStore");
-        Region r1 = rf1.create("r1");
-       
-        r1.put("k1", "v1");
-        
-        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
-        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
-        assertEquals(false, r1.getAttributes().getHDFSWriteOnly());
-        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
-        assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 3600", store.getWriteOnlyFileRolloverInterval() == 3600);
-        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 256MB", store.getWriteOnlyFileRolloverSize() == 256);
-        this.c.close();
-        
-        
-        this.c = createCache();
-        hsf = this.c.createHDFSStoreFactory();
-        hsf.create("myHDFSStore");
-        
-        RegionFactory<Object, Object> rf = this.c.createRegionFactory(RegionShortcut.PARTITION);
-//        rf.setHDFSStoreName("myHDFSStore");
-        r1 = rf.create("r1");
-       
-        r1.put("k1", "v1");
-        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
-        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
-        assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true", r1.getAttributes().getHDFSWriteOnly()== true);
-        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
-        assertTrue("Mismatch in attributes, actual.batchInterval: " + store.getBatchInterval() + " and expected batchsize: 60000", store.getBatchInterval()== 60000);
-        assertTrue("Mismatch in attributes, actual.isDiskSynchronous: " + store.getSynchronousDiskWrite() + " and expected isDiskSynchronous: true", store.getSynchronousDiskWrite()== true);
-        
-        this.c.close();
-
-        this.c = createCache();
-        
-        File directory = new File("HDFS" + "_disk_"
-            + System.currentTimeMillis());
-        directory.mkdir();
-        File[] dirs1 = new File[] { directory };
-        DiskStoreFactory dsf = this.c.createDiskStoreFactory();
-        dsf.setDiskDirs(dirs1);
-        dsf.create("mydisk");
-        
-        
-        hsf = this.c.createHDFSStoreFactory();
-        hsf.setBatchSize(50);
-        hsf.setDiskStoreName("mydisk");
-        hsf.setBufferPersistent(true);
-        hsf.setBatchInterval(50);
-        hsf.setSynchronousDiskWrite(false);
-        hsf.setHomeDir("/home/hemant");
-        hsf.setNameNodeURL("mymachine");
-        hsf.setWriteOnlyFileRolloverSize(1);
-        hsf.setWriteOnlyFileRolloverInterval(10);
-        hsf.create("myHDFSStore");
-        
-        
-        rf = this.c.createRegionFactory(RegionShortcut.PARTITION);
-//        rf.setHDFSStoreName("myHDFSStore").setHDFSWriteOnly(true);
-        r1 = rf.create("r1");
-       
-        r1.put("k1", "v1");
-        store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
-        
-        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 50", store.getBatchSize()== 50);
-        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: true", store.getBufferPersistent()== true);
-        assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true", r1.getAttributes().getHDFSWriteOnly()== true);
-        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: mydisk", store.getDiskStoreName()== "mydisk");
-        assertTrue("Mismatch in attributes, actual.HDFSStoreName: " + r1.getAttributes().getHDFSStoreName() + " and expected getDiskStoreName: myHDFSStore", r1.getAttributes().getHDFSStoreName()== "myHDFSStore");
-        assertTrue("Mismatch in attributes, actual.getFolderPath: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir() + " and expected getDiskStoreName: /home/hemant", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir()== "/home/hemant");
-        assertTrue("Mismatch in attributes, actual.getNamenode: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL()+ " and expected getDiskStoreName: mymachine", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL()== "mymachine");
-        assertTrue("Mismatch in attributes, actual.batchInterval: " + store.getBatchInterval() + " and expected batchsize: 50 ", store.getBatchSize()== 50);
-        assertTrue("Mismatch in attributes, actual.isDiskSynchronous: " + store.getSynchronousDiskWrite() + " and expected isPersistent: false", store.getSynchronousDiskWrite()== false);
-        assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 10", store.getWriteOnlyFileRolloverInterval() == 10);
-        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 1MB", store.getWriteOnlyFileRolloverSize() == 1);
-        this.c.close();
-      } finally {
-        this.c.close();
-      }
-    }
-       
-    public void testCacheXMLParsing() throws Exception {
-      try {
-        this.c.close();
-
-        Region r1 = null;
-
-        // use a cache.xml to recover
-        this.c = createCache();
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        PrintWriter pw = new PrintWriter(new OutputStreamWriter(baos), true); 
-        pw.println("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
-//      pw.println("<?xml version=\"1.0\"?>");
-//      pw.println("<!DOCTYPE cache PUBLIC");
-//      pw.println("  \"-//GemStone Systems, Inc.//GemFire Declarative Caching 7.5//EN\"");
-//      pw.println("  \"http://www.gemstone.com/dtd/cache7_5.dtd\">");
-        pw.println("<cache ");
-        pw.println("xmlns=\"http://schema.pivotal.io/gemfire/cache\"");
-        pw.println("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"");
-        pw.println(" xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"");
-        pw.println("version=\"9.0\">");
-
-        pw.println("  <hdfs-store name=\"myHDFSStore\" namenode-url=\"mynamenode\"  home-dir=\"mypath\" />");
-        pw.println("  <region name=\"r1\" refid=\"PARTITION_HDFS\">");
-        pw.println("    <region-attributes hdfs-store-name=\"myHDFSStore\"/>");
-        pw.println("  </region>");
-        pw.println("</cache>");
-        pw.close();
-        byte[] bytes = baos.toByteArray();  
-        this.c.loadCacheXml(new ByteArrayInputStream(bytes));
-        
-        r1 = this.c.getRegion("/r1");
-        HDFSStoreImpl store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
-        r1.put("k1", "v1");
-        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
-        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
-        assertEquals(false, r1.getAttributes().getHDFSWriteOnly());
-        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
-        assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 3600", store.getWriteOnlyFileRolloverInterval() == 3600);
-        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 256MB", store.getWriteOnlyFileRolloverSize() == 256);
-        
-        this.c.close();
-        
-        // use a cache.xml to recover
-        this.c = createCache();
-        baos = new ByteArrayOutputStream();
-        pw = new PrintWriter(new OutputStreamWriter(baos), true);
-        pw.println("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
-//      pw.println("<?xml version=\"1.0\"?>");
-//      pw.println("<!DOCTYPE cache PUBLIC");
-//      pw.println("  \"-//GemStone Systems, Inc.//GemFire Declarative Caching 7.5//EN\"");
-//      pw.println("  \"http://www.gemstone.com/dtd/cache7_5.dtd\">");
-        pw.println("<cache ");
-        pw.println("xmlns=\"http://schema.pivotal.io/gemfire/cache\"");
-        pw.println("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"");
-        pw.println(" xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"");
-        pw.println("version=\"9.0\">");
-        pw.println("  <hdfs-store name=\"myHDFSStore\" namenode-url=\"mynamenode\"  home-dir=\"mypath\" />");
-        pw.println("  <region name=\"r1\" refid=\"PARTITION_WRITEONLY_HDFS_STORE\">");
-        pw.println("    <region-attributes hdfs-store-name=\"myHDFSStore\"/>");
-        pw.println("  </region>");
-        pw.println("</cache>");
-        pw.close();
-        bytes = baos.toByteArray();  
-        this.c.loadCacheXml(new ByteArrayInputStream(bytes));
-        
-        r1 = this.c.getRegion("/r1");
-        store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
-        r1.put("k1", "v1");
-        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 32", store.getBatchSize()== 32);
-        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: false", store.getBufferPersistent()== false);
-        assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: false", r1.getAttributes().getHDFSWriteOnly()== false);
-        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: null", store.getDiskStoreName()== null);
-        
-        this.c.close();
-        
-        // use a cache.xml to recover
-        this.c = createCache();
-        baos = new ByteArrayOutputStream();
-        pw = new PrintWriter(new OutputStreamWriter(baos), true);
-        pw.println("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
-//        pw.println("<?xml version=\"1.0\"?>");
-//        pw.println("<!DOCTYPE cache PUBLIC");
-//        pw.println("  \"-//GemStone Systems, Inc.//GemFire Declarative Caching 7.5//EN\"");
-//        pw.println("  \"http://www.gemstone.com/dtd/cache7_5.dtd\">");
-        pw.println("<cache ");
-        pw.println("xmlns=\"http://schema.pivotal.io/gemfire/cache\"");
-        pw.println("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"");
-        pw.println(" xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"");
-        pw.println("version=\"9.0\">");
-
-        pw.println("  <disk-store name=\"mydiskstore\"/>");
-        pw.println("  <hdfs-store name=\"myHDFSStore\" namenode-url=\"mynamenode\"  home-dir=\"mypath\" max-write-only-file-size=\"1\" write-only-file-rollover-interval=\"10\" ");
-        pw.println("    batch-size=\"151\" buffer-persistent =\"true\" disk-store=\"mydiskstore\" synchronous-disk-write=\"false\" batch-interval=\"50\"");
-        pw.println("  />");
-        pw.println("  <region name=\"r1\" refid=\"PARTITION_WRITEONLY_HDFS_STORE\">");
-        pw.println("    <region-attributes hdfs-store-name=\"myHDFSStore\" hdfs-write-only=\"false\">");
-        pw.println("    </region-attributes>");
-        pw.println("  </region>");
-        pw.println("</cache>");
-        pw.close();
-        bytes = baos.toByteArray();
-        this.c.loadCacheXml(new ByteArrayInputStream(bytes));
-        
-        r1 = this.c.getRegion("/r1");
-        store = c.findHDFSStore(r1.getAttributes().getHDFSStoreName());
-        r1.put("k1", "v1");
-        assertTrue("Mismatch in attributes, actual.batchsize: " + store.getBatchSize() + " and expected batchsize: 151", store.getBatchSize()== 151);
-        assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getBufferPersistent() + " and expected isPersistent: true", store.getBufferPersistent()== true);
-        assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true", r1.getAttributes().getHDFSWriteOnly()== false);
-        assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getDiskStoreName() + " and expected getDiskStoreName: mydiskstore", store.getDiskStoreName().equals("mydiskstore"));
-        assertTrue("Mismatch in attributes, actual.HDFSStoreName: " + r1.getAttributes().getHDFSStoreName() + " and expected getDiskStoreName: myHDFSStore", r1.getAttributes().getHDFSStoreName().equals("myHDFSStore"));
-        assertTrue("Mismatch in attributes, actual.getFolderPath: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir() + " and expected getDiskStoreName: mypath", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getHomeDir().equals("mypath"));
-        assertTrue("Mismatch in attributes, actual.getNamenode: " + ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL()+ " and expected getDiskStoreName: mynamenode", ((GemFireCacheImpl)this.c).findHDFSStore("myHDFSStore").getNameNodeURL().equals("mynamenode"));
-        assertTrue("Mismatch in attributes, actual.batchInterval: " + store.getBatchInterval() + " and expected batchsize: 50", store.getBatchInterval()== 50);
-        assertTrue("Mismatch in attributes, actual.isDiskSynchronous: " + store.getSynchronousDiskWrite() + " and expected isDiskSynchronous: false", store.getSynchronousDiskWrite()== false);
-        assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getWriteOnlyFileRolloverInterval() + " and expected getFileRolloverInterval: 10", store.getWriteOnlyFileRolloverInterval() == 10);
-        assertTrue("Mismatch in attributes, actual.getMaxFileSize: " + store.getWriteOnlyFileRolloverSize() + " and expected getMaxFileSize: 1MB", store.getWriteOnlyFileRolloverSize() == 1);
-        
-        this.c.close();
-      } finally {
-          this.c.close();
-      }
-    }
-   
-  /**
-   * Validates if hdfs store conf is getting completely and correctly parsed
-   */
-  public void testHdfsStoreConfFullParsing() {
-    String conf = createStoreConf("123");
-    this.c.loadCacheXml(new ByteArrayInputStream(conf.getBytes()));
-    HDFSStoreImpl store = ((GemFireCacheImpl)this.c).findHDFSStore("store");
-    assertEquals("namenode url mismatch.", "url", store.getNameNodeURL());
-    assertEquals("home-dir mismatch.", "dir", store.getHomeDir());
-    assertEquals("hdfs-client-config-file mismatch.", "client", store.getHDFSClientConfigFile());
-    assertEquals("read-cache-size mismatch.", 24.5f, store.getBlockCacheSize());
-    
-    assertFalse("compaction auto-compact mismatch.", store.getMinorCompaction());
-    assertTrue("compaction auto-major-compact mismatch.", store.getMajorCompaction());
-    assertEquals("compaction max-concurrency", 23, store.getMinorCompactionThreads());
-    assertEquals("compaction max-major-concurrency", 27, store.getMajorCompactionThreads());
-    assertEquals("compaction major-interval", 711, store.getPurgeInterval());
-  }
-  
-  /**
-   * Validates that the config defaults are set even with minimum XML configuration 
-   */
-  public void testHdfsStoreConfMinParse() {
-    this.c.loadCacheXml(new ByteArrayInputStream(XML_MIN_CONF.getBytes()));
-    HDFSStoreImpl store = ((GemFireCacheImpl)this.c).findHDFSStore("store");
-    assertEquals("namenode url mismatch.", "url", store.getNameNodeURL());
-    assertEquals("home-dir mismatch.", "gemfire", store.getHomeDir());
-    
-    assertTrue("compaction auto-compact mismatch.", store.getMinorCompaction());
-    assertTrue("compaction auto-major-compact mismatch.", store.getMajorCompaction());
-    assertEquals("compaction max-input-file-size mismatch.", 512, store.getInputFileSizeMax());
-    assertEquals("compaction min-input-file-count.", 4, store.getInputFileCountMin());
-    assertEquals("compaction max-iteration-size.", 10, store.getInputFileCountMax());
-    assertEquals("compaction max-concurrency", 10, store.getMinorCompactionThreads());
-    assertEquals("compaction max-major-concurrency", 2, store.getMajorCompactionThreads());
-    assertEquals("compaction major-interval", 720, store.getMajorCompactionInterval());
-    assertEquals("compaction cleanup-interval", 30, store.getPurgeInterval());
-  }
-  
-  /**
-   * Validates that cache creation fails if a compaction configuration is
-   * provided which is not applicable to the selected compaction strategy
-   */
-  public void testHdfsStoreInvalidCompactionConf() {
-    String conf = createStoreConf("123");
-    try {
-      this.c.loadCacheXml(new ByteArrayInputStream(conf.getBytes()));
-      // expected
-    } catch (CacheXmlException e) {
-      fail();
-    }
-  }
-  
-  /**
-   * Validates that cache creation fails if a compaction configuration is
-   * provided which is not applicable to the selected compaction strategy
-   */
-  public void testInvalidConfigCheck() throws Exception {
-    this.c.close();
-
-    this.c = createCache();
-
-    HDFSStoreFactory hsf; 
-    hsf = this.c.createHDFSStoreFactory();
-    
-    try {
-      hsf.setInputFileSizeMax(-1);
-      fail("validation failed");
-    } catch (IllegalArgumentException e) {
-      //expected
-    }
-    try {
-      hsf.setInputFileCountMin(-1);
-      fail("validation failed");
-    } catch (IllegalArgumentException e) {
-      //expected
-    }
-    try {
-      hsf.setInputFileCountMax(-1);
-      //expected
-      fail("validation failed");
-    } catch (IllegalArgumentException e) {
-    }
-    try {
-      hsf.setMinorCompactionThreads(-1);
-      fail("validation failed");
-    } catch (IllegalArgumentException e) {
-      //expected
-    }
-    try {
-      hsf.setMajorCompactionInterval(-1);
-      fail("validation failed");
-    } catch (IllegalArgumentException e) {
-      //expected
-    }
-    try {
-      hsf.setMajorCompactionThreads(-1);
-      fail("validation failed");
-    } catch (IllegalArgumentException e) {
-      //expected
-    }
-    try {
-      hsf.setPurgeInterval(-1);
-      fail("validation failed");
-    } catch (IllegalArgumentException e) {
-      //expected
-    }
-    try {
-      hsf.setInputFileCountMin(2);
-      hsf.setInputFileCountMax(1);
-      hsf.create("test");
-      fail("validation failed");
-    } catch (IllegalArgumentException e) {
-      //expected
-    }
-    try {
-      hsf.setInputFileCountMax(1);
-      hsf.setInputFileCountMin(2);
-      hsf.create("test");
-      fail("validation failed");
-    } catch (IllegalArgumentException e) {
-      //expected
-    }
-  }
-  
-  /**
-   * Validates cache creation fails if invalid integer size configuration is provided
-   * @throws Exception
-   */
-  public void testHdfsStoreConfInvalidInt() throws Exception {
-    String conf = createStoreConf("NOT_INTEGER");
-    try {
-      this.c.loadCacheXml(new ByteArrayInputStream(conf.getBytes()));
-      fail();
-    } catch (CacheXmlException e) {
-      // expected
-    }
-  }
-  
-
-  private static String XML_MIN_CONF = "<?xml version=\"1.0\" encoding=\"UTF-8\"?> \n"
-  + "<cache \n"
-  + "xmlns=\"http://schema.pivotal.io/gemfire/cache\"\n"
-  + "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n"
-  + " xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"\n"
-  + "version=\"9.0\">" +
-          "  <hdfs-store name=\"store\" namenode-url=\"url\" />" +
-          "</cache>";
-   
-  private static String XML_FULL_CONF = "<?xml version=\"1.0\" encoding=\"UTF-8\"?> \n"
-                                        + "<cache \n"
-                                        + "xmlns=\"http://schema.pivotal.io/gemfire/cache\"\n"
-                                        + "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n"
-                                        + " xsi:schemaLocation=\"http://schema.pivotal.io/gemfire/cache http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd\"\n"
-                                        + "version=\"9.0\">"
-      + "  <hdfs-store name=\"store\" namenode-url=\"url\" "
-      + "              home-dir=\"dir\" "
-      + "              read-cache-size=\"24.5\" "
-      + "              max-write-only-file-size=\"FILE_SIZE_CONF\" "
-      + "              minor-compaction-threads = \"23\""
-      + "              major-compaction-threads = \"27\""
-      + "              major-compaction=\"true\" "
-      + "              minor-compaction=\"false\" "
-      + "              major-compaction-interval=\"781\" "
-      + "              purge-interval=\"711\" hdfs-client-config-file=\"client\" />\n"
-      + "</cache>";
-  // potential replacement targets
-  String FILE_SIZE_CONF_SUBSTRING = "FILE_SIZE_CONF";
-  
-  private String createStoreConf(String fileSize) {
-    String result = XML_FULL_CONF;
-    
-    String replaceWith = (fileSize == null) ? "123" : fileSize;
-    result = result.replaceFirst(FILE_SIZE_CONF_SUBSTRING, replaceWith);
-
-    return result;
-  }
-  
-  public void _testBlockCacheConfiguration() throws Exception {
-    this.c.close();
-    this.c = createCache();
-    try {
-      HDFSStoreFactory hsf = this.c.createHDFSStoreFactory();
-      
-      //Configure a block cache to cache about 20 blocks.
-      long heapSize = HeapMemoryMonitor.getTenuredPoolMaxMemory();
-      int blockSize = StoreFile.DEFAULT_BLOCKSIZE_SMALL;
-      int blockCacheSize = 5 * blockSize;
-      int entrySize = blockSize / 2;
-      
-      
-      float percentage = 100 * (float) blockCacheSize / (float) heapSize;
-      hsf.setBlockCacheSize(percentage);
-      HDFSStoreImpl store = (HDFSStoreImpl) hsf.create("myHDFSStore");
-      RegionFactory rf1 = this.c.createRegionFactory(RegionShortcut.PARTITION);
-      //Create a region that evicts everything
-//      rf1.setHDFSStoreName("myHDFSStore");
-      LocalRegion r1 = (LocalRegion) rf1.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1)).create("r1");
-     
-      //Populate about many times our block cache size worth of data
-      //We want to try to cache at least 5 blocks worth of index and metadata
-      byte[] value = new byte[entrySize];
-      int numEntries = 10 * blockCacheSize / entrySize;
-      for(int i = 0; i < numEntries; i++) {
-        r1.put(i, value);
-      }
-
-      //Wait for the events to be written to HDFS.
-      Set<String> queueIds = r1.getAsyncEventQueueIds();
-      assertEquals(1, queueIds.size());
-      AsyncEventQueueImpl queue = (AsyncEventQueueImpl) c.getAsyncEventQueue(queueIds.iterator().next());
-      long end = System.nanoTime() + TimeUnit.SECONDS.toNanos(120);
-      while(queue.size() > 0 && System.nanoTime() < end) {
-        Thread.sleep(10);
-      }
-      assertEquals(0, queue.size());
-      
-      
-      Thread.sleep(10000);
-
-      //Do some reads to cache some blocks. Note that this doesn't
-      //end up caching data blocks, just index and bloom filters blocks.
-      for(int i = 0; i < numEntries; i++) {
-        r1.get(i);
-      }
-      
-      long statSize = store.getStats().getBlockCache().getBytesCached();
-      assertTrue("Block cache stats expected to be near " + blockCacheSize + " was " + statSize, 
-          blockCacheSize / 2  < statSize &&
-          statSize <=  2 * blockCacheSize);
-      
-      long currentSize = store.getBlockCache().getCurrentSize();
-      assertTrue("Block cache size expected to be near " + blockCacheSize + " was " + currentSize, 
-          blockCacheSize / 2  < currentSize &&
-          currentSize <= 2 * blockCacheSize);
-      
-    } finally {
-      this.c.close();
-    }
-  }
-
-  protected GemFireCacheImpl createCache() {
-    return (GemFireCacheImpl) new CacheFactory().set("mcast-port", "0").set("log-level", "info")
-    .create();
-  }
-}