You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@geode.apache.org by ud...@apache.org on 2017/08/23 20:55:05 UTC

[01/25] geode git commit: GEODE-3406: Locator accepts Protobuf requests [Forced Update!]

Repository: geode
Updated Branches:
  refs/heads/feature/GEODE-3503 6c807e826 -> a182a5a95 (forced update)


http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandlerJUnitTest.java
index 29902e6..fd84d41 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandlerJUnitTest.java
@@ -17,7 +17,6 @@ package org.apache.geode.protocol.protobuf.operations;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
-import java.nio.charset.Charset;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
@@ -28,7 +27,8 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.Region;
-import org.apache.geode.protocol.protobuf.BasicTypes;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.RegionAPI;
 import org.apache.geode.protocol.protobuf.Result;
 import org.apache.geode.protocol.protobuf.Success;
@@ -62,11 +62,12 @@ public class GetRegionNamesRequestOperationHandlerJUnitTest extends OperationHan
   }
 
   @Test
-  public void processReturnsCacheRegions() throws CodecAlreadyRegisteredForTypeException,
-      UnsupportedEncodingTypeException, CodecNotRegisteredForTypeException {
-    Result<RegionAPI.GetRegionNamesResponse> result =
-        operationHandler.process(serializationServiceStub,
-            ProtobufRequestUtilities.createGetRegionNamesRequest(), cacheStub);
+  public void processReturnsCacheRegions()
+      throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
+    Result<RegionAPI.GetRegionNamesResponse> result = operationHandler.process(
+        serializationServiceStub, ProtobufRequestUtilities.createGetRegionNamesRequest(),
+        new ExecutionContext(cacheStub));
     Assert.assertTrue(result instanceof Success);
 
     RegionAPI.GetRegionNamesResponse getRegionsResponse = result.getMessage();
@@ -84,14 +85,15 @@ public class GetRegionNamesRequestOperationHandlerJUnitTest extends OperationHan
   }
 
   @Test
-  public void processReturnsNoCacheRegions() throws CodecAlreadyRegisteredForTypeException,
-      UnsupportedEncodingTypeException, CodecNotRegisteredForTypeException {
+  public void processReturnsNoCacheRegions()
+      throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     Cache emptyCache = mock(Cache.class);;
     when(emptyCache.rootRegions())
         .thenReturn(Collections.unmodifiableSet(new HashSet<Region<String, String>>()));
-    Result<RegionAPI.GetRegionNamesResponse> result =
-        operationHandler.process(serializationServiceStub,
-            ProtobufRequestUtilities.createGetRegionNamesRequest(), emptyCache);
+    Result<RegionAPI.GetRegionNamesResponse> result = operationHandler.process(
+        serializationServiceStub, ProtobufRequestUtilities.createGetRegionNamesRequest(),
+        new ExecutionContext(emptyCache));
     Assert.assertTrue(result instanceof Success);
 
     RegionAPI.GetRegionNamesResponse getRegionsResponse = result.getMessage();

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandlerJUnitTest.java
index 5cfa6b3..6762f66 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandlerJUnitTest.java
@@ -19,6 +19,8 @@ import org.apache.geode.cache.DataPolicy;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionAttributes;
 import org.apache.geode.cache.Scope;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.MessageUtil;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.ClientProtocol;
@@ -57,8 +59,9 @@ public class GetRegionRequestOperationHandlerJUnitTest extends OperationHandlerJ
   }
 
   @Test
-  public void processReturnsCacheRegions() throws CodecAlreadyRegisteredForTypeException,
-      UnsupportedEncodingTypeException, CodecNotRegisteredForTypeException {
+  public void processReturnsCacheRegions()
+      throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
 
     RegionAttributes regionAttributesStub = mock(RegionAttributes.class);
     when(cacheStub.getRegion(TEST_REGION1)).thenReturn(region1Stub);
@@ -72,7 +75,7 @@ public class GetRegionRequestOperationHandlerJUnitTest extends OperationHandlerJ
 
 
     Result<RegionAPI.GetRegionResponse> result = operationHandler.process(serializationServiceStub,
-        MessageUtil.makeGetRegionRequest(TEST_REGION1), cacheStub);
+        MessageUtil.makeGetRegionRequest(TEST_REGION1), new ExecutionContext(cacheStub));
     RegionAPI.GetRegionResponse response = result.getMessage();
     BasicTypes.Region region = response.getRegion();
     Assert.assertEquals(TEST_REGION1, region.getName());
@@ -89,14 +92,15 @@ public class GetRegionRequestOperationHandlerJUnitTest extends OperationHandlerJ
   }
 
   @Test
-  public void processReturnsNoCacheRegions() throws CodecAlreadyRegisteredForTypeException,
-      UnsupportedEncodingTypeException, CodecNotRegisteredForTypeException {
+  public void processReturnsNoCacheRegions()
+      throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     Cache emptyCache = mock(Cache.class);
     when(emptyCache.rootRegions())
         .thenReturn(Collections.unmodifiableSet(new HashSet<Region<String, String>>()));
     String unknownRegionName = "UNKNOWN_REGION";
     Result<RegionAPI.GetRegionResponse> result = operationHandler.process(serializationServiceStub,
-        MessageUtil.makeGetRegionRequest(unknownRegionName), emptyCache);
+        MessageUtil.makeGetRegionRequest(unknownRegionName), new ExecutionContext(emptyCache));
     Assert.assertTrue(result instanceof Failure);
     Assert.assertEquals(ProtocolErrorCode.REGION_NOT_FOUND.codeValue,
         result.getErrorMessage().getError().getErrorCode());

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
index 0213bf7..af35f6b 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
@@ -16,6 +16,8 @@ package org.apache.geode.protocol.protobuf.operations;
 
 import com.google.protobuf.ByteString;
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.Failure;
 import org.apache.geode.protocol.protobuf.ProtocolErrorCode;
@@ -70,10 +72,10 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
   @Test
   public void processReturnsTheEncodedValueFromTheRegion()
       throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException {
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.GetRequest getRequest = generateTestRequest(false, false, false);
-    Result<RegionAPI.GetResponse> result =
-        operationHandler.process(serializationServiceStub, getRequest, cacheStub);
+    Result<RegionAPI.GetResponse> result = operationHandler.process(serializationServiceStub,
+        getRequest, new ExecutionContext(cacheStub));
 
     Assert.assertTrue(result instanceof Success);
     Assert.assertEquals(BasicTypes.EncodedValue.ValueCase.STRINGRESULT,
@@ -85,10 +87,10 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
   @Test
   public void processReturnsUnsucessfulResponseForInvalidRegion()
       throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException {
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.GetRequest getRequest = generateTestRequest(true, false, false);
-    Result<RegionAPI.GetResponse> response =
-        operationHandler.process(serializationServiceStub, getRequest, cacheStub);
+    Result<RegionAPI.GetResponse> response = operationHandler.process(serializationServiceStub,
+        getRequest, new ExecutionContext(cacheStub));
 
     Assert.assertTrue(response instanceof Failure);
     Assert.assertEquals(ProtocolErrorCode.REGION_NOT_FOUND.codeValue,
@@ -98,10 +100,10 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
   @Test
   public void processReturnsKeyNotFoundWhenKeyIsNotFound()
       throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException {
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.GetRequest getRequest = generateTestRequest(false, true, false);
-    Result<RegionAPI.GetResponse> response =
-        operationHandler.process(serializationServiceStub, getRequest, cacheStub);
+    Result<RegionAPI.GetResponse> response = operationHandler.process(serializationServiceStub,
+        getRequest, new ExecutionContext(cacheStub));
 
     Assert.assertTrue(response instanceof Success);
   }
@@ -109,10 +111,10 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
   @Test
   public void processReturnsLookupFailureWhenKeyFoundWithNoValue()
       throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException {
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.GetRequest getRequest = generateTestRequest(false, false, true);
-    Result<RegionAPI.GetResponse> response =
-        operationHandler.process(serializationServiceStub, getRequest, cacheStub);
+    Result<RegionAPI.GetResponse> response = operationHandler.process(serializationServiceStub,
+        getRequest, new ExecutionContext(cacheStub));
 
     Assert.assertTrue(response instanceof Success);
   }
@@ -120,7 +122,8 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
   @Test
   public void processReturnsErrorWhenUnableToDecodeRequest()
       throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException, UnsupportedEncodingException {
+      CodecNotRegisteredForTypeException, UnsupportedEncodingException,
+      InvalidExecutionContextException {
     CodecNotRegisteredForTypeException exception =
         new CodecNotRegisteredForTypeException("error finding codec for type");
     when(serializationServiceStub.decode(any(), any())).thenThrow(exception);
@@ -132,8 +135,8 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
         .setCustomEncodedValue(customEncodedValueBuilder).build();
     RegionAPI.GetRequest getRequest =
         ProtobufRequestUtilities.createGetRequest(TEST_REGION, encodedKey).getGetRequest();
-    Result<RegionAPI.GetResponse> response =
-        operationHandler.process(serializationServiceStub, getRequest, cacheStub);
+    Result<RegionAPI.GetResponse> response = operationHandler.process(serializationServiceStub,
+        getRequest, new ExecutionContext(cacheStub));
 
     Assert.assertTrue(response instanceof Failure);
     Assert.assertEquals(ProtocolErrorCode.VALUE_ENCODING_ERROR.codeValue,

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandlerJUnitTest.java
index c13d542..d3fff49 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandlerJUnitTest.java
@@ -15,6 +15,8 @@
 package org.apache.geode.protocol.protobuf.operations;
 
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.RegionAPI;
 import org.apache.geode.protocol.protobuf.Result;
@@ -66,12 +68,13 @@ public class PutAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
   }
 
   @Test
-  public void processInsertsMultipleValidEntriesInCache() throws UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException, CodecAlreadyRegisteredForTypeException {
+  public void processInsertsMultipleValidEntriesInCache()
+      throws UnsupportedEncodingTypeException, CodecNotRegisteredForTypeException,
+      CodecAlreadyRegisteredForTypeException, InvalidExecutionContextException {
     PutAllRequestOperationHandler operationHandler = new PutAllRequestOperationHandler();
 
     Result<RegionAPI.PutAllResponse> result = operationHandler.process(serializationServiceStub,
-        generateTestRequest(false, true), cacheStub);
+        generateTestRequest(false, true), new ExecutionContext(cacheStub));
 
     Assert.assertTrue(result instanceof Success);
 
@@ -85,7 +88,7 @@ public class PutAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
     PutAllRequestOperationHandler operationHandler = new PutAllRequestOperationHandler();
 
     Result<RegionAPI.PutAllResponse> result = operationHandler.process(serializationServiceStub,
-        generateTestRequest(true, true), cacheStub);
+        generateTestRequest(true, true), new ExecutionContext(cacheStub));
 
     assertTrue(result instanceof Success);
     verify(regionMock).put(TEST_KEY1, TEST_VALUE1);
@@ -104,7 +107,7 @@ public class PutAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
     PutAllRequestOperationHandler operationHandler = new PutAllRequestOperationHandler();
 
     Result<RegionAPI.PutAllResponse> result = operationHandler.process(serializationServiceStub,
-        generateTestRequest(false, false), cacheStub);
+        generateTestRequest(false, false), new ExecutionContext(cacheStub));
 
     assertTrue(result instanceof Success);
 

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandlerJUnitTest.java
index fc697e4..64d9f67 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandlerJUnitTest.java
@@ -16,6 +16,8 @@ package org.apache.geode.protocol.protobuf.operations;
 
 import com.google.protobuf.ByteString;
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.Failure;
 import org.apache.geode.protocol.protobuf.ProtocolErrorCode;
@@ -61,11 +63,12 @@ public class PutRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
   }
 
   @Test
-  public void test_puttingTheEncodedEntryIntoRegion() throws UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException, CodecAlreadyRegisteredForTypeException {
+  public void test_puttingTheEncodedEntryIntoRegion()
+      throws UnsupportedEncodingTypeException, CodecNotRegisteredForTypeException,
+      CodecAlreadyRegisteredForTypeException, InvalidExecutionContextException {
     PutRequestOperationHandler operationHandler = new PutRequestOperationHandler();
-    Result<RegionAPI.PutResponse> result =
-        operationHandler.process(serializationServiceStub, generateTestRequest(), cacheStub);
+    Result<RegionAPI.PutResponse> result = operationHandler.process(serializationServiceStub,
+        generateTestRequest(), new ExecutionContext(cacheStub));
 
     assertTrue(result instanceof Success);
 
@@ -74,9 +77,9 @@ public class PutRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
   }
 
   @Test
-  public void test_invalidEncodingType()
-      throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException, UnsupportedEncodingException {
+  public void test_invalidEncodingType() throws CodecAlreadyRegisteredForTypeException,
+      UnsupportedEncodingTypeException, CodecNotRegisteredForTypeException,
+      UnsupportedEncodingException, InvalidExecutionContextException {
     String exceptionText = "unsupported type!";
     UnsupportedEncodingTypeException exception =
         new UnsupportedEncodingTypeException(exceptionText);
@@ -95,8 +98,8 @@ public class PutRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
     BasicTypes.Entry testEntry = ProtobufUtilities.createEntry(encodedKey, testValue);
     RegionAPI.PutRequest putRequest =
         ProtobufRequestUtilities.createPutRequest(TEST_REGION, testEntry).getPutRequest();
-    Result<RegionAPI.PutResponse> result =
-        operationHandler.process(serializationServiceStub, putRequest, cacheStub);
+    Result<RegionAPI.PutResponse> result = operationHandler.process(serializationServiceStub,
+        putRequest, new ExecutionContext(cacheStub));
 
     assertTrue(result instanceof Failure);
     assertEquals(ProtocolErrorCode.VALUE_ENCODING_ERROR.codeValue,
@@ -104,12 +107,13 @@ public class PutRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
   }
 
   @Test
-  public void test_RegionNotFound() throws CodecAlreadyRegisteredForTypeException,
-      UnsupportedEncodingTypeException, CodecNotRegisteredForTypeException {
+  public void test_RegionNotFound()
+      throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     when(cacheStub.getRegion(TEST_REGION)).thenReturn(null);
     PutRequestOperationHandler operationHandler = new PutRequestOperationHandler();
-    Result<RegionAPI.PutResponse> result =
-        operationHandler.process(serializationServiceStub, generateTestRequest(), cacheStub);
+    Result<RegionAPI.PutResponse> result = operationHandler.process(serializationServiceStub,
+        generateTestRequest(), new ExecutionContext(cacheStub));
 
     assertTrue(result instanceof Failure);
     assertEquals(ProtocolErrorCode.REGION_NOT_FOUND.codeValue,
@@ -117,13 +121,14 @@ public class PutRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
   }
 
   @Test
-  public void test_RegionThrowsClasscastException() throws CodecAlreadyRegisteredForTypeException,
-      UnsupportedEncodingTypeException, CodecNotRegisteredForTypeException {
+  public void test_RegionThrowsClasscastException()
+      throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     when(regionMock.put(any(), any())).thenThrow(ClassCastException.class);
 
     PutRequestOperationHandler operationHandler = new PutRequestOperationHandler();
-    Result<RegionAPI.PutResponse> result =
-        operationHandler.process(serializationServiceStub, generateTestRequest(), cacheStub);
+    Result<RegionAPI.PutResponse> result = operationHandler.process(serializationServiceStub,
+        generateTestRequest(), new ExecutionContext(cacheStub));
 
     assertTrue(result instanceof Failure);
     assertEquals(ProtocolErrorCode.CONSTRAINT_VIOLATION.codeValue,

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
index 3b917b7..47d6231 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
@@ -16,6 +16,8 @@ package org.apache.geode.protocol.protobuf.operations;
 
 import com.google.protobuf.ByteString;
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.ClientProtocol;
 import org.apache.geode.protocol.protobuf.Failure;
@@ -71,10 +73,10 @@ public class RemoveRequestOperationHandlerJUnitTest extends OperationHandlerJUni
   @Test
   public void processValidKeyRemovesTheEntryAndReturnSuccess()
       throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException {
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.RemoveRequest removeRequest = generateTestRequest(false, false).getRemoveRequest();
-    Result<RegionAPI.RemoveResponse> result =
-        operationHandler.process(serializationServiceStub, removeRequest, cacheStub);
+    Result<RegionAPI.RemoveResponse> result = operationHandler.process(serializationServiceStub,
+        removeRequest, new ExecutionContext(cacheStub));
 
     assertTrue(result instanceof Success);
     verify(regionStub).remove(TEST_KEY);
@@ -83,10 +85,10 @@ public class RemoveRequestOperationHandlerJUnitTest extends OperationHandlerJUni
   @Test
   public void processReturnsUnsucessfulResponseForInvalidRegion()
       throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException {
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.RemoveRequest removeRequest = generateTestRequest(true, false).getRemoveRequest();
-    Result<RegionAPI.RemoveResponse> result =
-        operationHandler.process(serializationServiceStub, removeRequest, cacheStub);
+    Result<RegionAPI.RemoveResponse> result = operationHandler.process(serializationServiceStub,
+        removeRequest, new ExecutionContext(cacheStub));
 
     assertTrue(result instanceof Failure);
     assertEquals(ProtocolErrorCode.REGION_NOT_FOUND.codeValue,
@@ -96,10 +98,10 @@ public class RemoveRequestOperationHandlerJUnitTest extends OperationHandlerJUni
   @Test
   public void processReturnsSuccessWhenKeyIsNotFound()
       throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException {
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.RemoveRequest removeRequest = generateTestRequest(false, true).getRemoveRequest();
-    Result<RegionAPI.RemoveResponse> result =
-        operationHandler.process(serializationServiceStub, removeRequest, cacheStub);
+    Result<RegionAPI.RemoveResponse> result = operationHandler.process(serializationServiceStub,
+        removeRequest, new ExecutionContext(cacheStub));
 
     assertTrue(result instanceof Success);
   }
@@ -107,7 +109,8 @@ public class RemoveRequestOperationHandlerJUnitTest extends OperationHandlerJUni
   @Test
   public void processReturnsErrorWhenUnableToDecodeRequest()
       throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException, UnsupportedEncodingException {
+      CodecNotRegisteredForTypeException, UnsupportedEncodingException,
+      InvalidExecutionContextException {
     CodecNotRegisteredForTypeException exception =
         new CodecNotRegisteredForTypeException("error finding codec for type");
     when(serializationServiceStub.decode(any(), any())).thenThrow(exception);
@@ -120,8 +123,8 @@ public class RemoveRequestOperationHandlerJUnitTest extends OperationHandlerJUni
 
     RegionAPI.RemoveRequest removeRequest =
         ProtobufRequestUtilities.createRemoveRequest(TEST_REGION, encodedKey).getRemoveRequest();;
-    Result<RegionAPI.RemoveResponse> result =
-        operationHandler.process(serializationServiceStub, removeRequest, cacheStub);
+    Result<RegionAPI.RemoveResponse> result = operationHandler.process(serializationServiceStub,
+        removeRequest, new ExecutionContext(cacheStub));
 
     assertTrue(result instanceof Failure);
     assertEquals(ProtocolErrorCode.VALUE_ENCODING_ERROR.codeValue,


[08/25] geode git commit: GEODE-3445: Add gfsh connect option --skip-ssl-validation

Posted by ud...@apache.org.
GEODE-3445: Add gfsh connect option --skip-ssl-validation


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/b77e1c7d
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/b77e1c7d
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/b77e1c7d

Branch: refs/heads/feature/GEODE-3503
Commit: b77e1c7d1e1dc31a539496483c8e9f739155f021
Parents: 31e82d6
Author: Jared Stewart <js...@pivotal.io>
Authored: Mon Aug 21 14:02:36 2017 -0700
Committer: Jared Stewart <js...@pivotal.io>
Committed: Tue Aug 22 12:03:00 2017 -0700

----------------------------------------------------------------------
 ...shConnectToLocatorWithSSLAcceptanceTest.java | 110 +++++++++++++++++++
 .../geode/test/dunit/rules/gfsh/GfshRule.java   |   5 +-
 .../geode/test/dunit/rules/gfsh/GfshScript.java |  12 +-
 .../internal/cli/commands/ConnectCommand.java   |  17 +--
 4 files changed, 131 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/b77e1c7d/geode-assembly/src/test/java/org/apache/geode/management/GfshConnectToLocatorWithSSLAcceptanceTest.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/management/GfshConnectToLocatorWithSSLAcceptanceTest.java b/geode-assembly/src/test/java/org/apache/geode/management/GfshConnectToLocatorWithSSLAcceptanceTest.java
new file mode 100644
index 0000000..75d60a3
--- /dev/null
+++ b/geode-assembly/src/test/java/org/apache/geode/management/GfshConnectToLocatorWithSSLAcceptanceTest.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.management;
+
+import static org.apache.geode.distributed.ConfigurationProperties.SSL_CIPHERS;
+import static org.apache.geode.distributed.ConfigurationProperties.SSL_ENABLED_COMPONENTS;
+import static org.apache.geode.distributed.ConfigurationProperties.SSL_KEYSTORE;
+import static org.apache.geode.distributed.ConfigurationProperties.SSL_KEYSTORE_PASSWORD;
+import static org.apache.geode.distributed.ConfigurationProperties.SSL_KEYSTORE_TYPE;
+import static org.apache.geode.distributed.ConfigurationProperties.SSL_PROTOCOLS;
+import static org.apache.geode.distributed.ConfigurationProperties.SSL_TRUSTSTORE;
+import static org.apache.geode.distributed.ConfigurationProperties.SSL_TRUSTSTORE_PASSWORD;
+import static org.apache.geode.distributed.ConfigurationProperties.SSL_TRUSTSTORE_TYPE;
+import static org.apache.geode.util.test.TestUtil.getResourcePath;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Properties;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
+
+import org.apache.geode.security.SecurableCommunicationChannels;
+import org.apache.geode.test.dunit.rules.gfsh.GfshRule;
+import org.apache.geode.test.dunit.rules.gfsh.GfshScript;
+import org.apache.geode.test.junit.categories.AcceptanceTest;
+
+@Category(AcceptanceTest.class)
+public class GfshConnectToLocatorWithSSLAcceptanceTest {
+  @Rule
+  public GfshRule gfshRule = new GfshRule();
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+  private File sslPropertiesFile;
+
+  @Before
+  public void setup() throws IOException {
+    File jks = new File(getResourcePath(getClass(), "/ssl/trusted.keystore"));
+    assertThat(jks).exists();
+
+    Properties serverProps = new Properties();
+    serverProps.setProperty(SSL_ENABLED_COMPONENTS, SecurableCommunicationChannels.ALL);
+    serverProps.setProperty(SSL_KEYSTORE, jks.getAbsolutePath());
+    serverProps.setProperty(SSL_KEYSTORE_PASSWORD, "password");
+    serverProps.setProperty(SSL_KEYSTORE_TYPE, "JKS");
+    serverProps.setProperty(SSL_TRUSTSTORE, jks.getAbsolutePath());
+    serverProps.setProperty(SSL_TRUSTSTORE_PASSWORD, "password");
+    serverProps.setProperty(SSL_TRUSTSTORE_TYPE, "JKS");
+    serverProps.setProperty(SSL_CIPHERS, "any");
+    serverProps.setProperty(SSL_PROTOCOLS, "any");
+
+    sslPropertiesFile = temporaryFolder.newFile("ssl.properties");
+    serverProps.store(new FileOutputStream(sslPropertiesFile), null);
+
+    GfshScript startLocator =
+        GfshScript.of("start locator --name=locator --security-properties-file="
+            + sslPropertiesFile.getAbsolutePath());
+    gfshRule.execute(startLocator);
+  }
+
+  @Test
+  public void canConnectOverHttpWithUnsignedSSLCertificateIfSkipSslValidationIsSet()
+      throws Exception {
+    GfshScript connect =
+        GfshScript.of("connect --use-http --skip-ssl-validation --security-properties-file="
+            + sslPropertiesFile.getAbsolutePath());
+    gfshRule.execute(connect);
+  }
+
+  @Test
+  public void cannotConnectOverHttpWithUnsignedSSLCertificateIfSkipSslValidationIsNotSet()
+      throws Exception {
+    GfshScript connect = GfshScript
+        .of("connect --use-http --security-properties-file=" + sslPropertiesFile.getAbsolutePath())
+        .expectFailure();
+    gfshRule.execute(connect);
+  }
+
+  @Test
+  public void cannotConnectOverHttpWithoutSSL() throws Exception {
+    GfshScript connect = GfshScript.of("connect --use-http").expectFailure();
+    gfshRule.execute(connect);
+  }
+
+  @Test
+  public void canConnectOverJmxWithSSL() throws Exception {
+    GfshScript connect = GfshScript.of("connect --use-http=false --security-properties-file="
+        + sslPropertiesFile.getAbsolutePath());
+    gfshRule.execute(connect);
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/b77e1c7d/geode-assembly/src/test/java/org/apache/geode/test/dunit/rules/gfsh/GfshRule.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/test/dunit/rules/gfsh/GfshRule.java b/geode-assembly/src/test/java/org/apache/geode/test/dunit/rules/gfsh/GfshRule.java
index fa25f14..f77cc77 100644
--- a/geode-assembly/src/test/java/org/apache/geode/test/dunit/rules/gfsh/GfshRule.java
+++ b/geode-assembly/src/test/java/org/apache/geode/test/dunit/rules/gfsh/GfshRule.java
@@ -31,7 +31,6 @@ import java.util.stream.Collectors;
 import org.junit.rules.ExternalResource;
 import org.junit.rules.TemporaryFolder;
 
-import org.apache.geode.internal.lang.SystemUtils;
 import org.apache.geode.management.internal.cli.commands.StatusLocatorRealGfshTest;
 import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
 import org.apache.geode.test.dunit.rules.RequiresGeodeHome;
@@ -144,7 +143,7 @@ public class GfshRule extends ExternalResource {
         new CommandStringBuilder("stop server").addOption("dir", dir).toString();
 
     GfshScript stopServerScript =
-        new GfshScript(stopServerCommand).withName("stop-server-teardown").awaitQuietly();
+        new GfshScript(stopServerCommand).withName("teardown-stop-server").awaitQuietly();
     execute(stopServerScript);
   }
 
@@ -153,7 +152,7 @@ public class GfshRule extends ExternalResource {
         new CommandStringBuilder("stop locator").addOption("dir", dir).toString();
 
     GfshScript stopServerScript =
-        new GfshScript(stopLocatorCommand).withName("stop-locator-teardown").awaitQuietly();
+        new GfshScript(stopLocatorCommand).withName("teardown-stop-locator").awaitQuietly();
     execute(stopServerScript);
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/b77e1c7d/geode-assembly/src/test/java/org/apache/geode/test/dunit/rules/gfsh/GfshScript.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/test/dunit/rules/gfsh/GfshScript.java b/geode-assembly/src/test/java/org/apache/geode/test/dunit/rules/gfsh/GfshScript.java
index 52ef0d3..5b140e0 100644
--- a/geode-assembly/src/test/java/org/apache/geode/test/dunit/rules/gfsh/GfshScript.java
+++ b/geode-assembly/src/test/java/org/apache/geode/test/dunit/rules/gfsh/GfshScript.java
@@ -16,7 +16,6 @@ package org.apache.geode.test.dunit.rules.gfsh;
 
 import static org.assertj.core.api.Assertions.assertThat;
 
-import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -25,7 +24,7 @@ import org.apache.geode.management.internal.cli.util.ThreePhraseGenerator;
 
 public class GfshScript {
   private final String[] commands;
-  private String name = new ThreePhraseGenerator().generate('-');
+  private String name;
   private TimeUnit timeoutTimeUnit = TimeUnit.MINUTES;
   private int timeout = 1;
   private boolean awaitQuietly = false;
@@ -34,6 +33,7 @@ public class GfshScript {
 
   public GfshScript(String... commands) {
     this.commands = commands;
+    this.name = defaultName(commands);
   }
 
   /**
@@ -152,4 +152,12 @@ public class GfshScript {
   public String getName() {
     return name;
   }
+
+  private String defaultName(String... commands) {
+    try {
+      return commands[0].substring(0, commands[0].indexOf("-"));
+    } catch (Exception handled) {
+      return new ThreePhraseGenerator().generate('-');
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/b77e1c7d/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConnectCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConnectCommand.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConnectCommand.java
index 274f61c..d0f2e5a 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConnectCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConnectCommand.java
@@ -29,9 +29,11 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Properties;
 
+import javax.net.ssl.HostnameVerifier;
 import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.KeyManagerFactory;
 import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLSession;
 import javax.net.ssl.TrustManagerFactory;
 
 import org.apache.commons.lang.StringUtils;
@@ -112,7 +114,10 @@ public class ConnectCommand implements GfshCommand {
           help = CliStrings.CONNECT__SECURITY_PROPERTIES__HELP) final File gfSecurityPropertiesFile,
       @CliOption(key = {CliStrings.CONNECT__USE_SSL}, specifiedDefaultValue = "true",
           unspecifiedDefaultValue = "false",
-          help = CliStrings.CONNECT__USE_SSL__HELP) boolean useSsl)
+          help = CliStrings.CONNECT__USE_SSL__HELP) boolean useSsl,
+      @CliOption(key = {"skip-ssl-validation"}, specifiedDefaultValue = "true",
+          unspecifiedDefaultValue = "false",
+          help = "When connecting via HTTP, connects using 1-way SSL validation rather than 2-way SSL validation.") boolean skipSslValidation)
       throws MalformedURLException {
 
     Result result;
@@ -145,12 +150,10 @@ public class ConnectCommand implements GfshCommand {
       gfProperties.setProperty(UserInputProperty.PASSWORD.getKey(), password);
     }
 
-    // TODO: refactor this to be more readable, like
-    /*
-     * if(useHttp) connectOverHttp else if(jmxManagerEndPoint==null) connectToLocator to get the
-     * jmxManagerEndPoint else connectTo jmxManagerEndPoint
-     */
     if (useHttp) {
+      if (skipSslValidation) {
+        HttpsURLConnection.setDefaultHostnameVerifier((String s, SSLSession sslSession) -> true);
+      }
       result = httpConnect(gfProperties, url);
     } else {
       result = jmxConnect(gfProperties, useSsl, jmxManagerEndPoint, locatorEndPoint, false);
@@ -160,8 +163,6 @@ public class ConnectCommand implements GfshCommand {
   }
 
   /**
-   *
-   * @param gfsh
    * @param useSsl if true, and no files/options passed, we would still insist on prompting for ssl
    *        config (considered only when the last three parameters are null)
    * @param gfPropertiesFile gemfire properties file, can be null


[10/25] geode git commit: GEODE-3047 Atomic creation flag is not set if the region is recovered from the disk.

Posted by ud...@apache.org.
GEODE-3047 Atomic creation flag is not set if the region is recovered from the disk.

While creating a bucket region, to satisfy the redundancy copies, the bucket regions
are created on all available nodes. The initialization (setting persistentIDs) of
these buckets is done after creating buckets on all the nodes. This introduced
a race condition for the bucket regions which are recovered from the disk to
exchange their old id with the peer node, resulting in a
ConflictingPersistentDataException.

The changes are done so that the regions' persistent ids are set as soon as they
are created/initialized.


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/35d3a97e
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/35d3a97e
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/35d3a97e

Branch: refs/heads/feature/GEODE-3503
Commit: 35d3a97edd5c6e8b3f4dd73af51771b0c4728cf6
Parents: e2c3d53
Author: Anil <ag...@pivotal.io>
Authored: Thu Aug 17 16:29:57 2017 -0700
Committer: Anil <ag...@pivotal.io>
Committed: Tue Aug 22 14:19:16 2017 -0700

----------------------------------------------------------------------
 .../cache/BucketPersistenceAdvisor.java         |  7 +++
 .../cache/BucketPersistenceAdvisorTest.java     | 56 ++++++++++++++++++++
 2 files changed, 63 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/35d3a97e/geode-core/src/main/java/org/apache/geode/internal/cache/BucketPersistenceAdvisor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketPersistenceAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketPersistenceAdvisor.java
index 367bb75..423fb64 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketPersistenceAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketPersistenceAdvisor.java
@@ -463,7 +463,14 @@ public class BucketPersistenceAdvisor extends PersistenceAdvisorImpl {
     super.setOnline(false, true, newId);
   }
 
+  public boolean isAtomicCreation() {
+    return this.atomicCreation;
+  }
+
   public void setAtomicCreation(boolean atomicCreation) {
+    if (getPersistentID() != null) {
+      return;
+    }
     synchronized (lock) {
       this.atomicCreation = atomicCreation;
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/35d3a97e/geode-core/src/test/java/org/apache/geode/internal/cache/BucketPersistenceAdvisorTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/BucketPersistenceAdvisorTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/BucketPersistenceAdvisorTest.java
new file mode 100644
index 0000000..d97f3e4
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/BucketPersistenceAdvisorTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.geode.distributed.DistributedLockService;
+import org.apache.geode.internal.cache.PartitionedRegion.BucketLock;
+import org.apache.geode.internal.cache.persistence.PersistentMemberID;
+import org.apache.geode.internal.cache.persistence.PersistentMemberManager;
+import org.apache.geode.internal.cache.persistence.PersistentMemberView;
+import org.apache.geode.test.junit.categories.UnitTest;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.*;
+
+@Category(UnitTest.class)
+public class BucketPersistenceAdvisorTest {
+
+  @Test
+  public void shouldBeMockable() throws Exception {
+    BucketPersistenceAdvisor mockBucketAdvisor = mock(BucketPersistenceAdvisor.class);
+    when(mockBucketAdvisor.isRecovering()).thenReturn(true);
+    assertTrue(mockBucketAdvisor.isRecovering());
+  }
+
+  @Test
+  public void atomicCreationShouldNotBeSetForPersistentRegion() throws Exception {
+    PersistentMemberID mockPersistentID = mock(PersistentMemberID.class);
+    PersistentMemberView mockStorage = mock(PersistentMemberView.class);
+    when(mockStorage.getMyPersistentID()).thenReturn(mockPersistentID);
+
+    BucketPersistenceAdvisor bpa = new BucketPersistenceAdvisor(
+        mock(CacheDistributionAdvisor.class), mock(DistributedLockService.class), mockStorage,
+        "/region", mock(DiskRegionStats.class), mock(PersistentMemberManager.class),
+        mock(BucketLock.class), mock(ProxyBucketRegion.class));
+    bpa.setAtomicCreation(true);
+    assertFalse(bpa.isAtomicCreation());
+  }
+
+}


[24/25] geode git commit: GEODE-3507 PartitionedRegionRedundancyTracker now does not allow actualRedundantCopies stat to be negative

Posted by ud...@apache.org.
GEODE-3507 PartitionedRegionRedundancyTracker now does not allow actualRedundantCopies stat to be negative


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/039edfce
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/039edfce
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/039edfce

Branch: refs/heads/feature/GEODE-3503
Commit: 039edfce4bd5f58f967b98fad0ee72c4a0adfba4
Parents: a229933
Author: Lynn Gallinat <lg...@pivotal.io>
Authored: Wed Aug 23 11:49:54 2017 -0700
Committer: Lynn Gallinat <lg...@pivotal.io>
Committed: Wed Aug 23 11:52:37 2017 -0700

----------------------------------------------------------------------
 .../internal/cache/PartitionedRegionRedundancyTracker.java     | 2 +-
 .../internal/cache/PartitionedRegionRedundancyTrackerTest.java | 6 ++++++
 2 files changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/039edfce/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionRedundancyTracker.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionRedundancyTracker.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionRedundancyTracker.java
index 38ef61b..1ff8bc9 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionRedundancyTracker.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionRedundancyTracker.java
@@ -134,6 +134,6 @@ class PartitionedRegionRedundancyTracker {
   }
 
   void setActualRedundancy(int actualRedundancy) {
-    stats.setActualRedundantCopies(actualRedundancy);
+    stats.setActualRedundantCopies(Math.max(actualRedundancy, 0));
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/039edfce/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionRedundancyTrackerTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionRedundancyTrackerTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionRedundancyTrackerTest.java
index 0917835..0eba655 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionRedundancyTrackerTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionRedundancyTrackerTest.java
@@ -117,4 +117,10 @@ public class PartitionedRegionRedundancyTrackerTest {
     redundancyTracker.decrementNoCopiesBucketCount();
     assertEquals(1, redundancyTracker.getLowestBucketCopies());
   }
+
+  @Test
+  public void willNotSetActualRedundantCopiesStatBelowZero() {
+    redundancyTracker.setActualRedundancy(-1);
+    assertEquals(0, stats.getActualRedundantCopies());
+  }
 }


[18/25] geode git commit: GEODE-3395 Variable-ize product version and name in user guide - Managing

Posted by ud...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/network_partitioning/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/network_partitioning/chapter_overview.html.md.erb b/geode-docs/managing/network_partitioning/chapter_overview.html.md.erb
index 98d3c0b..756fa40 100644
--- a/geode-docs/managing/network_partitioning/chapter_overview.html.md.erb
+++ b/geode-docs/managing/network_partitioning/chapter_overview.html.md.erb
@@ -19,29 +19,29 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Apache Geode architecture and management features help detect and resolve network partition problems.
+<%=vars.product_name_long%> architecture and management features help detect and resolve network partition problems.
 
--   **[How Network Partitioning Management Works](../../managing/network_partitioning/how_network_partitioning_management_works.html)**
+-   **[How Network Partitioning Management Works](how_network_partitioning_management_works.html)**
 
-    Geode handles network outages by using a weighting system to determine whether the remaining available members have a sufficient quorum to continue as a distributed system.
+    <%=vars.product_name%> handles network outages by using a weighting system to determine whether the remaining available members have a sufficient quorum to continue as a distributed system.
 
--   **[Failure Detection and Membership Views](../../managing/network_partitioning/failure_detection.html)**
+-   **[Failure Detection and Membership Views](failure_detection.html)**
 
-    Geode uses failure detection to remove unresponsive members from membership views.
+    <%=vars.product_name%> uses failure detection to remove unresponsive members from membership views.
 
--   **[Membership Coordinators, Lead Members and Member Weighting](../../managing/network_partitioning/membership_coordinators_lead_members_and_weighting.html)**
+-   **[Membership Coordinators, Lead Members and Member Weighting](membership_coordinators_lead_members_and_weighting.html)**
 
     Network partition detection uses a designated membership coordinator and a weighting system that accounts for a lead member to determine whether a network partition has occurred.
 
--   **[Network Partitioning Scenarios](../../managing/network_partitioning/network_partitioning_scenarios.html)**
+-   **[Network Partitioning Scenarios](network_partitioning_scenarios.html)**
 
     This topic describes network partitioning scenarios and what happens to the partitioned sides of the distributed system.
 
--   **[Configure Apache Geode to Handle Network Partitioning](../../managing/network_partitioning/handling_network_partitioning.html)**
+-   **[Configure <%=vars.product_name_long%> to Handle Network Partitioning](handling_network_partitioning.html)**
 
     This section lists the configuration steps for network partition detection.
 
--   **[Preventing Network Partitions](../../managing/network_partitioning/preventing_network_partitions.html)**
+-   **[Preventing Network Partitions](preventing_network_partitions.html)**
 
     This section provides a short list of things you can do to prevent network partition from occurring.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/network_partitioning/failure_detection.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/network_partitioning/failure_detection.html.md.erb b/geode-docs/managing/network_partitioning/failure_detection.html.md.erb
index 223b3d9..b79b51d 100644
--- a/geode-docs/managing/network_partitioning/failure_detection.html.md.erb
+++ b/geode-docs/managing/network_partitioning/failure_detection.html.md.erb
@@ -19,7 +19,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Geode uses failure detection to remove unresponsive members from membership views.
+<%=vars.product_name%> uses failure detection to remove unresponsive members from membership views.
 
 ## <a id="concept_CFD13177F78C456095622151D6EE10EB__section_1AAE6C92FED249EFBA476D8A480B8E51" class="no-quick-link"></a>Failure Detection
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/network_partitioning/handling_network_partitioning.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/network_partitioning/handling_network_partitioning.html.md.erb b/geode-docs/managing/network_partitioning/handling_network_partitioning.html.md.erb
index a227597..20eac01 100644
--- a/geode-docs/managing/network_partitioning/handling_network_partitioning.html.md.erb
+++ b/geode-docs/managing/network_partitioning/handling_network_partitioning.html.md.erb
@@ -1,6 +1,4 @@
----
-title:  Configure Apache Geode to Handle Network Partitioning
----
+<% set_title("Configure", product_name_long, "to Handle Network Partitioning") %>
 
 <!--
 Licensed to the Apache Software Foundation (ASF) under one or more
@@ -60,6 +58,6 @@ The system uses a combination of member coordinators and system members, designa
     -   If the system has clients connecting to it, the clients' `cache.xml` pool `read-timeout` should be set to at least three times the `member-timeout` setting in the server's `gemfire.properties` file. The default pool `read-timeout` setting is 10000 milliseconds.
     -   You can adjust the default weights of members by specifying the system property `gemfire.member-weight` upon startup. For example, if you have some VMs that host a needed service, you could assign them a higher weight upon startup.
 
--   By default, members that are forced out of the distributed system by a network partition event will automatically restart and attempt to reconnect. Data members will attempt to reinitialize the cache. See [Handling Forced Cache Disconnection Using Autoreconnect](../autoreconnect/member-reconnect.html).
+-   By default, members that are forced out of the distributed system by a network partition event will automatically restart and attempt to reconnect. Data members will attempt to reinitialize the cache. See [Handling Forced Cache Disconnection Using Autoreconnect](../member-reconnect.html).
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/network_partitioning/how_network_partitioning_management_works.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/network_partitioning/how_network_partitioning_management_works.html.md.erb b/geode-docs/managing/network_partitioning/how_network_partitioning_management_works.html.md.erb
index 93a14ac..75c34b7 100644
--- a/geode-docs/managing/network_partitioning/how_network_partitioning_management_works.html.md.erb
+++ b/geode-docs/managing/network_partitioning/how_network_partitioning_management_works.html.md.erb
@@ -19,14 +19,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Geode handles network outages by using a weighting system to determine whether the remaining available members have a sufficient quorum to continue as a distributed system.
+<%=vars.product_name%> handles network outages by using a weighting system to determine whether the remaining available members have a sufficient quorum to continue as a distributed system.
 
 <a id="how_network_partitioning_management_works__section_548146BB8C24412CB7B43E6640272882"></a>
 Individual members are each assigned a weight, and the quorum is determined by comparing the total weight of currently responsive members to the previous total weight of responsive members.
 
 Your distributed system can split into separate running systems when members lose the ability to see each other. The typical cause of this problem is a failure in the network. When a partitioned system is detected, only one side of the system keeps running and the other side automatically shuts down.
 
-The network partitioning detection feature is enabled by default with a true value for the `enable-network-partition-detection` property. See [Configure Apache Geode to Handle Network Partitioning](handling_network_partitioning.html#handling_network_partitioning) for details. Quorum weight calculations are always performed and logged regardless of this configuration setting.
+The network partitioning detection feature is enabled by default with a true value for the `enable-network-partition-detection` property. See [Configure <%=vars.product_name_long%> to Handle Network Partitioning](handling_network_partitioning.html#handling_network_partitioning) for details. Quorum weight calculations are always performed and logged regardless of this configuration setting.
 
 The overall process for detecting a network partition is as follows:
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/network_partitioning/membership_coordinators_lead_members_and_weighting.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/network_partitioning/membership_coordinators_lead_members_and_weighting.html.md.erb b/geode-docs/managing/network_partitioning/membership_coordinators_lead_members_and_weighting.html.md.erb
index cb21f54..ccb0354 100644
--- a/geode-docs/managing/network_partitioning/membership_coordinators_lead_members_and_weighting.html.md.erb
+++ b/geode-docs/managing/network_partitioning/membership_coordinators_lead_members_and_weighting.html.md.erb
@@ -23,7 +23,7 @@ Network partition detection uses a designated membership coordinator and a weigh
 
 ## <a id="concept_23C2606D59754106AFBFE17515DF4330__section_7C67F1D30C1645CC8489E481873691D9" class="no-quick-link"></a>Membership Coordinators and Lead Members
 
-The membership coordinator is a member that manages entry and exit of other members of the distributed system. With network partition detection enabled, the coordinator can be any Geode member but locators are preferred. In a locator-based system, if all locators are in the reconnecting state, the system continues to function, but new members are not able to join until a locator has successfully reconnected. After a locator has reconnected, the reconnected locator will take over the role of coordinator.
+The membership coordinator is a member that manages entry and exit of other members of the distributed system. With network partition detection enabled, the coordinator can be any <%=vars.product_name%> member but locators are preferred. In a locator-based system, if all locators are in the reconnecting state, the system continues to function, but new members are not able to join until a locator has successfully reconnected. After a locator has reconnected, the reconnected locator will take over the role of coordinator.
 
 When a coordinator is shutting down, it sends out a view that removes itself from the list and the other members must determine who the new coordinator is.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/network_partitioning/preventing_network_partitions.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/network_partitioning/preventing_network_partitions.html.md.erb b/geode-docs/managing/network_partitioning/preventing_network_partitions.html.md.erb
index 6a983f5..27eeed2 100644
--- a/geode-docs/managing/network_partitioning/preventing_network_partitions.html.md.erb
+++ b/geode-docs/managing/network_partitioning/preventing_network_partitions.html.md.erb
@@ -25,4 +25,4 @@ To avoid a network partition:
 
 -   Use NIC teaming for redundant connectivity. See [http://www.cisco.com/en/US/docs/solutions/Enterprise/Data_Center/vmware/VMware.html#wp696452](http://www.cisco.com/en/US/docs/solutions/Enterprise/Data_Center/vmware/VMware.html#wp696452) for more information.
 -   It is best if all servers share a common network switch. Having multiple network switches increases the possibility of a network partition occurring. If multiple switches must be used, redundant routing paths should be available, if possible. The weight of members sharing a switch in a multi-switch configuration will determine which partition survives if there is an inter-switch failure.
--   In terms of Geode configuration, consider the weighting of members. For example, you could assign important processes a higher weight.
+-   In terms of <%=vars.product_name%> configuration, consider the weighting of members. For example, you could assign important processes a higher weight.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/region_compression.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/region_compression.html.md.erb b/geode-docs/managing/region_compression.html.md.erb
new file mode 100644
index 0000000..de831c8
--- /dev/null
+++ b/geode-docs/managing/region_compression.html.md.erb
@@ -0,0 +1,226 @@
+---
+title: Region Compression
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<a id="topic_r43_wgc_gl"></a>
+
+
+This section describes region compression, its benefits and usage.
+
+One way to reduce memory consumption by <%=vars.product_name%> is to enable compression in your regions. <%=vars.product_name%> allows you to compress in-memory region values using pluggable compressors (compression codecs). <%=vars.product_name%> includes the [Snappy](http://google.github.io/snappy/) compressor as the built-in compression codec; however, you can implement and specify a different compressor for each compressed region.
+
+## What Gets Compressed
+
+When you enable compression in a region, all values stored in the region are compressed while in memory. Keys and indexes are not compressed. New values are compressed when put into the in-memory cache and all values are decompressed when being read from the cache. Values are not compressed when persisted to disk. Values are decompressed before being sent over the wire to other peer members or clients.
+
+When compression is enabled, each value in the region is compressed, and each region entry is compressed as a single unit. It is not possible to compress individual fields of an entry.
+
+You can have a mix of compressed and non-compressed regions in the same cache.
+
+-   **[Guidelines on Using Compression](#concept_a2c_rhc_gl)**
+
+    This topic describes factors to consider when deciding on whether to use compression.
+
+-   **[How to Enable Compression in a Region](#topic_inm_whc_gl)**
+
+    This topic describes how to enable compression on your region.
+
+-   **[Working with Compressors](#topic_hqf_syj_g4)**
+
+    When using region compression, you can use the default Snappy compressor included with <%=vars.product_name%> or you can specify your own compressor.
+
+-   **[Comparing Performance of Compressed and Non-Compressed Regions](#topic_omw_j3c_gl)**
+
+    The comparative performance of compressed regions versus non-compressed regions can vary depending on how the region is being used and whether the region is hosted in a memory-bound JVM.
+
+## <a id="concept_a2c_rhc_gl" class="no-quick-link"></a>Guidelines on Using Compression
+
+This topic describes factors to consider when deciding on whether to use compression.
+
+Review the following guidelines when deciding on whether or not to enable compression in your region:
+
+-   **Use compression when JVM memory usage is too high.** Compression allows you to store more region data in-memory and to reduce the number of expensive garbage collection cycles that prevent JVMs from running out of memory when memory usage is high.
+
+    To determine if JVM memory usage is high, examine the following statistics:
+
+    -   vmStats-&gt;freeMemory
+    -   vmStats-&gt;maxMemory
+    -   ConcurrentMarkSweep-&gt;collectionTime
+
+    If the amount of free memory regularly drops below 20% - 25% or the duration of the garbage collection cycles is generally on the high side, then the regions hosted on that JVM are good candidates for having compression enabled.
+
+-   **Consider the types and lengths of the fields in the region's entries.** Since compression is performed on each entry separately (and not on the region as a whole), consider the potential for duplicate data across a single entry. Duplicate bytes are compressed more easily. Also, since region entries are first serialized into a byte area before being compressed, how well the data might compress is determined by the number and length of duplicate bytes across the entire entry and not just a single field. Finally, the larger the entry the more likely compression will achieve good results as the potential for duplicate bytes, and a series of duplicate bytes, increases.
+-   **Consider the type of data you wish to compress.** The type of data stored has a significant impact on how well the data may compress. String data will generally compress better than numeric data simply because string bytes are far more likely to repeat; however, that may not always be the case. For example, a region entry that holds a couple of short, unique strings may not provide as much memory savings when compressed as another region entry that holds a large number of integer values. In short, when evaluating the potential gains of compressing a region, consider the likelihood of having duplicate bytes, and more importantly the length of a series of duplicate bytes, for a single, serialized region entry. In addition, data that has already been compressed, such as JPEG format files, can actually cause more memory to be used.
+-   **Compress if you are storing large text values.** Compression is beneficial if you are storing large text values (such as JSON or XML) or blobs in <%=vars.product_name%> that would benefit from compression.
+-   **Consider whether fields being queried against are indexed.** You can query against compressed regions; however, if the fields you are querying against have not been indexed, then the fields must be decompressed before they can be used for comparison. In short, you may incur some query performance costs when querying against non-indexed fields.
+-   **Objects stored in the compression region must be serializable.** Compression only operates on byte arrays, therefore objects being stored in a compressed region must be serializable and deserializable. The objects can either implement the Serializable interface or use one of the other <%=vars.product_name%> serialization mechanisms (such as PdxSerializable). Implementers should always be aware that when compression is enabled the instance of an object put into a region will not be the same instance when taken out. Therefore, transient attributes will lose their value when the containing object is put into and then taken out of a region.
+
+-   **Compressed regions will enable cloning by default.** Setting a compressor and then disabling cloning results in an exception. The options are incompatible because the process of compressing/serializing and then decompressing/deserializing will result in a different instance of the object being created and that may be interpreted as cloning the object.
+
+<a id="topic_inm_whc_gl"></a>
+
+## <a id="topic_inm_whc_gl" class="no-quick-link"></a>How to Enable Compression in a Region
+
+This topic describes how to enable compression on your region.
+
+To enable compression on your region, set the following region attribute in your cache.xml:
+
+``` pre
+<?xml version="1.0" encoding= "UTF-8"?>
+<cache xmlns="http://geode.apache.org/schema/cache"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://geode.apache.org/schema/cache http://geode.apache.org/schema/cache/cache-1.0.xsd"
+    version="1.0" lock-lease="120"  lock-timeout= "60" search-timeout= "300"  is-server= "true"  copy-on-read= "false" > 
+   <region name="compressedRegion" > 
+      <region-attributes data-policy="replicate" ... > 
+         <compressor>
+             <class-name>org.apache.geode.compression.SnappyCompressor</class-name>
+         </compressor>
+        ...
+      </region-attributes>
+   </region> 
+</cache>
+```
+
+In the Compressor element, specify the class-name for your compressor implementation. This example specifies the Snappy compressor, which is bundled with <%=vars.product_name%>. You can also specify a custom compressor. See [Working with Compressors](#topic_hqf_syj_g4) for an example.
+
+Compression can be enabled during region creation using gfsh or programmatically as well.
+
+Using gfsh:
+
+``` pre
+gfsh>create-region --name="CompressedRegion" --compressor="org.apache.geode.compression.SnappyCompressor"
+```
+
+API:
+
+``` pre
+regionFactory.setCompressor(new SnappyCompressor());
+```
+
+or
+
+``` pre
+regionFactory.setCompressor(SnappyCompressor.getDefaultInstance());
+```
+
+## How to Check Whether Compression is Enabled
+
+You can also check whether a region has compression enabled by querying which codec is being used. A null codec indicates that no compression is enabled for the region.
+
+``` pre
+Region myRegion = cache.getRegion("myRegion");
+Compressor compressor = myRegion.getAttributes().getCompressor();
+```
+
+## <a id="topic_hqf_syj_g4" class="no-quick-link"></a>Working with Compressors
+
+When using region compression, you can use the default Snappy compressor included with <%=vars.product_name%> or you can specify your own compressor.
+
+The compression API consists of a single interface that compression providers must implement. The default compressor (SnappyCompressor) is the single compression implementation that comes bundled with the product. Note that since the Compressor is stateless, there only needs to be a single instance in any JVM; however, multiple instances may be used without issue. The single, default instance of the SnappyCompressor may be retrieved with the `SnappyCompressor.getDefaultInstance()` static method.
+
+**Note:**
+The Snappy codec included with <%=vars.product_name%> cannot be used with Solaris deployments. Snappy is only supported on Linux, Windows, and macOS deployments of <%=vars.product_name%>.
+
+This example provides a custom Compressor implementation:
+
+``` pre
+package com.mybiz.myproduct.compression;
+
+import org.apache.geode.compression.Compressor;
+
+public class LZWCompressor implements Compressor {
+  private final LZWCodec lzwCodec = new LZWCodec(); 
+  
+  @Override
+  public byte[] compress(byte[] input) {
+         return lzwCodec.compress(input);
+  }
+
+  @Override
+  public byte[] decompress(byte[] input) {
+         return lzwCodec.decompress(input);
+  }
+}
+```
+
+To use the new custom compressor on a region:
+
+1.  Make sure that the new compressor package is available in the classpath of all JVMs that will host the region.
+2.  Configure the custom compressor for the region using any of the following mechanisms:
+
+    Using gfsh:
+
+    ``` pre
+    gfsh>create-region --name="CompressedRegion" \
+    --compressor="com.mybiz.myproduct.compression.LZWCompressor"
+    ```
+
+    Using API:
+
+    For example:
+
+    ``` pre
+    regionFactory.setCompressor(new LZWCompressor());
+    ```
+
+    cache.xml:
+
+    ``` pre
+    <region-attributes>
+     <compressor>
+         <class-name>com.mybiz.myproduct.compression.LZWCompressor</class-name>
+      </compressor>
+    </region-attributes>
+    ```
+
+## Changing the Compressor for an Already Compressed Region
+
+You typically enable compression on a region at the time of region creation. You cannot modify the Compressor or disable compression for the region while the region is online.
+
+However, if you need to change the compressor or disable compression, you can do so by performing the following steps:
+
+1.  Shut down the members hosting the region you wish to modify.
+2.  Modify the cache.xml file for the member either specifying a new compressor or removing the compressor attribute from the region.
+3.  Restart the member.
+
+## <a id="topic_omw_j3c_gl" class="no-quick-link"></a>Comparing Performance of Compressed and Non-Compressed Regions
+
+The comparative performance of compressed regions versus non-compressed regions can vary depending on how the region is being used and whether the region is hosted in a memory-bound JVM.
+
+When considering the cost of enabling compression, you should consider the relative cost of reading and writing compressed data as well as the cost of compression as a percentage of the total time spent managing entries in a region. As a general rule, enabling compression on a region will add 30% - 60% more overhead for region create and update operations than for region get operations. Because of this, enabling compression will create more overhead on regions that are write heavy than on regions that are read heavy.
+
+However, when attempting to evaluate the performance cost of enabling compression you should also consider the cost of compression relative to the overall cost of managing entries in a region. A region may be tuned in such a way that it is highly optimized for read and/or write performance. For example, a replicated region that does not save to disk will have much better read and write performance than a partitioned region that does save to disk. Enabling compression on a region that has been optimized for read and write performance will provide more noticeable results than using compression on regions that have not been optimized this way. More concretely, performance may degrade by several hundred percent on a read/write optimized region whereas it may only degrade by 5 to 10 percent on a non-optimized region.
+
+A final note on performance relates to the cost when enabling compression on regions in a memory bound JVM. Enabling compression generally assumes that the enclosing JVM is memory bound and therefore spends a lot of time for garbage collection. In that case performance may improve by as much as several hundred percent as the JVM will be running far fewer garbage collection cycles and spending less time when running a cycle.
+
+## Monitoring Compression Performance
+
+The following statistics provide monitoring for cache compression:
+
+-   `compressTime`
+-   `decompressTime`
+-   `compressions`
+-   `decompressions`
+-   `preCompressedBytes`
+-   `postCompressedBytes`
+
+See [Cache Performance (CachePerfStats)](../reference/statistics_list.html#section_DEF8D3644D3246AB8F06FE09A37DC5C8) for statistic descriptions.
+
+

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/region_compression/region_compression.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/region_compression/region_compression.html.md.erb b/geode-docs/managing/region_compression/region_compression.html.md.erb
deleted file mode 100644
index ac351dd..0000000
--- a/geode-docs/managing/region_compression/region_compression.html.md.erb
+++ /dev/null
@@ -1,226 +0,0 @@
----
-title: Region Compression
----
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-<a id="topic_r43_wgc_gl"></a>
-
-
-This section describes region compression, its benefits and usage.
-
-One way to reduce memory consumption by Geode is to enable compression in your regions. Geode allows you to compress in-memory region values using pluggable compressors (compression codecs). Geode includes the [Snappy](http://google.github.io/snappy/) compressor as the built-in compression codec; however, you can implement and specify a different compressor for each compressed region.
-
-## What Gets Compressed
-
-When you enable compression in a region, all values stored in the region are compressed while in memory. Keys and indexes are not compressed. New values are compressed when put into the in-memory cache and all values are decompressed when being read from the cache. Values are not compressed when persisted to disk. Values are decompressed before being sent over the wire to other peer members or clients.
-
-When compression is enabled, each value in the region is compressed, and each region entry is compressed as a single unit. It is not possible to compress individual fields of an entry.
-
-You can have a mix of compressed and non-compressed regions in the same cache.
-
--   **[Guidelines on Using Compression](#concept_a2c_rhc_gl)**
-
-    This topic describes factors to consider when deciding on whether to use compression.
-
--   **[How to Enable Compression in a Region](#topic_inm_whc_gl)**
-
-    This topic describes how to enable compression on your region.
-
--   **[Working with Compressors](#topic_hqf_syj_g4)**
-
-    When using region compression, you can use the default Snappy compressor included with Geode or you can specify your own compressor.
-
--   **[Comparing Performance of Compressed and Non-Compressed Regions](#topic_omw_j3c_gl)**
-
-    The comparative performance of compressed regions versus non-compressed regions can vary depending on how the region is being used and whether the region is hosted in a memory-bound JVM.
-
-## <a id="concept_a2c_rhc_gl" class="no-quick-link"></a>Guidelines on Using Compression
-
-This topic describes factors to consider when deciding on whether to use compression.
-
-Review the following guidelines when deciding on whether or not to enable compression in your region:
-
--   **Use compression when JVM memory usage is too high.** Compression allows you to store more region data in-memory and to reduce the number of expensive garbage collection cycles that prevent JVMs from running out of memory when memory usage is high.
-
-    To determine if JVM memory usage is high, examine the the following statistics:
-
-    -   vmStats&gt;freeMemory
-    -   vmStats-&gt;maxMemory
-    -   ConcurrentMarkSweep-&gt;collectionTime
-
-    If the amount of free memory regularly drops below 20% - 25% or the duration of the garbage collection cycles is generally on the high side, then the regions hosted on that JVM are good candidates for having compression enabled.
-
--   **Consider the types and lengths of the fields in the region's entries.** Since compression is performed on each entry separately (and not on the region as a whole), consider the potential for duplicate data across a single entry. Duplicate bytes are compressed more easily. Also, since region entries are first serialized into a byte area before being compressed, how well the data might compress is determined by the number and length of duplicate bytes across the entire entry and not just a single field. Finally, the larger the entry the more likely compression will achieve good results as the potential for duplicate bytes, and a series of duplicate bytes, increases.
--   **Consider the type of data you wish to compress.** The type of data stored has a significant impact on how well the data may compress. String data will generally compress better than numeric data simply because string bytes are far more likely to repeat; however, that may not always be the case. For example, a region entry that holds a couple of short, unique strings may not provide as much memory savings when compressed as another region entry that holds a large number of integer values. In short, when evaluating the potential gains of compressing a region, consider the likelihood of having duplicate bytes, and more importantly the length of a series of duplicate bytes, for a single, serialized region entry. In addition, data that has already been compressed, such as JPEG format files, can actually cause more memory to be used.
--   **Compress if you are storing large text values.** Compression is beneficial if you are storing large text values (such as JSON or XML) or blobs in Geode that would benefit from compression.
--   **Consider whether fields being queried against are indexed.** You can query against compressed regions; however, if the fields you are querying against have not been indexed, then the fields must be decompressed before they can be used for comparison. In short, you may incur some query performance costs when querying against non-indexed fields.
--   **Objects stored in the compression region must be serializable.** Compression only operates on byte arrays, therefore objects being stored in a compressed region must be serializable and deserializable. The objects can either implement the Serializable interface or use one of the other Geode serialization mechanisms (such as PdxSerializable). Implementers should always be aware that when compression is enabled the instance of an object put into a region will not be the same instance when taken out. Therefore, transient attributes will lose their value when the containing object is put into and then taken out of a region.
-
--   **Compressed regions will enable cloning by default.** Setting a compressor and then disabling cloning results in an exception. The options are incompatible because the process of compressing/serializing and then decompressing/deserializing will result in a different instance of the object being created and that may be interpreted as cloning the object.
-
-<a id="topic_inm_whc_gl"></a>
-
-## <a id="topic_inm_whc_gl" class="no-quick-link"></a>How to Enable Compression in a Region
-
-This topic describes how to enable compression on your region.
-
-To enable compression on your region, set the following region attribute in your cache.xml:
-
-``` pre
-<?xml version="1.0" encoding= "UTF-8"?>
-<cache xmlns="http://geode.apache.org/schema/cache"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://geode.apache.org/schema/cache http://geode.apache.org/schema/cache/cache-1.0.xsd"
-    version="1.0” lock-lease="120"  lock-timeout= "60" search-timeout= "300"  is-server= "true"  copy-on-read= "false" > 
-   <region name="compressedRegion" > 
-      <region-attributes data-policy="replicate" ... /> 
-         <compressor>
-             <class-name>org.apache.geode.compression.SnappyCompressor</class-name>
-         </compressor>
-        ...
-      </region-attributes>
-   </region> 
-</cache>
-```
-
-In the Compressor element, specify the class-name for your compressor implementation. This example specifies the Snappy compressor, which is bundled with Geode . You can also specify a custom compressor. See [Working with Compressors](#topic_hqf_syj_g4) for an example.
-
-Compression can be enabled during region creation using gfsh or programmatically as well.
-
-Using gfsh:
-
-``` pre
-gfsh>create-region --name=”CompressedRegion” --compressor=”org.apache.geode.compression.SnappyCompressor”;
-```
-
-API:
-
-``` pre
-regionFactory.setCompressor(new SnappyCompressor());
-```
-
-or
-
-``` pre
-regionFactory.setCompressor(SnappyCompressor.getDefaultInstance());
-```
-
-## How to Check Whether Compression is Enabled
-
-You can also check whether a region has compression enabled by querying which codec is being used. A null codec indicates that no compression is enabled for the region.
-
-``` pre
-Region myRegion = cache.getRegion("myRegion");
-Compressor compressor = myRegion.getAttributes().getCompressor();
-```
-
-## <a id="topic_hqf_syj_g4" class="no-quick-link"></a>Working with Compressors
-
-When using region compression, you can use the default Snappy compressor included with Geode or you can specify your own compressor.
-
-The compression API consists of a single interface that compression providers must implement. The default compressor (SnappyCompressor) is the single compression implementation that comes bundled with the product. Note that since the Compressor is stateless, there only needs to be a single instance in any JVM; however, multiple instances may be used without issue. The single, default instance of the SnappyCompressor may be retrieved with the `SnappyCompressor.getDefaultInstance()` static method.
-
-**Note:**
-The Snappy codec included with Geode cannot be used with Solaris deployments. Snappy is only supported on Linux, Windows, and macOS deployments of Geode.
-
-This example provides a custom Compressor implementation:
-
-``` pre
-package com.mybiz.myproduct.compression;
-
-import org.apache.geode.compression.Compressor;
-
-public class LZWCompressor implements Compressor {
-  private final LZWCodec lzwCodec = new LZWCodec(); 
-  
-  @Override
-  public byte[] compress(byte[] input) {
-         return lzwCodec.compress(input);
-  }
-
-  @Override
-  public byte[] decompress(byte[] input) {
-         return lzwCodec.decompress(input);
-  }
-}
-```
-
-To use the new custom compressor on a region:
-
-1.  Make sure that the new compressor package is available in the classpath of all JVMs that will host the region.
-2.  Configure the custom compressor for the region using any of the following mechanisms:
-
-    Using gfsh:
-
-    ``` pre
-    gfsh>create-region --name=”CompressedRegion” \
-    --compressor=”com.mybiz.myproduct.compression.LZWCompressor”
-    ```
-
-    Using API:
-
-    For example:
-
-    ``` pre
-    regionFactory.setCompressor(new LZWCompressor());
-    ```
-
-    cache.xml:
-
-    ``` pre
-    <region-attributes>
-     <Compressor>
-         <class-name>com.mybiz.myproduct.compression.LZWCompressor</class-name>
-      </Compressor>
-    </region-attributes>
-    ```
-
-## Changing the Compressor for an Already Compressed Region
-
-You typically enable compression on a region at the time of region creation. You cannot modify the Compressor or disable compression for the region while the region is online.
-
-However, if you need to change the compressor or disable compression, you can do so by performing the following steps:
-
-1.  Shut down the members hosting the region you wish to modify.
-2.  Modify the cache.xml file for the member either specifying a new compressor or removing the compressor attribute from the region.
-3.  Restart the member.
-
-## <a id="topic_omw_j3c_gl" class="no-quick-link"></a>Comparing Performance of Compressed and Non-Compressed Regions
-
-The comparative performance of compressed regions versus non-compressed regions can vary depending on how the region is being used and whether the region is hosted in a memory-bound JVM.
-
-When considering the cost of enabling compression, you should consider the relative cost of reading and writing compressed data as well as the cost of compression as a percentage of the total time spent managing entries in a region. As a general rule, enabling compression on a region will add 30% - 60% more overhead for region create and update operations than for region get operations. Because of this, enabling compression will create more overhead on regions that are write heavy than on regions that are read heavy.
-
-However, when attempting to evaluate the performance cost of enabling compression you should also consider the cost of compression relative to the overall cost of managing entries in a region. A region may be tuned in such a way that it is highly optimized for read and/or write performance. For example, a replicated region that does not save to disk will have much better read and write performance than a partitioned region that does save to disk. Enabling compression on a region that has been optimized for read and write performance will provide more noticeable results than using compression on regions that have not been optimized this way. More concretely, performance may degrade by several hundred percent on a read/write optimized region whereas it may only degrade by 5 to 10 percent on a non-optimized region.
-
-A final note on performance relates to the cost when enabling compression on regions in a memory bound JVM. Enabling compression generally assumes that the enclosing JVM is memory bound and therefore spends a lot of time for garbage collection. In that case performance may improve by as much as several hundred percent as the JVM will be running far fewer garbage collection cycles and spending less time when running a cycle.
-
-## Monitoring Compression Performance
-
-The following statistics provide monitoring for cache compression:
-
--   `compressTime`
--   `decompressTime`
--   `compressions`
--   `decompressions`
--   `preCompressedBytes`
--   `postCompressedBytes`
-
-See [Cache Performance (CachePerfStats)](../../reference/statistics_list.html#section_DEF8D3644D3246AB8F06FE09A37DC5C8) for statistic descriptions.
-
-

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/authentication_examples.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/authentication_examples.html.md.erb b/geode-docs/managing/security/authentication_examples.html.md.erb
index ce21bac..ce73399 100644
--- a/geode-docs/managing/security/authentication_examples.html.md.erb
+++ b/geode-docs/managing/security/authentication_examples.html.md.erb
@@ -21,8 +21,8 @@ limitations under the License.
 
 This example demonstrates the basics of an implementation of the
 `SecurityManager.authenticate` method.
-The remainder of the example may be found within the Apache Geode
-source code within the
+The remainder of the example may be found in the <%=vars.product_name_long%>
+source code in the
 `geode-core/src/main/java/org/apache/geode/examples/security` directory.
 
 Of course, the security implementation of every installation is unique,

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/authentication_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/authentication_overview.html.md.erb b/geode-docs/managing/security/authentication_overview.html.md.erb
index de8088e..0a6e0c5 100644
--- a/geode-docs/managing/security/authentication_overview.html.md.erb
+++ b/geode-docs/managing/security/authentication_overview.html.md.erb
@@ -22,20 +22,20 @@ limitations under the License.
 Authentication verifies the identities of components within the distributed
 system such as peers, clients, and those connecting to a JMX manager.
 
--   **[Implementing Authentication](../../managing/security/implementing_authentication.html)**
+-   **[Implementing Authentication](implementing_authentication.html)**
 
     All components of the distributed system authenticate the same way,
     through a custom-written method.
 
--   **[Encrypting Passwords for Use in cache.xml](../../managing/security/encrypting_passwords.html)**
+-   **[Encrypting Passwords for Use in cache.xml](encrypting_passwords.html)**
 
-    Apache Geode provides a gfsh utility to generate encrypted passwords.
+    <%=vars.product_name_long%> provides a gfsh utility to generate encrypted passwords.
 
--   **[Encrypt Credentials with Diffie-Hellman](../../managing/security/encrypting_with_diffie_helman.html)**
+-   **[Encrypt Credentials with Diffie-Hellman](encrypting_with_diffie_hellman.html)**
 
     For secure transmission of sensitive information, like passwords, you can encrypt credentials using the Diffie-Hellman key exchange algorithm.
 
--   **[Authentication Example](../../managing/security/authentication_examples.html)**
+-   **[Authentication Example](authentication_examples.html)**
 
     The example demonstrates the basics of an implementation of the
 `SecurityManager.authenticate` method.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/authorization_example.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/authorization_example.html.md.erb b/geode-docs/managing/security/authorization_example.html.md.erb
index 36182c7..8089220 100644
--- a/geode-docs/managing/security/authorization_example.html.md.erb
+++ b/geode-docs/managing/security/authorization_example.html.md.erb
@@ -21,7 +21,7 @@ limitations under the License.
 
 This example demonstrates the basics of an implementation of the
 `SecurityManager.authorize` method.
-The remainder of the example may be found within the Apache Geode
+The remainder of the example may be found within the <%=vars.product_name_long%>
 source code within the
 `geode-core/src/main/java/org/apache/geode/examples/security` directory.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/authorization_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/authorization_overview.html.md.erb b/geode-docs/managing/security/authorization_overview.html.md.erb
index beca4f2..3745ef8 100644
--- a/geode-docs/managing/security/authorization_overview.html.md.erb
+++ b/geode-docs/managing/security/authorization_overview.html.md.erb
@@ -23,11 +23,11 @@ Distributed system and cache operations can be restricted, intercepted and
 modified, or completely blocked based on configured access rights set for
 the various distributed system entities. 
 
--   **[Implementing Authorization](../../managing/security/implementing_authorization.html)**
+-   **[Implementing Authorization](implementing_authorization.html)**
 
     To use authorization for client/server systems, your client connections must be authenticated by their servers.
 
--   **[Authorization Example](../../managing/security/authorization_example.html)**
+-   **[Authorization Example](authorization_example.html)**
 
     This topic discusses the authorization example provided in the product under `templates/security` using `XmlAuthorization.java`, `XmlErrorHandler.java`, and `authz6_0.dtd`.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/chapter_overview.html.md.erb b/geode-docs/managing/security/chapter_overview.html.md.erb
index 98990f6..58f6fe3 100644
--- a/geode-docs/managing/security/chapter_overview.html.md.erb
+++ b/geode-docs/managing/security/chapter_overview.html.md.erb
@@ -21,27 +21,27 @@ limitations under the License.
 
 The security framework permits authentication of connecting components and authorization of operations for all communicating components of the distributed system.
 
--   **[Security Implementation Introduction and Overview](../../managing/security/implementing_security.html)**
+-   **[Security Implementation Introduction and Overview](implementing_security.html)**
 
     Encryption, SSL secure communication, authentication, and authorization help to secure the distributed system.
 
--   **[Security Detail Considerations](../../managing/security/security_audit_overview.html)**
+-   **[Security Detail Considerations](security_audit_overview.html)**
 
     This section gathers discrete details in one convenient location to better help you assess and configure the security of your environment.
 
--   **[Enable Security with Property Definitions](../../managing/security/enable_security.html)**
+-   **[Enable Security with Property Definitions](enable_security.html)**
 
--   **[Authentication](../../managing/security/authentication_overview.html)**
+-   **[Authentication](authentication_overview.html)**
 
     A distributed system using authentication bars malicious peers or clients, and deters inadvertent access to its cache.
 
--   **[Authorization](../../managing/security/authorization_overview.html)**
+-   **[Authorization](authorization_overview.html)**
 
     Client operations on a cache server can be restricted or completely blocked based on the roles and permissions assigned to the credentials submitted by the client.
 
--   **[Post Processing of Region Data](../../managing/security/post_processing.html)**
+-   **[Post Processing of Region Data](post_processing.html)**
 
--   **[SSL](../../managing/security/ssl_overview.html)**
+-   **[SSL](ssl_overview.html)**
 
     SSL protects your data in transit between applications.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/encrypting_passwords.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/encrypting_passwords.html.md.erb b/geode-docs/managing/security/encrypting_passwords.html.md.erb
index 975727e..c0e36ad 100644
--- a/geode-docs/managing/security/encrypting_passwords.html.md.erb
+++ b/geode-docs/managing/security/encrypting_passwords.html.md.erb
@@ -21,7 +21,7 @@ limitations under the License.
 <a id="topic_730CC61BA84F421494956E2B98BDE2A1"></a>
 
 
-Apache Geode provides a gfsh utility to generate encrypted passwords.
+<%=vars.product_name_long%> provides a gfsh utility to generate encrypted passwords.
 
 You may need to specify an encrypted password in `cache.xml` when configuring JNDI connections to external JDBC data sources. See [Configuring Database Connections Using JNDI](../../developing/transactions/configuring_db_connections_using_JNDI.html#topic_A5E3A67C808D48C08E1F0DC167C5C494) for configuration examples.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/encrypting_with_diffie_hellman.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/encrypting_with_diffie_hellman.html.md.erb b/geode-docs/managing/security/encrypting_with_diffie_hellman.html.md.erb
new file mode 100644
index 0000000..2dd91cb
--- /dev/null
+++ b/geode-docs/managing/security/encrypting_with_diffie_hellman.html.md.erb
@@ -0,0 +1,66 @@
+---
+title:  Encrypt Credentials with Diffie-Hellman
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+For secure transmission of sensitive information, like passwords, you can encrypt credentials using the Diffie-Hellman key exchange algorithm.
+
+This encryption applies only to client/server authentication - not peer-to-peer authentication.
+
+You need to specify the name of a valid symmetric key cipher supported by the JDK. Valid key names, like DES, DESede, AES, and Blowfish, enable the Diffie-Hellman algorithm with the specified cipher to encrypt the credentials. For valid JDK names, see [http://download.oracle.com/javase/1.5.0/docs/guide/security/CryptoSpec.html#AppA](http://download.oracle.com/javase/1.5.0/docs/guide/security/CryptoSpec.html#AppA).
+
+Before you begin, you need to understand how to use your security algorithm.
+
+## <a id="using_diffie_helman__section_45A9502BDF8E42E1970CEFB132F7424D" class="no-quick-link"></a>Enable Server Authentication of Client with Diffie-Hellman
+
+Set this property in the client’s `gemfire.properties` (or `gfsecurity.properties` file if you are creating a special restricted access file for security configuration):
+
+-   `security-client-dhalgo`. Name of a valid symmetric key cipher supported by the JDK, possibly followed by a key size specification.
+
+This causes the server to authenticate the client using the Diffie-Hellman algorithm.
+
+## <a id="using_diffie_helman__section_D07F68BE8D3140E99244895F4AF2CC80" class="no-quick-link"></a>Enable Client Authentication of Server
+
+This requires server authentication of client with Diffie-Hellman to be enabled. To have your client authenticate its servers, in addition to being authenticated:
+
+1.  In server `gemfire.properties` (or `gfsecurity.properties` file if you are creating a special restricted access file for security configuration), set:
+    1.  `security-server-kspath`. Path of the PKCS\#12 keystore containing the private key for the server
+    2.  `security-server-ksalias`. Alias name for the private key in the keystore.
+    3.  `security-server-kspasswd`. Keystore and private key password, which should match.
+
+2.  In client `gemfire.properties` (or `gfsecurity.properties` file if you are creating a special restricted access file for security configuration), set:
+    1.  `security-client-kspasswd`. Password for the public key file store on the client
+    2.  `security-client-kspath`. Path to the client public key truststore, the JKS keystore of public keys for all servers the client can use. This keystore should not be password-protected
+
+## <a id="using_diffie_helman__section_5FB4437072AC4B4E93210BEA60B67A27" class="no-quick-link"></a>Set the Key Size for AES and Blowfish Encryption Keys
+
+For algorithms like AES, especially if large key sizes are used, you may need Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files from Sun or equivalent for your JDK. This enables encryption of client credentials in combination with challenge-response from server to client to prevent replay and other types of attacks. It also enables challenge-response from client to server to avoid server-side replay attacks.
+
+For the AES and Blowfish algorithms, you can specify the key size for the `security-client-dhalgo` property by adding a colon and the size after the algorithm specification, like this:
+
+``` pre
+security-client-dhalgo=AES:192
+```
+
+-   For AES, valid key size settings are:
+    -   AES:128
+    -   AES:192
+    -   AES:256
+-   For Blowfish, set the key size between 128 and 448 bits, inclusive.
+

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/encrypting_with_diffie_helman.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/encrypting_with_diffie_helman.html.md.erb b/geode-docs/managing/security/encrypting_with_diffie_helman.html.md.erb
deleted file mode 100644
index 2dd91cb..0000000
--- a/geode-docs/managing/security/encrypting_with_diffie_helman.html.md.erb
+++ /dev/null
@@ -1,66 +0,0 @@
----
-title:  Encrypt Credentials with Diffie-Hellman
----
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-For secure transmission of sensitive information, like passwords, you can encrypt credentials using the Diffie-Hellman key exchange algorithm.
-
-This encryption applies only to client/server authentication - not peer-to-peer authentication.
-
-You need to specify the name of a valid symmetric key cipher supported by the JDK. Valid key names, like DES, DESede, AES, and Blowfish, enable the Diffie-Hellman algorithm with the specified cipher to encrypt the credentials. For valid JDK names, see [http://download.oracle.com/javase/1.5.0/docs/guide/security/CryptoSpec.html#AppA](http://download.oracle.com/javase/1.5.0/docs/guide/security/CryptoSpec.html#AppA).
-
-Before you begin, you need to understand how to use your security algorithm.
-
-## <a id="using_diffie_helman__section_45A9502BDF8E42E1970CEFB132F7424D" class="no-quick-link"></a>Enable Server Authentication of Client with Diffie-Hellman
-
-Set this in property in the client’s `gemfire.properties` (or `gfsecurity.properties` file if you are creating a special restricted access file for security configuration):
-
--   `security-client-dhalgo`. Name of a valid symmetric key cipher supported by the JDK, possibly followed by a key size specification.
-
-This causes the server to authenticate the client using the Diffie-Hellman algorithm.
-
-## <a id="using_diffie_helman__section_D07F68BE8D3140E99244895F4AF2CC80" class="no-quick-link"></a>Enable Client Authentication of Server
-
-This requires server authentication of client with Diffie-Hellman to be enabled. To have your client authenticate its servers, in addition to being authenticated:
-
-1.  In server `gemfire.properties` (or `gfsecurity.properties` file if you are creating a special restricted access file for security configuration), set:
-    1.  `security-server-kspath`. Path of the PKCS\#12 keystore containing the private key for the server
-    2.  `security-server-ksalias`. Alias name for the private key in the keystore.
-    3.  `security-server-kspasswd`. Keystore and private key password, which should match.
-
-2.  In client `gemfire.properties` (or `gfsecurity.properties` file if you are creating a special restricted access file for security configuration), set:
-    1.  `security-client-kspasswd`. Password for the public key file store on the client
-    2.  `security-client-kspath`. Path to the client public key truststore, the JKS keystore of public keys for all servers the client can use. This keystore should not be password-protected
-
-## <a id="using_diffie_helman__section_5FB4437072AC4B4E93210BEA60B67A27" class="no-quick-link"></a>Set the Key Size for AES and Blowfish Encryption Keys
-
-For algorithms like AES, especially if large key sizes are used, you may need Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files from Sun or equivalent for your JDK. This enables encryption of client credentials in combination with challenge-response from server to client to prevent replay and other types of attacks. It also enables challenge-response from client to server to avoid server-side replay attacks.
-
-For the AES and Blowfish algorithms, you can specify the key size for the `security-client-dhalgo` property by adding a colon and the size after the algorithm specification, like this:
-
-``` pre
-security-client-dhalgo=AES:192
-```
-
--   For AES, valid key size settings are:
-    -   AES:128
-    -   AES:192
-    -   AES:256
--   For Blowfish, set the key size between 128 and 448 bits, inclusive.
-

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/implementing_ssl.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/implementing_ssl.html.md.erb b/geode-docs/managing/security/implementing_ssl.html.md.erb
index 5cf2517..978d517 100644
--- a/geode-docs/managing/security/implementing_ssl.html.md.erb
+++ b/geode-docs/managing/security/implementing_ssl.html.md.erb
@@ -20,8 +20,8 @@ limitations under the License.
 -->
 
 You can configure SSL for authentication between members and to protect your data during
-distribution. You can use SSL alone or in conjunction with the other Geode security options.
-Geode SSL connections use the Java Secure Sockets Extension (JSSE) package.
+distribution. You can use SSL alone or in conjunction with the other <%=vars.product_name%> security options.
+<%=vars.product_name%> SSL connections use the Java Secure Sockets Extension (JSSE) package.
 
 ## <a id="ssl_configurable_components" class="no-quick-link"></a>SSL-Configurable Components
 
@@ -62,7 +62,7 @@ with a locator must also have SSL enabled.
 
 ## <a id="ssl_configuration_properties" class="no-quick-link"></a>SSL Configuration Properties
 
-You can use Geode configuration properties to enable or disable SSL, to identify SSL ciphers and
+You can use <%=vars.product_name%> configuration properties to enable or disable SSL, to identify SSL ciphers and
 protocols, and to provide the location and credentials for key and trust stores.
 
 <dt>**ssl-enabled-components**</dt>
@@ -186,7 +186,7 @@ The following table lists the components you can configure to use SSL.
 | server    | Communication between clients and servers                             |
 | all       | All of the above                                                      |
 
-The following table lists the properties you can use to configure SSL on your Geode system.
+The following table lists the properties you can use to configure SSL on your <%=vars.product_name%> system.
 
 <span class="tablecap">Table 2. SSL Configuration Properties</span>
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/properties_file.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/properties_file.html.md.erb b/geode-docs/managing/security/properties_file.html.md.erb
index e9ba1d6..2888564 100644
--- a/geode-docs/managing/security/properties_file.html.md.erb
+++ b/geode-docs/managing/security/properties_file.html.md.erb
@@ -22,7 +22,7 @@ limitations under the License.
 
 Any security-related (properties that begin with `security-*`) configuration properties that are normally configured in `gemfire.properties` can be moved to a separate `gfsecurity.properties` file. Placing these configuration settings in a separate file allows you to restrict access to security configuration data. This way, you can still allow read or write access for your `gemfire.properties` file.
 
-Upon startup, Geode processes will look for the `gfsecurity.properties` file in the following locations in order:
+Upon startup, <%=vars.product_name%> processes will look for the `gfsecurity.properties` file in the following locations in order:
 
 -   current working directory
 -   user's home directory

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/security-audit.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/security-audit.html.md.erb b/geode-docs/managing/security/security-audit.html.md.erb
index 0a6c410..c5589da 100644
--- a/geode-docs/managing/security/security-audit.html.md.erb
+++ b/geode-docs/managing/security/security-audit.html.md.erb
@@ -21,7 +21,7 @@ limitations under the License.
 <a id="topic_686158E9AFBD47518BE1B4BEB232C190"></a>
 
 
-Geode processes use either UDP or TCP/IP ports to communicate with other processes or clients.
+<%=vars.product_name%> processes use either UDP or TCP/IP ports to communicate with other processes or clients.
 
 For example:
 
@@ -30,9 +30,9 @@ For example:
 -   JMX clients (such as `gfsh` and JConsole) can connect to JMX Managers and other manageable members on the pre-defined RMI port 1099. You can configure a different port if necessary.
 -   Each gateway receiver usually has a port range where it listens for incoming communication.
 
-See [Firewalls and Ports](../../configuring/running/firewalls_ports.html#concept_5ED182BDBFFA4FAB89E3B81366EBC58E) for the complete list of ports used by Geode, their default values, and how to configure them if you do not want to use the default value.
+See [Firewalls and Ports](../../configuring/running/firewalls_ports.html#concept_5ED182BDBFFA4FAB89E3B81366EBC58E) for the complete list of ports used by <%=vars.product_name%>, their default values, and how to configure them if you do not want to use the default value.
 
-Geode does not have any external interfaces or services that need to be enabled or opened.
+<%=vars.product_name%> does not have any external interfaces or services that need to be enabled or opened.
 
 ## <a id="topic_263072624B8D4CDBAD18B82E07AA44B6" class="no-quick-link"></a>Resources That Must Be Protected
 
@@ -49,7 +49,7 @@ The default location of the `gemfire.properties` and `cache.xml` configuration f
 
 By default, the log files are located in the working directory used when you started the corresponding processes.
 
-For Geode members (locators and cache servers), you can also specify a custom working directory location when you start each process. See [Logging](../logging/logging.html#concept_30DB86B12B454E168B80BB5A71268865) for more details.
+For <%=vars.product_name%> members (locators and cache servers), you can also specify a custom working directory location when you start each process. See [Logging](../logging/logging.html#concept_30DB86B12B454E168B80BB5A71268865) for more details.
 
 The log files are as follows:
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/security_audit_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/security_audit_overview.html.md.erb b/geode-docs/managing/security/security_audit_overview.html.md.erb
index 7f1c374..faa20d7 100644
--- a/geode-docs/managing/security/security_audit_overview.html.md.erb
+++ b/geode-docs/managing/security/security_audit_overview.html.md.erb
@@ -25,11 +25,11 @@ This section gathers discrete details in one convenient location to better help
 
 -   **[External Interfaces, Ports, and Services](security-audit.html)**
 
-    Geode processes use either UDP or TCP/IP ports to communicate with other processes or clients.
+    <%=vars.product_name%> processes use either UDP or TCP/IP ports to communicate with other processes or clients.
 
 -   **[Resources That Must Be Protected](security-audit.html#topic_263072624B8D4CDBAD18B82E07AA44B6)**
 
-    Certain Geode configuration files should be readable and writeable *only* by the dedicated user who runs servers.
+    Certain <%=vars.product_name%> configuration files should be readable and writeable *only* by the dedicated user who runs servers.
 
 -   **[Log File Locations](security-audit.html#topic_5B6DF783A14241399DC25C6EE8D0048A)**
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/ssl_example.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/ssl_example.html.md.erb b/geode-docs/managing/security/ssl_example.html.md.erb
index ef213dc..f4b3dd6 100644
--- a/geode-docs/managing/security/ssl_example.html.md.erb
+++ b/geode-docs/managing/security/ssl_example.html.md.erb
@@ -19,7 +19,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-A simple example demonstrates the configuration and startup of Geode system components with SSL.
+A simple example demonstrates the configuration and startup of <%=vars.product_name%> system components with SSL.
 
 ## <a id="ssl_example__section_A8817FA8EF654CFB862F2375C0DD6770" class="no-quick-link"></a>Provider-Specific Configuration File
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/security/ssl_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/ssl_overview.html.md.erb b/geode-docs/managing/security/ssl_overview.html.md.erb
index b038a28..d52d8be 100644
--- a/geode-docs/managing/security/ssl_overview.html.md.erb
+++ b/geode-docs/managing/security/ssl_overview.html.md.erb
@@ -22,24 +22,24 @@ limitations under the License.
 SSL protects your data in transit between applications by ensuring
 that only the applications identified by you can share distributed system data.
 
-To be secure, the data that is cached in a Geode system must be protected during storage, distribution, and processing. At any time, data in a distributed system may be in one or more of these locations:
+To be secure, the data that is cached in a <%=vars.product_name%> system must be protected during storage, distribution, and processing. At any time, data in a distributed system may be in one or more of these locations:
 
 -   In memory
 -   On disk
 -   In transit between processes (for example, in an internet or intranet)
 
-For the protection of data in memory or on disk, Geode relies on your standard system security features such as firewalls, operating system settings, and JDK security settings.
+For the protection of data in memory or on disk, <%=vars.product_name%> relies on your standard system security features such as firewalls, operating system settings, and JDK security settings.
 
-The SSL implementation ensures that only the applications identified by you can share distributed system data in transit. In this figure, the data in the visible portion of the distributed system is secured by the firewall and by security settings in the operating system and in the JDK. The data in the disk files, for example, is protected by the firewall and by file permissions. Using SSL for data distribution provides secure communication between Geode system members inside and outside the firewalls.
+The SSL implementation ensures that only the applications identified by you can share distributed system data in transit. In this figure, the data in the visible portion of the distributed system is secured by the firewall and by security settings in the operating system and in the JDK. The data in the disk files, for example, is protected by the firewall and by file permissions. Using SSL for data distribution provides secure communication between <%=vars.product_name%> system members inside and outside the firewalls.
 
 <img src="../../images/security-5.gif" id="how_ssl_works__image_0437E0FC3EE74FB297BE4EBCC0FD4321" class="image" />
 
 
 -   **[Configuring SSL](implementing_ssl.html)**
 
-    You configure SSL for mutual authentication between members and to protect your data during distribution. You can use SSL alone or in conjunction with the other Geode security options.
+    You configure SSL for mutual authentication between members and to protect your data during distribution. You can use SSL alone or in conjunction with the other <%=vars.product_name%> security options.
 
 -   **[SSL Sample Implementation](ssl_example.html)**
 
-    A simple example demonstrates the configuration and startup of Geode system components with SSL.
+    A simple example demonstrates the configuration and startup of <%=vars.product_name%> system components with SSL.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/statistics/application_defined_statistics.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/statistics/application_defined_statistics.html.md.erb b/geode-docs/managing/statistics/application_defined_statistics.html.md.erb
index 72e0876..fccc761 100644
--- a/geode-docs/managing/statistics/application_defined_statistics.html.md.erb
+++ b/geode-docs/managing/statistics/application_defined_statistics.html.md.erb
@@ -19,10 +19,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Geode includes interfaces for defining and maintaining your own statistics.
+<%=vars.product_name%> includes interfaces for defining and maintaining your own statistics.
 
 <a id="application_defined_statistics__section_88C31FA62A194947BF71AD54B5F9BAB3"></a>
-The Geode package, `org.apache.geode`, includes the following interfaces for defining and maintaining your own statistics:
+The <%=vars.product_name%> package, `org.apache.geode`, includes the following interfaces for defining and maintaining your own statistics:
 
 -   **StatisticDescriptor**. Describes an individual statistic. Each statistic has a name and information on the statistic it holds, such as its class type (long, int, etc.) and whether it is a counter that always increments, or a gauge that can vary in any manner.
 -   **StatisticsType**. Logical type that holds a list of `StatisticDescriptors` and provides access methods to them. The `StatisticDescriptors` contained by a `StatisticsType` are each assigned a unique ID within the list. `StatisticsType` is used to create a `Statistics` instance.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/statistics/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/statistics/chapter_overview.html.md.erb b/geode-docs/managing/statistics/chapter_overview.html.md.erb
index 87cf5f4..650b852 100644
--- a/geode-docs/managing/statistics/chapter_overview.html.md.erb
+++ b/geode-docs/managing/statistics/chapter_overview.html.md.erb
@@ -19,24 +19,24 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Every application and server in a distributed system can access statistical data about Apache Geode operations. You can configure the gathering of statistics by using the `alter runtime` command of `gfsh` or in the `gemfire.properties` file to facilitate system analysis and troubleshooting.
+Every application and server in a distributed system can access statistical data about <%=vars.product_name_long%> operations. You can configure the gathering of statistics by using the `alter runtime` command of `gfsh` or in the `gemfire.properties` file to facilitate system analysis and troubleshooting.
 
--   **[How Statistics Work](../../managing/statistics/how_statistics_work.html)**
+-   **[How Statistics Work](how_statistics_work.html)**
 
     Each application or cache server that joins the distributed system can collect and archive statistical data for analyzing system performance.
 
--   **[Transient Region and Entry Statistics](../../managing/statistics/transient_region_and_entry_statistics.html)**
+-   **[Transient Region and Entry Statistics](transient_region_and_entry_statistics.html)**
 
-    For replicated, distributed, and local regions, Geode provides a standard set of statistics for the region and its entries.
+    For replicated, distributed, and local regions, <%=vars.product_name%> provides a standard set of statistics for the region and its entries.
 
--   **[Application-Defined and Custom Statistics](../../managing/statistics/application_defined_statistics.html)**
+-   **[Application-Defined and Custom Statistics](application_defined_statistics.html)**
 
-    Geode includes interfaces for defining and maintaining your own statistics.
+    <%=vars.product_name%> includes interfaces for defining and maintaining your own statistics.
 
--   **[Configuring and Using Statistics](../../managing/statistics/setting_up_statistics.html)**
+-   **[Configuring and Using Statistics](setting_up_statistics.html)**
 
     You configure statistics and statistics archiving in gemfire.properties
 
--   **[Viewing Archived Statistics](../../managing/statistics/viewing_statistics.html)**
+-   **[Viewing Archived Statistics](viewing_statistics.html)**
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/statistics/how_statistics_work.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/statistics/how_statistics_work.html.md.erb b/geode-docs/managing/statistics/how_statistics_work.html.md.erb
index 461c3e8..44618e7 100644
--- a/geode-docs/managing/statistics/how_statistics_work.html.md.erb
+++ b/geode-docs/managing/statistics/how_statistics_work.html.md.erb
@@ -27,7 +27,7 @@ Set the configuration attributes that control statistics collection in `gfsh` or
 When Java applications and servers join a distributed system, they can be configured via the cluster configuration service to enable statistics sampling and whether to archive the statistics that are gathered.
 
 **Note:**
-Geode statistics use the Java `System.nanoTimer` for nanosecond timing. This method provides nanosecond precision, but not necessarily nanosecond accuracy. For more information, see the online Java documentation for `System.nanoTimer` for the JRE you are using with Geode.
+<%=vars.product_name%> statistics use the Java `System.nanoTimer` for nanosecond timing. This method provides nanosecond precision, but not necessarily nanosecond accuracy. For more information, see the online Java documentation for `System.nanoTimer` for the JRE you are using with <%=vars.product_name%>.
 
 Statistics sampling provides valuable information for ongoing system tuning and troubleshooting. Sampling statistics (not including time-based statistics) at the default sample rate does not impact overall distributed system performance. We recommend enabling statistics sampling in production environments. We do not recommend enabling time-based statistics (configured with the enable-time-statistics property) in production environments.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/statistics/setting_up_statistics.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/statistics/setting_up_statistics.html.md.erb b/geode-docs/managing/statistics/setting_up_statistics.html.md.erb
index 2ec1f23..8467eed 100644
--- a/geode-docs/managing/statistics/setting_up_statistics.html.md.erb
+++ b/geode-docs/managing/statistics/setting_up_statistics.html.md.erb
@@ -137,7 +137,7 @@ In this procedure it is assumed that you understand [Basic Configuration and Pro
 
 ## <a id="setting_up_statistics__section_D511BB61B27A44749E2012B066A5C906" class="no-quick-link"></a>Controlling the Size of Archive Files
 
-You can specify limits on the archive files for statistics using `alter                 runtime` command. These are the areas of control:
+You can specify limits on the archive files for statistics using `alter runtime` command. These are the areas of control:
 
 -   **Archive File Growth Rate**.
     -   The `--statistic-sample-rate` parameter controls how often samples are taken, which affects the speed at which the archive file grows.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/statistics/transient_region_and_entry_statistics.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/statistics/transient_region_and_entry_statistics.html.md.erb b/geode-docs/managing/statistics/transient_region_and_entry_statistics.html.md.erb
index a10cfd5..3d71b6a 100644
--- a/geode-docs/managing/statistics/transient_region_and_entry_statistics.html.md.erb
+++ b/geode-docs/managing/statistics/transient_region_and_entry_statistics.html.md.erb
@@ -19,12 +19,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-For replicated, distributed, and local regions, Geode provides a standard set of statistics for the region and its entries.
+For replicated, distributed, and local regions, <%=vars.product_name%> provides a standard set of statistics for the region and its entries.
 
-Geode gathers these statistics when the `--enable-statistics` parameter of the `create region` command of `gfsh` is set to true or in cache.xml the region attribute `statistics-enabled` is set to true.
+<%=vars.product_name%> gathers these statistics when the `--enable-statistics` parameter of the `create region` command of `gfsh` is set to true or in cache.xml the region attribute `statistics-enabled` is set to true.
 
 **Note:**
-Unlike other Geode statistics, these region and entry statistics are not archived and cannot be charted.
+Unlike other <%=vars.product_name%> statistics, these region and entry statistics are not archived and cannot be charted.
 
 **Note:**
 Enabling these statistics requires extra memory per entry. See [Memory Requirements for Cached Data](../../reference/topics/memory_requirements_for_cache_data.html#calculating_memory_requirements).

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/troubleshooting/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/troubleshooting/chapter_overview.html.md.erb b/geode-docs/managing/troubleshooting/chapter_overview.html.md.erb
index 8722a77..b6373b5 100644
--- a/geode-docs/managing/troubleshooting/chapter_overview.html.md.erb
+++ b/geode-docs/managing/troubleshooting/chapter_overview.html.md.erb
@@ -21,39 +21,39 @@ limitations under the License.
 
 This section provides strategies for handling common errors and failure situations.
 
--   **[Producing Artifacts for Troubleshooting](../../managing/troubleshooting/producing_troubleshooting_artifacts.html)**
+-   **[Producing Artifacts for Troubleshooting](producing_troubleshooting_artifacts.html)**
 
     There are several types of files that are critical for troubleshooting.
 
--   **[Diagnosing System Problems](../../managing/troubleshooting/diagnosing_system_probs.html)**
+-   **[Diagnosing System Problems](diagnosing_system_probs.html)**
 
     This section provides possible causes and suggested responses for system problems.
 
--   **[System Failure and Recovery](../../managing/troubleshooting/system_failure_and_recovery.html)**
+-   **[System Failure and Recovery](system_failure_and_recovery.html)**
 
     This section describes alerts for and appropriate responses to various kinds of system failures. It also helps you plan a strategy for data recovery.
 
--   **[Handling Forced Cache Disconnection Using Autoreconnect](../../managing/autoreconnect/member-reconnect.html)**
+-   **[Handling Forced Cache Disconnection Using Autoreconnect](../member-reconnect.html)**
 
-    A Geode member may be forcibly disconnected from a Geode distributed system if the member is unresponsive for a period of time, or if a network partition separates one or more members into a group that is too small to act as the distributed system.
+    A <%=vars.product_name%> member may be forcibly disconnected from a <%=vars.product_name%> distributed system if the member is unresponsive for a period of time, or if a network partition separates one or more members into a group that is too small to act as the distributed system.
 
--   **[Recovering from Application and Cache Server Crashes](../../managing/troubleshooting/recovering_from_app_crashes.html)**
+-   **[Recovering from Application and Cache Server Crashes](recovering_from_app_crashes.html)**
 
     When the application or cache server crashes, its local cache is lost, and any resources it owned (for example, distributed locks) are released. The member must recreate its local cache upon recovery.
 
--   **[Recovering from Machine Crashes](../../managing/troubleshooting/recovering_from_machine_crashes.html)**
+-   **[Recovering from Machine Crashes](recovering_from_machine_crashes.html)**
 
     When a machine crashes because of a shutdown, power loss, hardware failure, or operating system failure, all of its applications and cache servers and their local caches are lost.
 
--   **[Recovering from ConflictingPersistentDataExceptions](../../managing/troubleshooting/recovering_conflicting_data_exceptions.html)**
+-   **[Recovering from ConflictingPersistentDataExceptions](recovering_conflicting_data_exceptions.html)**
 
-    A `ConflictingPersistentDataException` while starting up persistent members indicates that you have multiple copies of some persistent data, and Geode cannot determine which copy to use.
+    A `ConflictingPersistentDataException` while starting up persistent members indicates that you have multiple copies of some persistent data, and <%=vars.product_name%> cannot determine which copy to use.
 
--   **[Preventing and Recovering from Disk Full Errors](../../managing/troubleshooting/prevent_and_recover_disk_full_errors.html)**
+-   **[Preventing and Recovering from Disk Full Errors](prevent_and_recover_disk_full_errors.html)**
 
-    It is important to monitor the disk usage of Geode members. If a member lacks sufficient disk space for a disk store, the member attempts to shut down the disk store and its associated cache, and logs an error message. A shutdown due to a member running out of disk space can cause loss of data, data file corruption, log file corruption and other error conditions that can negatively impact your applications.
+    It is important to monitor the disk usage of <%=vars.product_name%> members. If a member lacks sufficient disk space for a disk store, the member attempts to shut down the disk store and its associated cache, and logs an error message. A shutdown due to a member running out of disk space can cause loss of data, data file corruption, log file corruption and other error conditions that can negatively impact your applications.
 
--   **[Understanding and Recovering from Network Outages](../../managing/troubleshooting/recovering_from_network_outages.html)**
+-   **[Understanding and Recovering from Network Outages](recovering_from_network_outages.html)**
 
     The safest response to a network outage is to restart all the processes and bring up a fresh data set.
 


[07/25] geode git commit: GEODE-3406: Fixed test failures.

Posted by ud...@apache.org.
GEODE-3406: Fixed test failures.

Now TcpServerFactory doesn't look for the protobuf-enabled property.
If the service is not available, it is simply ignored.


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/31e82d6d
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/31e82d6d
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/31e82d6d

Branch: refs/heads/feature/GEODE-3503
Commit: 31e82d6d64187ba7a6d71771806a0b107a811c11
Parents: 0cc6043
Author: Hitesh Khamesra <hk...@pivotal.io>
Authored: Mon Aug 21 14:46:19 2017 -0700
Committer: Hitesh Khamesra <hk...@pivotal.io>
Committed: Tue Aug 22 10:56:00 2017 -0700

----------------------------------------------------------------------
 .../geode/internal/cache/tier/sockets/TcpServerFactory.java | 9 +++++----
 .../apache/geode/test/dunit/standalone/DUnitLauncher.java   | 2 --
 2 files changed, 5 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/31e82d6d/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java
index 9c6bd8c..a67d6e6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java
@@ -40,14 +40,15 @@ public class TcpServerFactory {
   }
 
   public synchronized ClientProtocolMessageHandler initializeMessageHandler() {
-    if (!Boolean.getBoolean("geode.feature-protobuf-protocol")) {
-      return null;
-    }
     if (protocolHandler != null) {
       return protocolHandler;
     }
 
-    protocolHandler = new MessageHandlerFactory().makeMessageHandler();
+    try {
+      protocolHandler = new MessageHandlerFactory().makeMessageHandler();
+    } catch (ServiceLoadingFailureException ex) {
+      // ignore, TcpServer will take care right now
+    }
 
     return protocolHandler;
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/31e82d6d/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java b/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
index fd88abf..b35270e 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
@@ -297,8 +297,6 @@ public class DUnitLauncher {
         // able to do so successfully anyway
         p.setProperty(DISABLE_AUTO_RECONNECT, "true");
 
-        System.setProperty("geode.feature-protobuf-protocol", "true");
-
         try {
           Locator.startLocatorAndDS(0, locatorLogFile, p);
           InternalLocator internalLocator = (InternalLocator) Locator.getLocator();


[12/25] geode git commit: GEODE-3504: Add Experimental caveat for Redis and Auto-rebalance

Posted by ud...@apache.org.
GEODE-3504: Add Experimental caveat for Redis and Auto-rebalance


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/a1ac45de
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/a1ac45de
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/a1ac45de

Branch: refs/heads/feature/GEODE-3503
Commit: a1ac45dee947dd95c70200e7779275a03b492733
Parents: c0f6c84
Author: Dave Barnes <db...@pivotal.io>
Authored: Tue Aug 22 11:59:32 2017 -0700
Committer: Dave Barnes <db...@pivotal.io>
Committed: Tue Aug 22 14:41:48 2017 -0700

----------------------------------------------------------------------
 .../partitioned_regions/automated_rebalance.html.md.erb         | 2 ++
 .../developing/partitioned_regions/chapter_overview.html.md.erb | 5 -----
 .../partitioned_regions/rebalancing_pr_data.html.md.erb         | 5 +++++
 geode-docs/tools_modules/redis_adapter.html.md.erb              | 2 ++
 4 files changed, 9 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/a1ac45de/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
index 37b7dce..5d8f65a 100644
--- a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
+++ b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
@@ -18,6 +18,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
+**Note:** This feature is experimental and is subject to change in future releases of <%=vars.product_name_long%>.
+
 Automated rebalance triggers a rebalance
 (see [Rebalancing Partitioned Region Data](rebalancing_pr_data.html))
 operation based on a

http://git-wip-us.apache.org/repos/asf/geode/blob/a1ac45de/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb b/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
index 0d41532..583c860 100644
--- a/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
+++ b/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
@@ -49,11 +49,6 @@ In addition to basic region management, partitioned regions include options for
 
     In a distributed system with minimal contention to the concurrent threads reading or updating from the members, you can use rebalancing to dynamically increase or decrease your data and processing capacity.
 
-- **[Automated Rebalancing of Partitioned Region Data](automated_rebalance.html)**
-
-    The automated rebalance feature triggers a rebalance operation
-based on a time schedule.
-
 -   **[Checking Redundancy in Partitioned Regions](checking_region_redundancy.html)**
 
     Under some circumstances, it can be important to verify that your partitioned region data is redundant and that upon member restart, redundancy has been recovered properly across partitioned region members.

http://git-wip-us.apache.org/repos/asf/geode/blob/a1ac45de/geode-docs/developing/partitioned_regions/rebalancing_pr_data.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/developing/partitioned_regions/rebalancing_pr_data.html.md.erb b/geode-docs/developing/partitioned_regions/rebalancing_pr_data.html.md.erb
index 66057a4..504eb79 100644
--- a/geode-docs/developing/partitioned_regions/rebalancing_pr_data.html.md.erb
+++ b/geode-docs/developing/partitioned_regions/rebalancing_pr_data.html.md.erb
@@ -104,3 +104,8 @@ gfsh>rebalance --simulate
 If you are using `heap_lru` for data eviction, you may notice a difference between your simulated results and your actual rebalancing results. This discrepancy can be due to the VM starting to evict entries after you execute the simulation. Then when you perform an actual rebalance operation, the operation will make different decisions based on the newer heap size.
 
 
+## Automated Rebalancing
+
+The experimental [automated rebalance feature](automated_rebalance.html) triggers a rebalance operation based on a time schedule.
+
+

http://git-wip-us.apache.org/repos/asf/geode/blob/a1ac45de/geode-docs/tools_modules/redis_adapter.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/redis_adapter.html.md.erb b/geode-docs/tools_modules/redis_adapter.html.md.erb
index 82f9ed5..1fb1898 100644
--- a/geode-docs/tools_modules/redis_adapter.html.md.erb
+++ b/geode-docs/tools_modules/redis_adapter.html.md.erb
@@ -19,6 +19,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
+**Note:** This feature is experimental and is subject to change in future releases of <%=vars.product_name_long%>.
+
 The <%=vars.product_name%> Redis adapter allows <%=vars.product_name%> to function as a drop-in replacement for a Redis data store, letting Redis applications take advantage of <%=vars.product_name%>’s scaling capabilities without changing their client code. Redis clients connect to a <%=vars.product_name%> server in the same way they connect to a Redis server, using an IP address and a port number.
 
 -   **[Using the Redis Adapter](#using-the-redis-adapter)**


[11/25] geode git commit: GEODE-3395 Variable-ize product version and name in user guide - Topo & Comms, format repair

Posted by ud...@apache.org.
GEODE-3395 Variable-ize product version and name in user guide - Topo & Comms, format repair


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/c0f6c841
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/c0f6c841
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/c0f6c841

Branch: refs/heads/feature/GEODE-3503
Commit: c0f6c841862c64a0b82594045320e82f558452ba
Parents: 35d3a97
Author: Dave Barnes <db...@pivotal.io>
Authored: Tue Aug 22 14:30:19 2017 -0700
Committer: Dave Barnes <db...@pivotal.io>
Committed: Tue Aug 22 14:30:19 2017 -0700

----------------------------------------------------------------------
 .../topology_concepts/IPv4_and_IPv6.html.md.erb                    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/c0f6c841/geode-docs/topologies_and_comm/topology_concepts/IPv4_and_IPv6.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/topology_concepts/IPv4_and_IPv6.html.md.erb b/geode-docs/topologies_and_comm/topology_concepts/IPv4_and_IPv6.html.md.erb
index 7fdc77f..f1a91ee 100644
--- a/geode-docs/topologies_and_comm/topology_concepts/IPv4_and_IPv6.html.md.erb
+++ b/geode-docs/topologies_and_comm/topology_concepts/IPv4_and_IPv6.html.md.erb
@@ -21,8 +21,8 @@ limitations under the License.
 
 By default, <%=vars.product_name_long%> uses Internet Protocol version 4 for <%=vars.product_name%> address specifications. You can switch to Internet Protocol version 6 if all your machines support it. You may lose performance, so you need to understand the costs of making the switch.
 
-<a id="IPv4_and_IPv6__section_027647C0034042C087FD5C8DBCB8482B"></a>
 -   IPv4 uses a 32-bit address. IPv4 was the first protocol and is still the main one in use, but its address space is expected to be exhausted within a few years.
+
 -   IPv6 uses a 128-bit address. IPv6 succeeds IPv4, and will provide a much greater number of addresses.
 
 Based on current testing with <%=vars.product_name%> , IPv4 is generally recommended. IPv6 connections tend to take longer to form and the communication tends to be slower. Not all machines support IPv6 addressing. To use IPv6, all machines in your distributed system must support it or you will have connectivity problems.


[09/25] geode git commit: GEODE-3395 Variable-ize product version and name in user guide - Topo & Comms

Posted by ud...@apache.org.
GEODE-3395 Variable-ize product version and name in user guide - Topo & Comms


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/e2c3d531
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/e2c3d531
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/e2c3d531

Branch: refs/heads/feature/GEODE-3503
Commit: e2c3d531f6bf7aeddb0ce38ab356ec415e32fb48
Parents: b77e1c7
Author: Dave Barnes <db...@pivotal.io>
Authored: Tue Aug 22 14:08:59 2017 -0700
Committer: Dave Barnes <db...@pivotal.io>
Committed: Tue Aug 22 14:08:59 2017 -0700

----------------------------------------------------------------------
 .../topologies_and_comm/book_intro.html.md.erb  | 12 ++++-----
 .../chapter_overview.html.md.erb                | 18 +++++++-------
 ...nt_server_example_configurations.html.md.erb |  2 +-
 .../client_server_whats_next.html.md.erb        |  2 +-
 .../chapter_overview.html.md.erb                | 10 ++++----
 .../multisite_topologies.html.md.erb            |  4 +--
 .../setting_up_a_multisite_system.html.md.erb   | 22 ++++++++---------
 .../chapter_overview.html.md.erb                |  8 +++---
 .../setting_up_peer_communication.html.md.erb   |  4 +--
 .../topology_concepts/IPv4_and_IPv6.html.md.erb |  6 ++---
 .../chapter_overview.html.md.erb                | 26 ++++++++++----------
 .../how_communication_works.html.md.erb         | 16 ++++++------
 .../how_member_discovery_works.html.md.erb      | 10 ++++----
 .../how_multisite_systems_work.html.md.erb      | 20 +++++++--------
 .../how_server_discovery_works.html.md.erb      |  4 +--
 ...how_the_pool_manages_connections.html.md.erb |  2 +-
 .../member_communication.html.md.erb            |  2 +-
 .../topology_types.html.md.erb                  | 10 ++++----
 .../using_bind_addresses.html.md.erb            | 12 ++++-----
 19 files changed, 95 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/book_intro.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/book_intro.html.md.erb b/geode-docs/topologies_and_comm/book_intro.html.md.erb
index f7de5ed..daf705a 100644
--- a/geode-docs/topologies_and_comm/book_intro.html.md.erb
+++ b/geode-docs/topologies_and_comm/book_intro.html.md.erb
@@ -19,23 +19,23 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-*Topologies and Communication* explains how to plan and configure Apache Geode member discovery, peer-to-peer and client/server communication topologies.
+*Topologies and Communication* explains how to plan and configure <%=vars.product_name_long%> member discovery, peer-to-peer and client/server communication topologies.
 
 <a id="concept_7628F498DB534A2D8A99748F5DA5DC94__section_E62DEF9610814012A3307D50A56FE1B4"></a>
 
--   **[Topology and Communication General Concepts](../topologies_and_comm/topology_concepts/chapter_overview.html)**
+-   **[Topology and Communication General Concepts](topology_concepts/chapter_overview.html)**
 
-    Before you configure your Apache Geode members, make sure you understand the options for topology and communication.
+    Before you configure your <%=vars.product_name_long%> members, make sure you understand the options for topology and communication.
 
--   **[Peer-to-Peer Configuration](../topologies_and_comm/p2p_configuration/chapter_overview.html)**
+-   **[Peer-to-Peer Configuration](p2p_configuration/chapter_overview.html)**
 
     Use peer-to-peer configuration to set member discovery and communication within a single distributed system.
 
--   **[Client/Server Configuration](../topologies_and_comm/cs_configuration/chapter_overview.html)**
+-   **[Client/Server Configuration](cs_configuration/chapter_overview.html)**
 
     In the client/server architecture, a relatively small server farm manages the cached data of and access to the same data for many client applications. Clients can update and access data efficiently, leaving the servers to manage data distribution to other clients and any synchronization with outside data stores.
 
--   **[Multi-site (WAN) Configuration](../topologies_and_comm/multi_site_configuration/chapter_overview.html)**
+-   **[Multi-site (WAN) Configuration](multi_site_configuration/chapter_overview.html)**
 
     Use the multi-site configuration to scale horizontally between disparate, loosely-coupled distributed systems. A wide-area network (WAN) is the main use case for the multi-site topology.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/cs_configuration/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/cs_configuration/chapter_overview.html.md.erb b/geode-docs/topologies_and_comm/cs_configuration/chapter_overview.html.md.erb
index 45a8855..76d4aae 100644
--- a/geode-docs/topologies_and_comm/cs_configuration/chapter_overview.html.md.erb
+++ b/geode-docs/topologies_and_comm/cs_configuration/chapter_overview.html.md.erb
@@ -21,31 +21,31 @@ limitations under the License.
 
 In the client/server architecture, a relatively small server farm manages the cached data of and access to the same data for many client applications. Clients can update and access data efficiently, leaving the servers to manage data distribution to other clients and any synchronization with outside data stores.
 
--   **[Standard Client/Server Deployment](../../topologies_and_comm/cs_configuration/standard_client_server_deployment.html)**
+-   **[Standard Client/Server Deployment](standard_client_server_deployment.html)**
 
     In the most common client/server topology, a farm of cache servers provides caching services to many clients. Cache servers have a homogeneous data store in data regions that are replicated or partitioned across the server farm.
 
--   **[How Server Discovery Works](../../topologies_and_comm/topology_concepts/how_server_discovery_works.html)**
+-   **[How Server Discovery Works](../topology_concepts/how_server_discovery_works.html)**
 
-    Apache Geode locators provide reliable and flexible server discovery services for your clients. You can use all servers for all client requests, or group servers according to function, with the locators directing each client request to the right group of servers.
+    <%=vars.product_name_long%> locators provide reliable and flexible server discovery services for your clients. You can use all servers for all client requests, or group servers according to function, with the locators directing each client request to the right group of servers.
 
--   **[How Client/Server Connections Work](../../topologies_and_comm/topology_concepts/how_the_pool_manages_connections.html)**
+-   **[How Client/Server Connections Work](../topology_concepts/how_the_pool_manages_connections.html)**
 
-    The server pools in your Apache Geode client processes manage all client connection requests to the server tier. To make the best use of the pool functionality, you should understand how the pool manages the server connections.
+    The server pools in your <%=vars.product_name_long%> client processes manage all client connection requests to the server tier. To make the best use of the pool functionality, you should understand how the pool manages the server connections.
 
--   **[Configuring a Client/Server System](../../topologies_and_comm/cs_configuration/setting_up_a_client_server_system.html)**
+-   **[Configuring a Client/Server System](setting_up_a_client_server_system.html)**
 
     Configure your server and client processes and data regions to run your client/server system.
 
--   **[Organizing Servers Into Logical Member Groups](../../topologies_and_comm/cs_configuration/configure_servers_into_logical_groups.html)**
+-   **[Organizing Servers Into Logical Member Groups](configure_servers_into_logical_groups.html)**
 
     In a client/server configuration, by putting servers into logical member groups, you can control which servers your clients use and target specific servers for specific data or tasks. You can configure servers to manage different data sets or to direct specific client traffic to a subset of servers, such as those directly connected to a back-end database.
 
--   **[Client/Server Example Configurations](../../topologies_and_comm/cs_configuration/client_server_example_configurations.html)**
+-   **[Client/Server Example Configurations](client_server_example_configurations.html)**
 
     For easy configuration, you can start with these example client/server configurations and modify for your systems.
 
--   **[Fine-Tuning Your Client/Server Configuration](../../topologies_and_comm/cs_configuration/client_server_whats_next.html)**
+-   **[Fine-Tuning Your Client/Server Configuration](client_server_whats_next.html)**
 
     You can fine-tune your client/server system with server load-balancing and client thread use of pool connections. For example, you can configure how often the servers check their load with the cache server `load-poll-interval` property, or configure your own server load metrics by implementing the `org.apache.geode.cache.server` package.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/cs_configuration/client_server_example_configurations.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/cs_configuration/client_server_example_configurations.html.md.erb b/geode-docs/topologies_and_comm/cs_configuration/client_server_example_configurations.html.md.erb
index eeafbe5..48f6126 100644
--- a/geode-docs/topologies_and_comm/cs_configuration/client_server_example_configurations.html.md.erb
+++ b/geode-docs/topologies_and_comm/cs_configuration/client_server_example_configurations.html.md.erb
@@ -62,7 +62,7 @@ gfsh>start server --name=server1 --server-port=40404
 
 See `start server`.
 
-The client’s `cache.xml` `<client-cache>` declaration automatically configures it as a standalone Geode application.
+The client’s `cache.xml` `<client-cache>` declaration automatically configures it as a standalone <%=vars.product_name%> application.
 
 The client's `cache.xml`:
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/cs_configuration/client_server_whats_next.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/cs_configuration/client_server_whats_next.html.md.erb b/geode-docs/topologies_and_comm/cs_configuration/client_server_whats_next.html.md.erb
index 6da8f32..4e7c03e 100644
--- a/geode-docs/topologies_and_comm/cs_configuration/client_server_whats_next.html.md.erb
+++ b/geode-docs/topologies_and_comm/cs_configuration/client_server_whats_next.html.md.erb
@@ -30,7 +30,7 @@ When the client pool requests connection information from the server locator, th
 -   Between updates from the servers, the locators estimate which server is the least loaded by using the server estimates for the cost of additional connections. For example, if the current pool connection load for a server’s connections is 0.4 and each additional connection would add 0.1 to its load, the locator can estimate that adding two new pool connections will take the server’s pool connection load to 0.6.
 -   Locators do not share connection information among themselves. These estimates provide rough guidance to the individual locators for the periods between updates from the servers.
 
-Geode provides a default utility that probes the server and its resource usage to give load information to the locators. The default probe returns the following load metrics:
+<%=vars.product_name%> provides a default utility that probes the server and its resource usage to give load information to the locators. The default probe returns the following load metrics:
 -   The pool connection load is the number of connections to the server divided by the server’s `max-connections` setting. This means that servers with a lower `max-connections` setting receives fewer connections than servers with a higher setting. The load is a number between 0 and 1, where 0 means there are no connections, and 1 means the server is at `max-connections`. The load estimate for each additional pool connection is 1/`max-connections`.
 -   The subscription connection load is the number of subscription queues hosted by this server. The load estimate for each additional subscription connection is 1.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/multi_site_configuration/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/multi_site_configuration/chapter_overview.html.md.erb b/geode-docs/topologies_and_comm/multi_site_configuration/chapter_overview.html.md.erb
index b5f627b..9d62e26 100644
--- a/geode-docs/topologies_and_comm/multi_site_configuration/chapter_overview.html.md.erb
+++ b/geode-docs/topologies_and_comm/multi_site_configuration/chapter_overview.html.md.erb
@@ -21,21 +21,21 @@ limitations under the License.
 
 Use the multi-site configuration to scale horizontally between disparate, loosely-coupled distributed systems. A wide-area network (WAN) is the main use case for the multi-site topology.
 
--   **[How Multi-site (WAN) Systems Work](../../topologies_and_comm/topology_concepts/how_multisite_systems_work.html)**
+-   **[How Multi-site (WAN) Systems Work](../topology_concepts/how_multisite_systems_work.html)**
 
-    The Apache Geode multi-site implementation connects disparate distributed systems. The systems act as one when they are coupled, and they act as independent systems when communication between sites fails. The coupling is tolerant of weak or slow links between distributed system sites. A wide-area network (WAN) is the main use case for the multi-site topology.
+    The <%=vars.product_name_long%> multi-site implementation connects disparate distributed systems. The systems act as one when they are coupled, and they act as independent systems when communication between sites fails. The coupling is tolerant of weak or slow links between distributed system sites. A wide-area network (WAN) is the main use case for the multi-site topology.
 
--   **[Multi-site (WAN) Topologies](../../topologies_and_comm/multi_site_configuration/multisite_topologies.html)**
+-   **[Multi-site (WAN) Topologies](multisite_topologies.html)**
 
     To configure your multi-site topology, you should understand the recommended topologies and the topologies to avoid.
 
--   **[Configuring a Multi-site (WAN) System](../../topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html)**
+-   **[Configuring a Multi-site (WAN) System](setting_up_a_multisite_system.html)**
 
     Plan and configure your multi-site topology, and configure the regions that will be shared between systems.
 
 -   **[Filtering Events for Multi-Site (WAN) Distribution](../../developing/events/filtering_multisite_events.html)**
 
-    You can optionally create gateway sender and/or gateway receiver filters to control which events are queued and distributed to a remote site, or to modify the data stream that is transmitted between Geode sites.
+    You can optionally create gateway sender and/or gateway receiver filters to control which events are queued and distributed to a remote site, or to modify the data stream that is transmitted between <%=vars.product_name%> sites.
 
 -   **[Resolving Conflicting Events](../../developing/events/resolving_multisite_conflicts.html)**
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/multi_site_configuration/multisite_topologies.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/multi_site_configuration/multisite_topologies.html.md.erb b/geode-docs/topologies_and_comm/multi_site_configuration/multisite_topologies.html.md.erb
index b710b8d..89f961b 100644
--- a/geode-docs/topologies_and_comm/multi_site_configuration/multisite_topologies.html.md.erb
+++ b/geode-docs/topologies_and_comm/multi_site_configuration/multisite_topologies.html.md.erb
@@ -22,9 +22,9 @@ limitations under the License.
 To configure your multi-site topology, you should understand the recommended topologies and the topologies to avoid.
 
 <a id="multisite_topologies__section_26A561471249495A847B4C3854EE04C9"></a>
-This section describes Geode's support for various topologies. Depending on your application needs, there may be several topologies that work. These are considerations to keep in mind:
+This section describes <%=vars.product_name%>'s support for various topologies. Depending on your application needs, there may be several topologies that work. These are considerations to keep in mind:
 
--   When a Geode site receives a message from a gateway sender, it forwards it to the other sites it knows about, excluding those sites that it knows have already seen the message. Each message contains the initial sender's ID and the ID of each of the sites the initial sender sent to, so no site forwards to those sites. However, messages do not pick up the ID of the sites they pass through, so it is possible in certain topologies for more than one copy of a message to be sent to one site.
+-   When a <%=vars.product_name%> site receives a message from a gateway sender, it forwards it to the other sites it knows about, excluding those sites that it knows have already seen the message. Each message contains the initial sender's ID and the ID of each of the sites the initial sender sent to, so no site forwards to those sites. However, messages do not pick up the ID of the sites they pass through, so it is possible in certain topologies for more than one copy of a message to be sent to one site.
 -   In some configurations, the loss of one site affects how other sites communicate with one another.
 
 ## <a id="multisite_topologies__section_7ECE1AFB1F94446FAA0A9FD504217C76" class="no-quick-link"></a>Fully Connected Mesh Topology

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb b/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb
index dd2bc3a..8e03069 100644
--- a/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb
+++ b/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb
@@ -25,7 +25,7 @@ Plan and configure your multi-site topology, and configure the regions that will
 
 Before you start, you should understand how to configure membership and communication in peer-to-peer systems using locators. See [Configuring Peer-to-Peer Discovery](../p2p_configuration/setting_up_a_p2p_system.html) and [Configuring Peer Communication](../p2p_configuration/setting_up_peer_communication.html).
 
-WAN deployments increase the messaging demands on a Geode system. To avoid hangs related to WAN messaging, always set `conserve-sockets=false` for Geode members that participate in a WAN deployment. See [Configuring Sockets in Multi-Site (WAN) Deployments](../../managing/monitor_tune/sockets_and_gateways.html) and [Making Sure You Have Enough Sockets](../../managing/monitor_tune/socket_communication_have_enough_sockets.html).
+WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always set `conserve-sockets=false` for <%=vars.product_name%> members that participate in a WAN deployment. See [Configuring Sockets in Multi-Site (WAN) Deployments](../../managing/monitor_tune/sockets_and_gateways.html) and [Making Sure You Have Enough Sockets](../../managing/monitor_tune/socket_communication_have_enough_sockets.html).
 
 ## <a id="setting_up_a_multisite_system__section_86F9FE9D786D407FB438C56E43FC5DB1" class="no-quick-link"></a>Main Steps
 
@@ -43,7 +43,7 @@ Use the following steps to configure a multi-site system:
 
 3.  Configure the gateway senders that you will use to distribute region events to remote systems. See [Configure Gateway Senders](setting_up_a_multisite_system.html#setting_up_a_multisite_system__section_1500299A8F9A4C2385680E337F5D3DEC).
 4.  Create the data regions that you want to participate in the multi-site system, specifying the gateway sender(s) that each region should use for WAN distribution. Configure the same regions in the target clusters to apply the distributed events. See [Create Data Regions for Multi-site Communication](setting_up_a_multisite_system.html#setting_up_a_multisite_system__section_E1DEDD0743D54831AFFBCCDC750F8879).
-5.  Configure gateway receivers in each Geode cluster that will receive region events from another cluster. See [Configure Gateway Receivers](setting_up_a_multisite_system.html#setting_up_a_multisite_system__section_E3A44F85359046C7ADD12861D261637B).
+5.  Configure gateway receivers in each <%=vars.product_name%> cluster that will receive region events from another cluster. See [Configure Gateway Receivers](setting_up_a_multisite_system.html#setting_up_a_multisite_system__section_E3A44F85359046C7ADD12861D261637B).
 6.  Start distributed system member processes in the correct order (locators first, followed by data nodes) to ensure efficient discovery of WAN resources. See [Starting Up and Shutting Down Your System](../../configuring/running/starting_up_shutting_down.html).
 7.  (Optional.) Deploy custom conflict resolvers to resolve potential conflicts that are detected when applying events from over a WAN. See [Resolving Conflicting Events](../../developing/events/resolving_multisite_conflicts.html#topic_E97BB68748F14987916CD1A50E4B4542).
 8.  (Optional.) Deploy WAN filters to determine which events are distributed over the WAN, or to modify events as they are distributed over the WAN. See [Filtering Events for Multi-Site (WAN) Distribution](../../developing/events/filtering_multisite_events.html#topic_E97BB68748F14987916CD1A50E4B4542).
@@ -63,11 +63,11 @@ To configure a gateway sender that uses gfsh to create the cache.xml configurati
 
 See [WAN Configuration](../../reference/topics/elements_ref.html#topic_7B1CABCAD056499AA57AF3CFDBF8ABE3) for more information about individual configuration properties.
 
-1.  For each Geode system, choose the members that will host a gateway sender configuration and distribute region events to remote sites:
-    -   You must deploy a parallel gateway sender configuration on each Geode member that hosts a region that uses the sender.
-    -   You may choose to deploy a serial gateway sender configuration on one or more Geode members in order to provide high availability. However, only one instance of a given serial gateway sender configuration distributes region events at any given time.
+1.  For each <%=vars.product_name%> system, choose the members that will host a gateway sender configuration and distribute region events to remote sites:
+    -   You must deploy a parallel gateway sender configuration on each <%=vars.product_name%> member that hosts a region that uses the sender.
+    -   You may choose to deploy a serial gateway sender configuration on one or more <%=vars.product_name%> members in order to provide high availability. However, only one instance of a given serial gateway sender configuration distributes region events at any given time.
 
-2.  Configure each gateway sender on a Geode member using gfsh, `cache.xml` or Java API:
+2.  Configure each gateway sender on a <%=vars.product_name%> member using gfsh, `cache.xml` or Java API:
     -   **gfsh configuration command**
 
         ``` pre
@@ -77,7 +77,7 @@ See [WAN Configuration](../../reference/topics/elements_ref.html#topic_7B1CABCAD
         ```
     -   **cache.xml configuration**
 
-        These example `cache.xml` entries configure two parallel gateway senders to distribute region events to two remote Geode clusters (clusters "2" and "3"):
+        These example `cache.xml` entries configure two parallel gateway senders to distribute region events to two remote <%=vars.product_name%> clusters (clusters "2" and "3"):
 
         ``` pre
         <cache>
@@ -176,7 +176,7 @@ See [WAN Configuration](../../reference/topics/elements_ref.html#topic_7B1CABCAD
     ```
 
 **Note:**
-The gateway sender configuration for a specific sender `id` must be identical on each Geode member that hosts the gateway sender.
+The gateway sender configuration for a specific sender `id` must be identical on each <%=vars.product_name%> member that hosts the gateway sender.
 
 ## <a id="setting_up_a_multisite_system__section_E1DEDD0743D54831AFFBCCDC750F8879" class="no-quick-link"></a>Create Data Regions for Multi-site Communication
 
@@ -227,14 +227,14 @@ In addition to configuring regions with gateway senders to distribute events, yo
 
 ## <a id="setting_up_a_multisite_system__section_E3A44F85359046C7ADD12861D261637B" class="no-quick-link"></a>Configure Gateway Receivers
 
-Always configure a gateway receiver in each Geode cluster that will receive and apply region events from another cluster.
+Always configure a gateway receiver in each <%=vars.product_name%> cluster that will receive and apply region events from another cluster.
 
-A gateway receiver configuration can be applied to multiple Geode servers for load balancing and high availability. However, each Geode member that hosts a gateway receiver must also define all of the regions for which the receiver may receive an event. If a gateway receiver receives an event for a region that the local member does not define, Geode throws an exception. See [Create Data Regions for Multi-site Communication](setting_up_a_multisite_system.html#setting_up_a_multisite_system__section_E1DEDD0743D54831AFFBCCDC750F8879).
+A gateway receiver configuration can be applied to multiple <%=vars.product_name%> servers for load balancing and high availability. However, each <%=vars.product_name%> member that hosts a gateway receiver must also define all of the regions for which the receiver may receive an event. If a gateway receiver receives an event for a region that the local member does not define, <%=vars.product_name%> throws an exception. See [Create Data Regions for Multi-site Communication](setting_up_a_multisite_system.html#setting_up_a_multisite_system__section_E1DEDD0743D54831AFFBCCDC750F8879).
 
 **Note:**
 You can only host one gateway receiver per member.
 
-A gateway receiver configuration specifies a range of possible port numbers on which to listen. The Geode server picks an unused port number from the specified range to use for the receiver process. You can use this functionality to easily deploy the same gateway receiver configuration to multiple members.
+A gateway receiver configuration specifies a range of possible port numbers on which to listen. The <%=vars.product_name%> server picks an unused port number from the specified range to use for the receiver process. You can use this functionality to easily deploy the same gateway receiver configuration to multiple members.
 
 You can optionally configure gateway receivers to provide a specific IP address or host name for gateway sender connections. If you configure hostname-for-senders, locators will use the provided host name or IP address when instructing gateway senders on how to connect to gateway receivers. If you provide "" or null as the value, by default the gateway receiver's bind-address will be sent to clients.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/p2p_configuration/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/p2p_configuration/chapter_overview.html.md.erb b/geode-docs/topologies_and_comm/p2p_configuration/chapter_overview.html.md.erb
index 22c026f..7a4febc 100644
--- a/geode-docs/topologies_and_comm/p2p_configuration/chapter_overview.html.md.erb
+++ b/geode-docs/topologies_and_comm/p2p_configuration/chapter_overview.html.md.erb
@@ -21,15 +21,15 @@ limitations under the License.
 
 Use peer-to-peer configuration to set member discovery and communication within a single distributed system.
 
--   **[Configuring Peer-to-Peer Discovery](../../topologies_and_comm/p2p_configuration/setting_up_a_p2p_system.html)**
+-   **[Configuring Peer-to-Peer Discovery](setting_up_a_p2p_system.html)**
 
     Peer members discover each other using one or more locators.
 
--   **[Configuring Peer Communication](../../topologies_and_comm/p2p_configuration/setting_up_peer_communication.html)**
+-   **[Configuring Peer Communication](setting_up_peer_communication.html)**
 
-    By default Apache Geode uses TCP for communication between members of a single distributed system. You can modify this at the member and region levels.
+    By default <%=vars.product_name_long%> uses TCP for communication between members of a single distributed system. You can modify this at the member and region levels.
 
--   **[Organizing Peers into Logical Member Groups](../../topologies_and_comm/p2p_configuration/configuring_peer_member_groups.html)**
+-   **[Organizing Peers into Logical Member Groups](configuring_peer_member_groups.html)**
 
     In a peer-to-peer configuration, you can organize members into logical member groups and use those groups to associate specific data or assign tasks to a pre-defined set of members.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/p2p_configuration/setting_up_peer_communication.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/p2p_configuration/setting_up_peer_communication.html.md.erb b/geode-docs/topologies_and_comm/p2p_configuration/setting_up_peer_communication.html.md.erb
index 9598aa6..fdd678a 100644
--- a/geode-docs/topologies_and_comm/p2p_configuration/setting_up_peer_communication.html.md.erb
+++ b/geode-docs/topologies_and_comm/p2p_configuration/setting_up_peer_communication.html.md.erb
@@ -19,7 +19,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-By default Apache Geode uses TCP for communication between members of a single distributed system. You can modify this at the member and region levels.
+By default <%=vars.product_name_long%> uses TCP for communication between members of a single distributed system. You can modify this at the member and region levels.
 
 <a id="setting_up_communication__section_34509F5B17A943D8BBF19A3497E32BAE"></a>
 Before you begin, you should have already determined the address and port settings for multicast, including any bind addresses. See [Topology and Communication General Concepts](../topology_concepts/chapter_overview.html).
@@ -57,7 +57,7 @@ See the [Reference](../../reference/book_intro.html#reference).
         ```
 
         **Note:**
-        Improperly configured multicast can affect production systems. If you intend to use multicast on a shared network, work with your network administrator and system administrator from the planning stage of the project. In addition, you may need to address interrelated setup and tuning issues at the Geode, operating system, and network level.
+        Improperly configured multicast can affect production systems. If you intend to use multicast on a shared network, work with your network administrator and system administrator from the planning stage of the project. In addition, you may need to address interrelated setup and tuning issues at the <%=vars.product_name%>, operating system, and network level.
 
 Once your members establish their connections to each other, they will send distributed data and messages according to your configuration.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/topology_concepts/IPv4_and_IPv6.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/topology_concepts/IPv4_and_IPv6.html.md.erb b/geode-docs/topologies_and_comm/topology_concepts/IPv4_and_IPv6.html.md.erb
index 07f0328..7fdc77f 100644
--- a/geode-docs/topologies_and_comm/topology_concepts/IPv4_and_IPv6.html.md.erb
+++ b/geode-docs/topologies_and_comm/topology_concepts/IPv4_and_IPv6.html.md.erb
@@ -19,13 +19,13 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-By default, Apache Geode uses Internet Protocol version 4 for Geode address specifications. You can switch to Internet Protocol version 6 if all your machines support it. You may lose performance, so you need to understand the costs of making the switch.
+By default, <%=vars.product_name_long%> uses Internet Protocol version 4 for <%=vars.product_name%> address specifications. You can switch to Internet Protocol version 6 if all your machines support it. You may lose performance, so you need to understand the costs of making the switch.
 
 <a id="IPv4_and_IPv6__section_027647C0034042C087FD5C8DBCB8482B"></a>
 -   IPv4 uses a 32-bit address. IPv4 was the first protocol and is still the main one in use, but its address space is expected to be exhausted within a few years.
 -   IPv6 uses a 128-bit address. IPv6 succeeds IPv4, and will provide a much greater number of addresses.
 
-Based on current testing with Geode , IPv4 is generally recommended. IPv6 connections tend to take longer to form and the communication tends to be slower. Not all machines support IPv6 addressing. To use IPv6, all machines in your distributed system must support it or you will have connectivity problems.
+Based on current testing with <%=vars.product_name%>, IPv4 is generally recommended. IPv6 connections tend to take longer to form and the communication tends to be slower. Not all machines support IPv6 addressing. To use IPv6, all machines in your distributed system must support it or you will have connectivity problems.
 
 **Note:**
 Do not mix IPv4 and IPv6 addresses. Use one or the other, across the board.
@@ -34,7 +34,7 @@ IPv4 is the default version.
 
 To use IPv6, set the Java property, `java.net.preferIPv6Addresses`, to `true`.
 
-These examples show the formats to use to specify addresses in Geode .
+These examples show the formats to use to specify addresses in <%=vars.product_name%>.
 
 -   IPv4:
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/topology_concepts/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/topology_concepts/chapter_overview.html.md.erb b/geode-docs/topologies_and_comm/topology_concepts/chapter_overview.html.md.erb
index 95d0a3f..5cbd1a0 100644
--- a/geode-docs/topologies_and_comm/topology_concepts/chapter_overview.html.md.erb
+++ b/geode-docs/topologies_and_comm/topology_concepts/chapter_overview.html.md.erb
@@ -19,30 +19,30 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Before you configure your Apache Geode members, make sure you understand the options for topology and communication.
+Before you configure your <%=vars.product_name_long%> members, make sure you understand the options for topology and communication.
 
--   **[Topology Types](../../topologies_and_comm/topology_concepts/topology_types.html)**
+-   **[Topology Types](topology_types.html)**
 
-    The Apache Geode topology options allow you to scale horizontally and vertically.
+    The <%=vars.product_name_long%> topology options allow you to scale horizontally and vertically.
 
--   **[Planning Topology and Communication](../../topologies_and_comm/topology_concepts/member_communication.html)**
+-   **[Planning Topology and Communication](member_communication.html)**
 
-    Create a topology plan and a detailed list of machines and communication ports that your members will use. Configure your Apache Geode systems and the communication between systems.
+    Create a topology plan and a detailed list of machines and communication ports that your members will use. Configure your <%=vars.product_name_long%> systems and the communication between systems.
 
--   **[How Member Discovery Works](../../topologies_and_comm/topology_concepts/how_member_discovery_works.html)**
+-   **[How Member Discovery Works](how_member_discovery_works.html)**
 
-    Apache Geode provides various options for member discovery within a distributed system and between clients and servers.
+    <%=vars.product_name_long%> provides various options for member discovery within a distributed system and between clients and servers.
 
--   **[How Communication Works](../../topologies_and_comm/topology_concepts/how_communication_works.html)**
+-   **[How Communication Works](how_communication_works.html)**
 
-    Geode uses a combination of TCP and UDP unicast and multicast for communication between members. You can change the default behavior to optimize communication for your system.
+    <%=vars.product_name%> uses a combination of TCP and UDP unicast and multicast for communication between members. You can change the default behavior to optimize communication for your system.
 
--   **[Using Bind Addresses](../../topologies_and_comm/topology_concepts/using_bind_addresses.html)**
+-   **[Using Bind Addresses](using_bind_addresses.html)**
 
-    You use a bind address configuration to send network traffic through non-default network cards and to distribute the load of network traffic for Geode across multiple cards. If no bind address setting is found, Geode uses the host machine's default address.
+    You use a bind address configuration to send network traffic through non-default network cards and to distribute the load of network traffic for <%=vars.product_name%> across multiple cards. If no bind address setting is found, <%=vars.product_name%> uses the host machine's default address.
 
--   **[Choosing Between IPv4 and IPv6](../../topologies_and_comm/topology_concepts/IPv4_and_IPv6.html)**
+-   **[Choosing Between IPv4 and IPv6](IPv4_and_IPv6.html)**
 
-    By default, Apache Geode uses Internet Protocol version 4 for Geode address specifications. You can switch to Internet Protocol version 6 if all your machines support it. You may lose performance, so you need to understand the costs of making the switch.
+    By default, <%=vars.product_name_long%> uses Internet Protocol version 4 for <%=vars.product_name%> address specifications. You can switch to Internet Protocol version 6 if all your machines support it. You may lose performance, so you need to understand the costs of making the switch.
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/topology_concepts/how_communication_works.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/topology_concepts/how_communication_works.html.md.erb b/geode-docs/topologies_and_comm/topology_concepts/how_communication_works.html.md.erb
index 9739e10..6c7cd8e 100644
--- a/geode-docs/topologies_and_comm/topology_concepts/how_communication_works.html.md.erb
+++ b/geode-docs/topologies_and_comm/topology_concepts/how_communication_works.html.md.erb
@@ -19,37 +19,37 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Geode uses a combination of TCP and UDP unicast and multicast for communication between members. You can change the default behavior to optimize communication for your system.
+<%=vars.product_name%> uses a combination of TCP and UDP unicast and multicast for communication between members. You can change the default behavior to optimize communication for your system.
 
 Client/server communication and gateway sender to gateway receiver communication uses TCP/IP sockets. The server listens for client communication at a published address and the client establishes the connection, sending its location. Similarly, the gateway receiver listens for gateway sender communication and the connection is established between sites.
 
-In peer systems, for general messaging and region operations distribution, Geode uses either TCP or UDP unicast. The default is TCP. You can use TCP or UDP unicast for all communications or you can use it as the default but then can target specific regions to use UDP multicast for operations distribution. The best combination for your installation depends in large part on your data use and event messaging.
+In peer systems, for general messaging and region operations distribution, <%=vars.product_name%> uses either TCP or UDP unicast. The default is TCP. You can use TCP or UDP unicast for all communications or you can use it as the default but then can target specific regions to use UDP multicast for operations distribution. The best combination for your installation depends in large part on your data use and event messaging.
 
 ## <a id="how_communication_works__section_4402A20FEEC04055A0EEF6FEE82C116D" class="no-quick-link"></a>TCP
 
 TCP (Transmission Control Protocol) provides reliable in-order delivery of the system messages. TCP is more appropriate than UDP if the data is partitioned, if the distributed system is small, or if network loads are unpredictable. TCP is preferable to UDP unicast in smaller distributed systems because it implements more reliable communications at the operating system level than UDP and its performance can be substantially faster than UDP. As the size of the distributed system increases, however, the relatively small overhead of UDP makes it the better choice. TCP adds new threads and sockets to every member, causing more overhead as the system grows.
 
 **Note:**
-Even when Geode is configured to use UDP for messaging, Geode uses a TCP connection when attempting to detect failed members. See [Failure Detection and Membership Views](../../managing/network_partitioning/failure_detection.html#concept_CFD13177F78C456095622151D6EE10EB) for more details. In addition, the TCP connection's ping is not used for keep alive purposes; it is only used to detect failed members. See [TCP/IP KeepAlive Configuration](../../managing/monitor_tune/socket_tcp_keepalive.html#topic_jvc_pw3_34) for TCP keep alive configuration.
+Even when <%=vars.product_name%> is configured to use UDP for messaging, <%=vars.product_name%> uses a TCP connection when attempting to detect failed members. See [Failure Detection and Membership Views](../../managing/network_partitioning/failure_detection.html#concept_CFD13177F78C456095622151D6EE10EB) for more details. In addition, the TCP connection's ping is not used for keep alive purposes; it is only used to detect failed members. See [TCP/IP KeepAlive Configuration](../../managing/monitor_tune/socket_tcp_keepalive.html#topic_jvc_pw3_34) for TCP keep alive configuration.
 
 ## <a id="how_communication_works__section_E2D56EE03B54435BA9F04B8550F00534" class="no-quick-link"></a>UDP Unicast and Multicast
 
 UDP (User Datagram Protocol) is a connectionless protocol which uses far fewer resources than TCP. Adding another process to the distributed system incurs little overhead for UDP messaging. UDP on its own is not reliable however, and messages are restricted in size to 64k bytes or less, including overhead for message headers. Large messages must be fragmented and transmitted as multiple datagram messages. Consequently, UDP is slower than TCP in many cases and unusable in other cases if network traffic is unpredictable or heavily congested.
 
-UDP is used in Geode for both unicast and multicast messaging. Geode implements retransmission protocols to ensure proper delivery of messages over UDP.
+UDP is used in <%=vars.product_name%> for both unicast and multicast messaging. <%=vars.product_name%> implements retransmission protocols to ensure proper delivery of messages over UDP.
 
 ## <a id="how_communication_works__section_F2393EE1280749F4B59E2558AA907526" class="no-quick-link"></a>UDP Unicast
 
-UDP unicast is the alternative to TCP for general messaging. UDP is more appropriate than TCP for unicast messaging when there are a large number of processes in the distributed system, the network is not congested, cached objects are small, and applications can give the cache enough processing time to read from the network. If you disable TCP, Geode uses UDP for unicast messaging.
+UDP unicast is the alternative to TCP for general messaging. UDP is more appropriate than TCP for unicast messaging when there are a large number of processes in the distributed system, the network is not congested, cached objects are small, and applications can give the cache enough processing time to read from the network. If you disable TCP, <%=vars.product_name%> uses UDP for unicast messaging.
 
-For each member, Geode selects a unique port for UDP unicast communication. You can restrict the range used for the selection by setting `membership-port-range` in the `gemfire.properties` file. Example:
+For each member, <%=vars.product_name%> selects a unique port for UDP unicast communication. You can restrict the range used for the selection by setting `membership-port-range` in the `gemfire.properties` file. Example:
 
 ``` pre
 membership-port-range=1024-60000
 ```
 
 **Note:**
-In addition to UDP port configuration, the `membership-port-range` property defines the TCP port used for failure detection. See the [Reference](../../reference/book_intro.html#reference) for a description of the Geode property.
+In addition to UDP port configuration, the `membership-port-range` property defines the TCP port used for failure detection. See the [Reference](../../reference/book_intro.html#reference) for a description of the <%=vars.product_name%> property.
 
 ## <a id="how_communication_works__section_15F9EEDD65374F3E9D26C5A960D9D9D3" class="no-quick-link"></a>UDP Multicast
 
@@ -59,4 +59,4 @@ When multicast is enabled for a region, all processes in the distributed system
 
 Multicast is most appropriate when the majority of processes in a distributed system are using the same cache regions and need to get updates for them, such as when the processes define replicated regions or have their regions configured to receive all events.
 
-Even if you use multicast for a region, Geode will send unicast messages when appropriate. If data is partitioned, multicast is not a useful option. Even with multicast enabled, partitioned regions still use unicast for almost all purposes.
+Even if you use multicast for a region, <%=vars.product_name%> will send unicast messages when appropriate. If data is partitioned, multicast is not a useful option. Even with multicast enabled, partitioned regions still use unicast for almost all purposes.

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/topology_concepts/how_member_discovery_works.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/topology_concepts/how_member_discovery_works.html.md.erb b/geode-docs/topologies_and_comm/topology_concepts/how_member_discovery_works.html.md.erb
index 7123c9d..56174d4 100644
--- a/geode-docs/topologies_and_comm/topology_concepts/how_member_discovery_works.html.md.erb
+++ b/geode-docs/topologies_and_comm/topology_concepts/how_member_discovery_works.html.md.erb
@@ -19,7 +19,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Apache Geode provides various options for member discovery within a distributed system and between clients and servers.
+<%=vars.product_name_long%> provides various options for member discovery within a distributed system and between clients and servers.
 
 -   [Peer Member Discovery](how_member_discovery_works.html#how_member_discovery_works__section_F2B8EBF2909440BD90B4CDEE0CAA0C2A)
 -   [Standalone Member](how_member_discovery_works.html#how_member_discovery_works__section_E26DFAFE9E994C0C9A489E325E345816)
@@ -27,7 +27,7 @@ Apache Geode provides various options for member discovery within a distributed
 
 ## <a id="how_member_discovery_works__section_F2B8EBF2909440BD90B4CDEE0CAA0C2A" class="no-quick-link"></a>Peer Member Discovery
 
-Peer member discovery is what defines a distributed system. All applications and cache servers that use the same settings for peer discovery are members of the same distributed system. Each system member has a unique identity and knows the identities of the other members. A member can belong to only one distributed system at a time. Once they have found each other, members communicate directly, independent of the discovery mechanism. In peer discovery, Geode uses a membership coordinator to manage member joins and departures.
+Peer member discovery is what defines a distributed system. All applications and cache servers that use the same settings for peer discovery are members of the same distributed system. Each system member has a unique identity and knows the identities of the other members. A member can belong to only one distributed system at a time. Once they have found each other, members communicate directly, independent of the discovery mechanism. In peer discovery, <%=vars.product_name%> uses a membership coordinator to manage member joins and departures.
 
 Members discover each other using one or more locators. A locator provides both discovery and load balancing services. Peer locators manage a dynamic list of distributed system members. New members connect to one of the locators to retrieve the member list, which it uses to join the system.
 
@@ -38,7 +38,7 @@ Multiple locators ensure the most stable start up and availability for your dist
 
 ## <a id="how_member_discovery_works__section_E26DFAFE9E994C0C9A489E325E345816" class="no-quick-link"></a>Standalone Member
 
-The standalone member has no peers, does no peer discovery, and so does not use locators. It creates a distributed system connection only to access the Geode caching features. Running standalone has a faster startup and is appropriate for any member that is isolated from other applications. The primary use case is for client applications. Standalone members can be accessed and monitored if you enable the member to become a JMX Manager.
+The standalone member has no peers, does no peer discovery, and so does not use locators. It creates a distributed system connection only to access the <%=vars.product_name%> caching features. Running standalone has a faster startup and is appropriate for any member that is isolated from other applications. The primary use case is for client applications. Standalone members can be accessed and monitored if you enable the member to become a JMX Manager.
 
 ## <a id="how_member_discovery_works__section_37DE53BDCDB541618C6DF4E47A1F2B73" class="no-quick-link"></a>Client Discovery of Servers
 
@@ -53,8 +53,8 @@ You do not need to run any special processes to use locators for server discover
 
 ## <a id="how_member_discovery_works__section_1CB9D1439346415FB630E9DCD373CAC9" class="no-quick-link"></a>Multi-site Discovery
 
-In a multi-site (WAN) configuration, a Geode cluster uses locators to discover remote Geode clusters as well as to discover local Geode members. Each locator in a WAN configuration uniquely identifies the local cluster to which it belongs, and it can also identify locators in remote Geode clusters to which it will connect for WAN distribution.
+In a multi-site (WAN) configuration, a <%=vars.product_name%> cluster uses locators to discover remote <%=vars.product_name%> clusters as well as to discover local <%=vars.product_name%> members. Each locator in a WAN configuration uniquely identifies the local cluster to which it belongs, and it can also identify locators in remote <%=vars.product_name%> clusters to which it will connect for WAN distribution.
 
-When a locator starts up, it contacts each remote locator to exchange information about the available locators and gateway receiver configurations in the remote cluster. In addition to sharing information about its own cluster, a locator shares information that it has obtained from all other connected clusters. Each time a new locator starts up or an existing locator shuts down, the changed information is broadcast to other connected Geode clusters across the WAN.
+When a locator starts up, it contacts each remote locator to exchange information about the available locators and gateway receiver configurations in the remote cluster. In addition to sharing information about its own cluster, a locator shares information that it has obtained from all other connected clusters. Each time a new locator starts up or an existing locator shuts down, the changed information is broadcast to other connected <%=vars.product_name%> clusters across the WAN.
 
 See [Discovery for Multi-Site Systems](multisite_overview.html#topic_1742957C8D4B4F7590847EB8DB6CD4F7) for more information.

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/topology_concepts/how_multisite_systems_work.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/topology_concepts/how_multisite_systems_work.html.md.erb b/geode-docs/topologies_and_comm/topology_concepts/how_multisite_systems_work.html.md.erb
index bbdd813..f5ca063 100644
--- a/geode-docs/topologies_and_comm/topology_concepts/how_multisite_systems_work.html.md.erb
+++ b/geode-docs/topologies_and_comm/topology_concepts/how_multisite_systems_work.html.md.erb
@@ -19,26 +19,26 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-The Apache Geode multi-site implementation connects disparate distributed systems. The systems act as one when they are coupled, and they act as independent systems when communication between sites fails. The coupling is tolerant of weak or slow links between distributed system sites. A wide-area network (WAN) is the main use case for the multi-site topology.
+The <%=vars.product_name_long%> multi-site implementation connects disparate distributed systems. The systems act as one when they are coupled, and they act as independent systems when communication between sites fails. The coupling is tolerant of weak or slow links between distributed system sites. A wide-area network (WAN) is the main use case for the multi-site topology.
 
--   **[Overview of Multi-site Caching](../../topologies_and_comm/topology_concepts/multisite_overview.html#topic_70045702D3994BC692E75102CE01BD7C)**
+-   **[Overview of Multi-site Caching](multisite_overview.html#topic_70045702D3994BC692E75102CE01BD7C)**
 
     A multi-site installation consists of two or more distributed systems that are loosely coupled. Each site manages its own distributed system, but region data is distributed to remote sites using one or more logical connections.
 
--   **[Consistency for WAN Updates](../../topologies_and_comm/topology_concepts/multisite_overview.html#topic_C74A0961937640B199396DC925D8D782)**
+-   **[Consistency for WAN Updates](multisite_overview.html#topic_C74A0961937640B199396DC925D8D782)**
 
-    Geode ensures that all copies of a region eventually reach a consistent state on all members and clients that host the region, including Geode members that distribute region events across a WAN.
+    <%=vars.product_name%> ensures that all copies of a region eventually reach a consistent state on all members and clients that host the region, including <%=vars.product_name%> members that distribute region events across a WAN.
 
--   **[Discovery for Multi-Site Systems](../../topologies_and_comm/topology_concepts/multisite_overview.html#topic_1742957C8D4B4F7590847EB8DB6CD4F7)**
+-   **[Discovery for Multi-Site Systems](multisite_overview.html#topic_1742957C8D4B4F7590847EB8DB6CD4F7)**
 
-    Each Geode cluster in a WAN configuration uses locators to discover remote clusters as well as local members.
+    Each <%=vars.product_name%> cluster in a WAN configuration uses locators to discover remote clusters as well as local members.
 
--   **[Gateway Senders](../../topologies_and_comm/topology_concepts/multisite_overview.html#topic_9AA37B43642D4DE19072CA3367C849BA)**
+-   **[Gateway Senders](multisite_overview.html#topic_9AA37B43642D4DE19072CA3367C849BA)**
 
-    A Geode cluster uses a *gateway sender* to distribute region events to another, remote Geode cluster. You can create multiple gateway sender configurations to distribute region events to multiple remote clusters, and/or to distribute region events concurrently to another remote cluster.
+    A <%=vars.product_name%> cluster uses a *gateway sender* to distribute region events to another, remote <%=vars.product_name%> cluster. You can create multiple gateway sender configurations to distribute region events to multiple remote clusters, and/or to distribute region events concurrently to another remote cluster.
 
--   **[Gateway Receivers](../../topologies_and_comm/topology_concepts/multisite_overview.html#topic_4DB3D9CF01AD4F4899457D1250468D00)**
+-   **[Gateway Receivers](multisite_overview.html#topic_4DB3D9CF01AD4F4899457D1250468D00)**
 
-    A gateway receiver configures a physical connection for receiving region events from gateway senders in one or more remote Geode clusters.
+    A gateway receiver configures a physical connection for receiving region events from gateway senders in one or more remote <%=vars.product_name%> clusters.
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/topology_concepts/how_server_discovery_works.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/topology_concepts/how_server_discovery_works.html.md.erb b/geode-docs/topologies_and_comm/topology_concepts/how_server_discovery_works.html.md.erb
index 4abcd8a..78dc35c 100644
--- a/geode-docs/topologies_and_comm/topology_concepts/how_server_discovery_works.html.md.erb
+++ b/geode-docs/topologies_and_comm/topology_concepts/how_server_discovery_works.html.md.erb
@@ -19,10 +19,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Apache Geode locators provide reliable and flexible server discovery services for your clients. You can use all servers for all client requests, or group servers according to function, with the locators directing each client request to the right group of servers.
+<%=vars.product_name_long%> locators provide reliable and flexible server discovery services for your clients. You can use all servers for all client requests, or group servers according to function, with the locators directing each client request to the right group of servers.
 
 <a id="how_server_discovery_works__section_91AC081D4C48408B9ABA40430F161E73"></a>
-By default, Geode clients and servers discover each other on a predefined port (40404) on the localhost. This works, but is not typically the way you would deploy a client/server configuration. The recommended solution is to use one or more dedicated locators. A locator provides both discovery and load balancing services. With server locators, clients are configured with a locator list and locators maintain a dynamic server list. The locator listens at an address and port for connecting clients and gives the clients server information. The clients are configured with locator information and have no configuration specific to the servers.
+By default, <%=vars.product_name%> clients and servers discover each other on a predefined port (40404) on the localhost. This works, but is not typically the way you would deploy a client/server configuration. The recommended solution is to use one or more dedicated locators. A locator provides both discovery and load balancing services. With server locators, clients are configured with a locator list and locators maintain a dynamic server list. The locator listens at an address and port for connecting clients and gives the clients server information. The clients are configured with locator information and have no configuration specific to the servers.
 
 ## <a id="how_server_discovery_works__section_95B62F09EF954A99ABBDEBC2756812E3" class="no-quick-link"></a>Basic Configuration
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/topology_concepts/how_the_pool_manages_connections.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/topology_concepts/how_the_pool_manages_connections.html.md.erb b/geode-docs/topologies_and_comm/topology_concepts/how_the_pool_manages_connections.html.md.erb
index c0c93ab..b14f0e3 100644
--- a/geode-docs/topologies_and_comm/topology_concepts/how_the_pool_manages_connections.html.md.erb
+++ b/geode-docs/topologies_and_comm/topology_concepts/how_the_pool_manages_connections.html.md.erb
@@ -19,7 +19,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-The server pools in your Apache Geode client processes manage all client connection requests to the server tier. To make the best use of the pool functionality, you should understand how the pool manages the server connections.
+The server pools in your <%=vars.product_name_long%> client processes manage all client connection requests to the server tier. To make the best use of the pool functionality, you should understand how the pool manages the server connections.
 
 <a id="how_the_pool_manages_connections__section_2C419926908B4A3599FF0B8EAB7E69A1"></a>
 Client/server communication is done in two distinct ways. Each kind of communication uses a different type of connection for maximum performance and availability.

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/topology_concepts/member_communication.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/topology_concepts/member_communication.html.md.erb b/geode-docs/topologies_and_comm/topology_concepts/member_communication.html.md.erb
index 2826224..5d15ac7 100644
--- a/geode-docs/topologies_and_comm/topology_concepts/member_communication.html.md.erb
+++ b/geode-docs/topologies_and_comm/topology_concepts/member_communication.html.md.erb
@@ -19,7 +19,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Create a topology plan and a detailed list of machines and communication ports that your members will use. Configure your Apache Geode systems and the communication between systems.
+Create a topology plan and a detailed list of machines and communication ports that your members will use. Configure your <%=vars.product_name_long%> systems and the communication between systems.
 
 ## <a id="membership_and_communication__section_AC0D7685A2CA4999A40BCEFD514BF599" class="no-quick-link"></a>Determine Protocols and Addresses
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/topology_concepts/topology_types.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/topology_concepts/topology_types.html.md.erb b/geode-docs/topologies_and_comm/topology_concepts/topology_types.html.md.erb
index fbc51ca..8bdbb4d 100644
--- a/geode-docs/topologies_and_comm/topology_concepts/topology_types.html.md.erb
+++ b/geode-docs/topologies_and_comm/topology_concepts/topology_types.html.md.erb
@@ -19,9 +19,9 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-The Apache Geode topology options allow you to scale horizontally and vertically.
+The <%=vars.product_name_long%> topology options allow you to scale horizontally and vertically.
 
-Apache Geode provides a variety of cache topologies:
+<%=vars.product_name_long%> provides a variety of cache topologies:
 
 -   At the core of all systems is the single, peer-to-peer distributed system.
 -   For horizontal and vertical scaling, you can combine individual systems into client/server and multi-site (WAN) topologies:
@@ -30,19 +30,19 @@ Apache Geode provides a variety of cache topologies:
 
 ## <a id="concept_7628F498DB534A2D8A99748F5DA5DC94__section_333142A36A3E4AF7A1EC31856ED99FCA" class="no-quick-link"></a>Peer-to-Peer Configuration
 
-The peer-to-peer distributed system is the building block for all Geode installations. Peer-to-peer alone is the simplest topology. Each cache instance, or member, directly communicates with every other member in the distributed system. This cache configuration is primarily designed for applications that need to embed a cache within the application process space and participate in a cluster. A typical example is an application server cluster in which the application and the cache are co-located and share the same heap.
+The peer-to-peer distributed system is the building block for all <%=vars.product_name%> installations. Peer-to-peer alone is the simplest topology. Each cache instance, or member, directly communicates with every other member in the distributed system. This cache configuration is primarily designed for applications that need to embed a cache within the application process space and participate in a cluster. A typical example is an application server cluster in which the application and the cache are co-located and share the same heap.
 
 <img src="../../images_svg/p2p_topology.svg" id="concept_7628F498DB534A2D8A99748F5DA5DC94__image_vzs_qwn_4r" class="image" />
 
 ## <a id="concept_7628F498DB534A2D8A99748F5DA5DC94__section_38F7D763AE32466299DC5B7DB9E71C61" class="no-quick-link"></a>Client/Server Configuration
 
-The client/server topology is the model for vertical scaling, where clients typically host a small subset of the data in the application process space and delegate to the server system for the rest. Compared to peer-to-peer by itself, the client/server architecture provides better data isolation, high fetch performance, and more scalability. If data distribution will put a very heavy load on the network, a client/server architecture usually gives better performance. In any client/server installation, the server system is itself a peer-to-peer system, with data distributed between servers. A client system has a connection pool, which it uses to communicate with servers and other Geode members. A client may also contain a local cache.
+The client/server topology is the model for vertical scaling, where clients typically host a small subset of the data in the application process space and delegate to the server system for the rest. Compared to peer-to-peer by itself, the client/server architecture provides better data isolation, high fetch performance, and more scalability. If data distribution will put a very heavy load on the network, a client/server architecture usually gives better performance. In any client/server installation, the server system is itself a peer-to-peer system, with data distributed between servers. A client system has a connection pool, which it uses to communicate with servers and other <%=vars.product_name%> members. A client may also contain a local cache.
 
 <img src="../../images_svg/cs_topology.svg" id="concept_7628F498DB534A2D8A99748F5DA5DC94__image_073094D7ED05419A9EE8E6AE552BE3F3" class="image" />
 
 ## <a id="concept_7628F498DB534A2D8A99748F5DA5DC94__section_566EC05894D6461AA0E7DD7B065D457B" class="no-quick-link"></a>Multi-site Configuration
 
-For horizontal scaling, you can use a loosely coupled multi-site topology. With multi-site, multiple Geode systems are loosely coupled, generally across geographical distances with slower connections, such as with a WAN. This topology provides better performance than the tight coupling of a single system, and greater independence between locations, so that each site can function on its own if the connection or remote site become unavailable. In a multi-site installation, each individual site is a peer-to-peer or Client/Server system.
+For horizontal scaling, you can use a loosely coupled multi-site topology. With multi-site, multiple <%=vars.product_name%> systems are loosely coupled, generally across geographical distances with slower connections, such as with a WAN. This topology provides better performance than the tight coupling of a single system, and greater independence between locations, so that each site can function on its own if the connection or remote site becomes unavailable. In a multi-site installation, each individual site is a peer-to-peer or Client/Server system.
 
 <img src="../../images/consistent_multisite.png" id="concept_7628F498DB534A2D8A99748F5DA5DC94__image_6501FD66F0F94273A1F7EEE5747B3925" class="image" />
 

http://git-wip-us.apache.org/repos/asf/geode/blob/e2c3d531/geode-docs/topologies_and_comm/topology_concepts/using_bind_addresses.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/topologies_and_comm/topology_concepts/using_bind_addresses.html.md.erb b/geode-docs/topologies_and_comm/topology_concepts/using_bind_addresses.html.md.erb
index 833e3fa..b9f0130 100644
--- a/geode-docs/topologies_and_comm/topology_concepts/using_bind_addresses.html.md.erb
+++ b/geode-docs/topologies_and_comm/topology_concepts/using_bind_addresses.html.md.erb
@@ -19,10 +19,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-You use a bind address configuration to send network traffic through non-default network cards and to distribute the load of network traffic for Geode across multiple cards. If no bind address setting is found, Geode uses the host machine's default address.
+You use a bind address configuration to send network traffic through non-default network cards and to distribute the load of network traffic for <%=vars.product_name%> across multiple cards. If no bind address setting is found, <%=vars.product_name%> uses the host machine's default address.
 
 <a id="using_bind_addresses__section_6063D5004787488A90EC03085991F902"></a>
-Host machines transmit data to the network and receive data from the network through one or more network cards, also referred to as network interface cards (NIC) or LAN cards. A host with more than one card is referred to as a multi-homed host. On multi-homed hosts, one network card is used by default. You can use bind addresses to configure your Geode members to use non-default network cards on a multi-homed host.
+Host machines transmit data to the network and receive data from the network through one or more network cards, also referred to as network interface cards (NIC) or LAN cards. A host with more than one card is referred to as a multi-homed host. On multi-homed hosts, one network card is used by default. You can use bind addresses to configure your <%=vars.product_name%> members to use non-default network cards on a multi-homed host.
 
 **Note:**
 When you specify a non-default card address for a process, all processes that connect to it need to use the same address in their connection settings. For example, if you use bind addresses for your server locators, you must use the same addresses to configure the server pools in your clients.
@@ -31,12 +31,12 @@ Use IPv4 or IPv6 numeric address specifications for your bind address settings.
 
 ## <a id="using_bind_addresses__section_63589355AB684F739145E9185806D023" class="no-quick-link"></a>Peer and Server Communication
 
-You can configure peer, and server communication so that each communication type uses its own address or types use the same address. If no setting is found for a specific communication type, Geode uses the host machine's default address.
+You can configure peer and server communication so that each communication type uses its own address or types use the same address. If no setting is found for a specific communication type, <%=vars.product_name%> uses the host machine's default address.
 
 **Note:**
 Bind addresses set through the APIs, like `CacheServer` and `DistributedSystem`, take precedence over the settings discussed here. If your settings are not working, check to make sure there are no bind address settings being done through API calls.
 
-This table lists the settings used for peer and server communication, ordered by precedence. For example, for server communication, Geode searches first for the cache-server bind address, then the `gfsh start                     server` `server-bind-address` setting, and so on until a setting is found or all possibilities are exhausted.
+This table lists the settings used for peer and server communication, ordered by precedence. For example, for server communication, <%=vars.product_name%> searches first for the cache-server bind address, then the `gfsh start server` `server-bind-address` setting, and so on until a setting is found or all possibilities are exhausted.
 
 | Property Setting Ordered by Precedence               | Peer | Server | Gateway Receiver | Syntax                                            |
 |------------------------------------------------------|------|--------|------------------|---------------------------------------------------|
@@ -66,7 +66,7 @@ bind-address=192.0.2.0
 
 If you are using multi-site (WAN) topology, you can also configure gateway receiver communication (in addition to peer and server communication) so that each communication type uses its own address.
 
-This table lists the settings used for peer, server, and gateway receiver communication, ordered by precedence. For example, for gateway receiver communication, Geode searches first for a `cache.xml` `<gateway-receiver>` `bind-address` setting. If that is not set, Geode searches for the `gfsh start server` `server-bind-address` setting, and so on until a setting is found or all possibilities are exhausted.
+This table lists the settings used for peer, server, and gateway receiver communication, ordered by precedence. For example, for gateway receiver communication, <%=vars.product_name%> searches first for a `cache.xml` `<gateway-receiver>` `bind-address` setting. If that is not set, <%=vars.product_name%> searches for the `gfsh start server` `server-bind-address` setting, and so on until a setting is found or all possibilities are exhausted.
 
 | Property Setting Ordered by Precedence               | Peer | Server | Gateway Receiver | Syntax                                            |
 |------------------------------------------------------|------|--------|------------------|---------------------------------------------------|
@@ -105,7 +105,7 @@ Set the locator bind address using one of these methods:
     gfsh>start locator --name=my_locator --bind-address=ip-address-to-bind --port=portNumber
     ```
 
--   Inside a Geode application, take one of the following actions:
+-   Inside a <%=vars.product_name%> application, take one of the following actions:
     -   Automatically start a co-located locator using the gemfire property `start-locator`, and specifying the bind address for it in that property setting.
     -   Use `org.apache.geode.distributed.LocatorLauncher` API to start the locator inside your code. Use the `LocatorLauncher.Builder` class to construct an instance of the `LocatorLauncher`, use the `setBindAddress` method to specify the IP address to use and then use the start() method to start a Locator service embedded in your Java application process.
 


[21/25] geode git commit: GEODE-3395 Variable-ize product version and name in user guide - Managing

Posted by ud...@apache.org.
GEODE-3395 Variable-ize product version and name in user guide - Managing


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/1b84ecbe
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/1b84ecbe
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/1b84ecbe

Branch: refs/heads/feature/GEODE-3503
Commit: 1b84ecbe4d942d843cb0d0fa8c2e03eb55f07f39
Parents: 6f7667d
Author: Dave Barnes <db...@pivotal.io>
Authored: Wed Aug 23 09:47:36 2017 -0700
Committer: Dave Barnes <db...@pivotal.io>
Committed: Wed Aug 23 10:35:48 2017 -0700

----------------------------------------------------------------------
 .../source/subnavs/geode-subnav.erb             |  14 +-
 .../running/running_the_cacheserver.html.md.erb |   2 +-
 .../running/running_the_locator.html.md.erb     |   2 +-
 .../autoreconnect/member-reconnect.html.md.erb  |  83 -------
 geode-docs/managing/book_intro.html.md.erb      |  40 ++--
 .../chapter_overview.html.md.erb                |  14 +-
 .../exporting_a_snapshot.html.md.erb            |   4 +-
 .../importing_a_snapshot.html.md.erb            |   4 +-
 .../disk_storage/chapter_overview.html.md.erb   |  22 +-
 .../compacting_disk_stores.html.md.erb          |  12 +-
 .../disk_store_configuration_params.html.md.erb |   2 +-
 .../how_disk_stores_work.html.md.erb            |  10 +-
 ...eping_offline_disk_store_in_sync.html.md.erb |   2 +-
 .../managing_disk_buffer_flushes.html.md.erb    |  10 +-
 .../managing_disk_stores.html.md.erb            |  16 +-
 .../disk_storage/operation_logs.html.md.erb     |  16 +-
 ...ize_availability_and_performance.html.md.erb |   2 +-
 .../overview_using_disk_stores.html.md.erb      |   8 +-
 ...starting_system_with_disk_stores.html.md.erb |   2 +-
 .../disk_storage/using_disk_stores.html.md.erb  |   8 +-
 .../using_the_default_disk_store.html.md.erb    |   4 +-
 .../heap_use/heap_management.html.md.erb        |  28 +--
 .../managing/heap_use/lock_memory.html.md.erb   |   6 +-
 .../heap_use/off_heap_management.html.md.erb    |  12 +-
 .../logging/configuring_log4j2.html.md.erb      |  26 +--
 .../logging/how_logging_works.html.md.erb       |  20 +-
 geode-docs/managing/logging/logging.html.md.erb |  14 +-
 .../logging/logging_categories.html.md.erb      |  12 +-
 .../logging/setting_up_logging.html.md.erb      |   2 +-
 .../configuring_rmi_connector.html.md.erb       |  10 +-
 .../gfsh_and_management_api.html.md.erb         |   2 +-
 .../management/jmx_manager_node.html.md.erb     |   4 +-
 .../jmx_manager_operations.html.md.erb          |  22 +-
 .../list_of_mbean_notifications.html.md.erb     |   2 +-
 .../management/list_of_mbeans.html.md.erb       |   8 +-
 .../management/list_of_mbeans_full.html.md.erb  |  10 +-
 .../management_and_monitoring.html.md.erb       |  30 ++-
 ...nagement_and_monitoring_features.html.md.erb |  12 +-
 .../management_system_overview.html.md.erb      |  34 +--
 .../management/mbean_architecture.html.md.erb   |  24 +-
 .../management/mbean_notifications.html.md.erb  |   8 +-
 .../management/mbeans_jconsole.html.md.erb      |  10 +-
 .../managing/management/mm_overview.html.md.erb |  48 ++--
 ...tification_federation_and_alerts.html.md.erb |   2 +-
 .../managing/member-reconnect.html.md.erb       |  83 +++++++
 .../monitor_tune/cache_consistency.html.md.erb  |   8 +-
 .../monitor_tune/chapter_overview.html.md.erb   |  28 +--
 .../multicast_communication.html.md.erb         |  14 +-
 ...ication_configuring_speed_limits.html.md.erb |   2 +-
 ...unication_runtime_considerations.html.md.erb |   6 +-
 ...n_testing_multicast_speed_limits.html.md.erb |   2 +-
 .../performance_controls.html.md.erb            |  14 +-
 ...ance_controls_data_serialization.html.md.erb |   4 +-
 .../performance_on_vsphere.html.md.erb          |  38 ++--
 .../monitor_tune/slow_messages.html.md.erb      |   2 +-
 .../monitor_tune/slow_receivers.html.md.erb     |   6 +-
 .../slow_receivers_managing.html.md.erb         |   6 +-
 ...ow_receivers_preventing_problems.html.md.erb |   8 +-
 .../socket_communication.html.md.erb            |  18 +-
 ...ommunication_have_enough_sockets.html.md.erb |   4 +-
 ...tion_setting_socket_buffer_sizes.html.md.erb |   2 +-
 .../socket_tcp_keepalive.html.md.erb            |   4 +-
 .../sockets_and_gateways.html.md.erb            |   2 +-
 .../system_member_performance.html.md.erb       |   8 +-
 ...ber_performance_jvm_mem_settings.html.md.erb |   2 +-
 .../monitor_tune/udp_communication.html.md.erb  |  12 +-
 .../chapter_overview.html.md.erb                |  18 +-
 .../failure_detection.html.md.erb               |   2 +-
 .../handling_network_partitioning.html.md.erb   |   6 +-
 ...rk_partitioning_management_works.html.md.erb |   4 +-
 ...ators_lead_members_and_weighting.html.md.erb |   2 +-
 .../preventing_network_partitions.html.md.erb   |   2 +-
 .../managing/region_compression.html.md.erb     | 226 +++++++++++++++++++
 .../region_compression.html.md.erb              | 226 -------------------
 .../authentication_examples.html.md.erb         |   4 +-
 .../authentication_overview.html.md.erb         |  10 +-
 .../security/authorization_example.html.md.erb  |   2 +-
 .../security/authorization_overview.html.md.erb |   4 +-
 .../security/chapter_overview.html.md.erb       |  14 +-
 .../security/encrypting_passwords.html.md.erb   |   2 +-
 .../encrypting_with_diffie_hellman.html.md.erb  |  66 ++++++
 .../encrypting_with_diffie_helman.html.md.erb   |  66 ------
 .../security/implementing_ssl.html.md.erb       |   8 +-
 .../security/properties_file.html.md.erb        |   2 +-
 .../security/security-audit.html.md.erb         |   8 +-
 .../security_audit_overview.html.md.erb         |   4 +-
 .../managing/security/ssl_example.html.md.erb   |   2 +-
 .../managing/security/ssl_overview.html.md.erb  |  10 +-
 .../application_defined_statistics.html.md.erb  |   4 +-
 .../statistics/chapter_overview.html.md.erb     |  16 +-
 .../statistics/how_statistics_work.html.md.erb  |   2 +-
 .../setting_up_statistics.html.md.erb           |   2 +-
 ...ient_region_and_entry_statistics.html.md.erb |   6 +-
 .../chapter_overview.html.md.erb                |  24 +-
 .../diagnosing_system_probs.html.md.erb         |  24 +-
 ...ent_and_recover_disk_full_errors.html.md.erb |   4 +-
 ...ring_conflicting_data_exceptions.html.md.erb |  14 +-
 .../recovering_from_app_crashes.html.md.erb     |   4 +-
 .../recovering_from_cs_crashes.html.md.erb      |   2 +-
 .../recovering_from_machine_crashes.html.md.erb |   6 +-
 .../recovering_from_network_outages.html.md.erb |   2 +-
 .../recovering_from_p2p_crashes.html.md.erb     |   4 +-
 .../topics/gemfire_properties.html.md.erb       |   2 +-
 .../gfsh/command-pages/create.html.md.erb       |   2 +-
 104 files changed, 842 insertions(+), 860 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-book/master_middleman/source/subnavs/geode-subnav.erb
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/subnavs/geode-subnav.erb b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
index 838b265..788923d 100644
--- a/geode-book/master_middleman/source/subnavs/geode-subnav.erb
+++ b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
@@ -516,20 +516,20 @@ limitations under the License.
                         </ul>
                     </li>
                     <li class="has_submenu">
-                        <a href="/docs/guide/12/managing/region_compression/region_compression.html">Region Compression</a>
+                        <a href="/docs/guide/12/managing/region_compression.html">Region Compression</a>
                         <ul>
                             <li>
-                                <a href="/docs/guide/12/managing/region_compression/region_compression.html#concept_a2c_rhc_gl">Guidelines on Using Compression</a>
+                                <a href="/docs/guide/12/managing/region_compression.html#concept_a2c_rhc_gl">Guidelines on Using Compression</a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/managing/region_compression/region_compression.html#topic_inm_whc_gl">How to Enable Compression in a Region</a>
+                                <a href="/docs/guide/12/managing/region_compression.html#topic_inm_whc_gl">How to Enable Compression in a Region</a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/managing/region_compression/region_compression.html#topic_hqf_syj_g4">Working with Compressors
+                                <a href="/docs/guide/12/managing/region_compression.html#topic_hqf_syj_g4">Working with Compressors
                                 </a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/managing/region_compression/region_compression.html#topic_omw_j3c_gl">Comparing Performance of Compressed and Non-Compressed Regions</a>
+                                <a href="/docs/guide/12/managing/region_compression.html#topic_omw_j3c_gl">Comparing Performance of Compressed and Non-Compressed Regions</a>
                             </li>
                         </ul>
                     </li>
@@ -592,7 +592,7 @@ limitations under the License.
                                         <a href="/docs/guide/12/managing/security/encrypting_passwords.html">Encrypting Passwords for Use in cache.xml</a>
                                     </li>
                                     <li>
-                                        <a href="/docs/guide/12/managing/security/encrypting_with_diffie_helman.html">Encrypt Credentials with Diffie-Hellman</a>
+                                        <a href="/docs/guide/12/managing/security/encrypting_with_diffie_hellman.html">Encrypt Credentials with Diffie-Hellman</a>
                                     </li>
                                     <li>
                                         <a href="/docs/guide/12/managing/security/authentication_examples.html">Authentication Example</a>
@@ -798,7 +798,7 @@ limitations under the License.
                                 <a href="/docs/guide/12/managing/troubleshooting/system_failure_and_recovery.html">System Failure and Recovery</a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/managing/autoreconnect/member-reconnect.html">Handling Forced Cache Disconnection Using Autoreconnect</a>
+                                <a href="/docs/guide/12/managing/member-reconnect.html">Handling Forced Cache Disconnection Using Autoreconnect</a>
                             </li>
                             <li class="has_submenu">
                                 <a href="/docs/guide/12/managing/troubleshooting/recovering_from_app_crashes.html">Recovering from Application and Cache Server Crashes</a>

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/configuring/running/running_the_cacheserver.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/configuring/running/running_the_cacheserver.html.md.erb b/geode-docs/configuring/running/running_the_cacheserver.html.md.erb
index 2d43f59..578326a 100644
--- a/geode-docs/configuring/running/running_the_cacheserver.html.md.erb
+++ b/geode-docs/configuring/running/running_the_cacheserver.html.md.erb
@@ -40,7 +40,7 @@ The `gfsh` utility uses a working directory for its configuration files and log
 -   If you are using the Spring Framework, you can specify a Spring ApplicationContext XML file when starting up your server in `gfsh` by using the `--spring-xml-location` command-line option. This option allows you to bootstrap your Geode server process with your Spring application's configuration. See [Spring documentation](http://docs.spring.io/spring/docs/3.2.x/spring-framework-reference/html/resources.html#resources-app-ctx) for more information on this file.
 -   For logging output, log file output defaults to `server_name.log` in the cache server's working directory. If you restart a server with the same server name, the existing *server\_name*.log file is automatically renamed for you (for example, `server1-01-01.log` or `server1-02-01.log`). You can modify the level of logging details in this file by specifying a level in the `--log-level` argument when starting up the server.
 -   By default, the server will start in a subdirectory (named after the server's specified `--name`) under the directory where `gfsh` is executed. This subdirectory is considered the current working directory. You can also specify a different working directory when starting the cache server in `gfsh`.
--   By default, a server process that has been shutdown and disconnected due to a network partition event or member unresponsiveness will restart itself and automatically try to reconnect to the existing distributed system. See [Handling Forced Cache Disconnection Using Autoreconnect](../../managing/autoreconnect/member-reconnect.html#concept_22EE6DDE677F4E8CAF5786E17B4183A9) for more details.
+-   By default, a server process that has been shut down and disconnected due to a network partition event or member unresponsiveness will restart itself and automatically try to reconnect to the existing distributed system. See [Handling Forced Cache Disconnection Using Autoreconnect](../../managing/member-reconnect.html#concept_22EE6DDE677F4E8CAF5786E17B4183A9) for more details.
 -   You can pass JVM parameters to the server's JVM by using the `--J=-Dproperty.name=value` upon server startup. These parameters can be Java properties or Geode configuration properties such as `gemfire.jmx-manager`. For example:
 
     ``` pre

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/configuring/running/running_the_locator.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/configuring/running/running_the_locator.html.md.erb b/geode-docs/configuring/running/running_the_locator.html.md.erb
index 6e3a49f..853cb64 100644
--- a/geode-docs/configuring/running/running_the_locator.html.md.erb
+++ b/geode-docs/configuring/running/running_the_locator.html.md.erb
@@ -55,7 +55,7 @@ Locator configuration and log files have the following properties:
 -   There is no cache configuration specific to locators.
 -   For logging output, the locator creates a log file in its current working directory. Log file output defaults to `locator_name.log` in the locator's working directory. If you restart a locator with a previously used locator name, the existing *locator\_name*.log file is automatically renamed for you (for example, `locator1-01-01.log` or `locator1-02-01.log`). You can modify the level of logging details in this file by specifying a level in the `--log-level` argument when starting up the locator.
 -   By default, a locator will start in a subdirectory (named after the locator) under the directory where `gfsh` is executed. This subdirectory is considered the current working directory. You can also specify a different working directory when starting the locator in `gfsh`.
+-   By default, a locator that has been shut down and disconnected due to a network partition event or member unresponsiveness will restart itself and automatically try to reconnect to the existing distributed system. When a locator is in the reconnecting state, it provides no discovery services for the distributed system. See [Handling Forced Cache Disconnection Using Autoreconnect](../../managing/member-reconnect.html) for more details.
+-   By default, a locator that has been shutdown and disconnected due to a network partition event or member unresponsiveness will restart itself and automatically try to reconnect to the existing distributed system. When a locator is in the reconnecting state, it provides no discovery services for the distributed system. See [Handling Forced Cache Disconnection Using Autoreconnect](../../managing/member-reconnect.html) for more details.
 
 ## <a id="running_the_locator__section_wst_ykb_rr" class="no-quick-link"></a>Locators and the Cluster Configuration Service
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/autoreconnect/member-reconnect.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/autoreconnect/member-reconnect.html.md.erb b/geode-docs/managing/autoreconnect/member-reconnect.html.md.erb
deleted file mode 100644
index 8d59c0a..0000000
--- a/geode-docs/managing/autoreconnect/member-reconnect.html.md.erb
+++ /dev/null
@@ -1,83 +0,0 @@
----
-title:  Handling Forced Cache Disconnection Using Autoreconnect
----
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-A Geode member may be forcibly disconnected from a Geode distributed system if the member is unresponsive for a period of time, or if a network partition separates one or more members into a group that is too small to act as the distributed system.
-
-## How the Autoreconnection Process Works
-
-After being disconnected from a distributed system,
-a Geode member shuts down and, by default, automatically restarts into 
-a "reconnecting" state,
-while periodically attempting to rejoin the distributed system 
-by contacting a list of known locators.
-If the member succeeds in reconnecting to a known locator, the member rebuilds its view of the distributed system from existing members and receives a new distributed member ID.
-
-If the member cannot connect to a known locator, the member will then check to see if it itself is a locator (or hosting an embedded locator process). If the member is a locator, then the member does a quorum-based reconnect; it will attempt to contact a quorum of the members that were in the membership view just before it became disconnected. If a quorum of members can be contacted, then startup of the distributed system is allowed to begin. Since the reconnecting member does not know which members survived the network partition event, all members that are in a reconnecting state will keep their UDP unicast ports open and respond to ping requests.
-
-Membership quorum is determined using the same member weighting system used in network partition detection. See [Membership Coordinators, Lead Members and Member Weighting](../network_partitioning/membership_coordinators_lead_members_and_weighting.html#concept_23C2606D59754106AFBFE17515DF4330).
-
-Note that when a locator is in the reconnecting state,
-it provides no discovery services for the distributed system.
-
-The default settings for reconfiguration of the cache once
-reconnected assume that the cluster configuration service has
-a valid (XML) configuration.
-This will not be the case if the cluster was configured using
-API calls.
-To handle this case,
-either disable autoreconnect by setting the property to
-
-```
-disable-auto-reconnect = true
-```
-
-or, disable the cluster configuration service by setting the property to
-
-```
-enable-cluster-configuration = false
-```
-
-After the cache has reconnected, applications must fetch a reference to the new Cache, Regions, DistributedSystem and other artifacts. Old references will continue to throw cancellation exceptions like `CacheClosedException(cause=ForcedDisconnectException)`.
-
-See the Geode `DistributedSystem` and `Cache` Java API documentation for more information.
-
-## Managing the Autoreconnection Process
-
-By default a Geode member will try to reconnect until it is told to stop by using the `DistributedSystem.stopReconnecting()` or `Cache.stopReconnecting()` method. You can disable automatic reconnection entirely by setting `disable-auto-reconnect` Geode property to "true."
-
-You can use `DistributedSystem` and `Cache` callback methods to perform actions during the reconnect process, or to cancel the reconnect process if necessary.
-
-The `DistributedSystem` and `Cache` API provide several methods you can use to take actions while a member is reconnecting to the distributed system:
-
--   `DistributedSystem.isReconnecting()` returns true if the member is in the process of reconnecting and recreating the cache after having been removed from the system by other members.
--   `DistributedSystem.waitUntilReconnected(long, TimeUnit)` waits for a period of time, and then returns a boolean value to indicate whether the member has reconnected to the DistributedSystem. Use a value of -1 seconds to wait indefinitely until the reconnect completes or the member shuts down. Use a value of 0 seconds as a quick probe to determine if the member has reconnected.
--   `DistributedSystem.getReconnectedSystem()` returns the reconnected DistributedSystem.
--   `DistributedSystem.stopReconnecting()` stops the reconnection process and ensures that the DistributedSystem stays in a disconnected state.
--   `Cache.isReconnecting()` returns true if the cache is attempting to reconnect to a distributed system.
--   `Cache.waitForReconnect(long, TimeUnit)` waits for a period of time, and then returns a boolean value to indicate whether the DistributedSystem has reconnected. Use a value of -1 seconds to wait indefinitely until the reconnect completes or the cache shuts down. Use a value of 0 seconds as a quick probe to determine if the member has reconnected.
--   `Cache.getReconnectedCache()` returns the reconnected Cache.
--   `Cache.stopReconnecting()` stops the reconnection process and ensures that the DistributedSystem stays in a disconnected state.
-
-## Operator Intervention
-
-You may need to intervene in the autoreconnection process if processes or hardware have crashed or are otherwise shut down before the network connection is healed. In this case the members in a "reconnecting" state will not be able to find the lost processes through UDP probes and will not rejoin the system until they are able to contact a locator.
-
-

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/book_intro.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/book_intro.html.md.erb b/geode-docs/managing/book_intro.html.md.erb
index d7929f2..de2dc67 100644
--- a/geode-docs/managing/book_intro.html.md.erb
+++ b/geode-docs/managing/book_intro.html.md.erb
@@ -1,6 +1,4 @@
----
-title:  Managing Apache Geode
----
+<% set_title("Managing", product_name_long)%>
 
 <!--
 Licensed to the Apache Software Foundation (ASF) under one or more
@@ -19,50 +17,50 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-*Managing Apache Geode* describes how to plan and implement tasks associated with managing, monitoring, and troubleshooting Apache Geode.
+*Managing <%=vars.product_name_long%>* describes how to plan and implement tasks associated with managing, monitoring, and troubleshooting <%=vars.product_name_long%>.
 
--   **[Apache Geode Management and Monitoring](../managing/management/management_and_monitoring.html)**
+-   **[<%=vars.product_name_long%> Management and Monitoring](management/management_and_monitoring.html)**
 
-    Apache Geode provides APIs and tools for managing your distributed system and monitoring the health of your distributed system members.
+    <%=vars.product_name_long%> provides APIs and tools for managing your distributed system and monitoring the health of your distributed system members.
 
--   **[Managing Heap and Off-heap Memory](../managing/heap_use/heap_management.html)**
+-   **[Managing Heap and Off-heap Memory](heap_use/heap_management.html)**
 
-    By default, Apache Geode uses the JVM heap. Apache Geode also offers an option to store data off heap. This section describes how to manage heap and off-heap memory to best support your application.
+    By default, <%=vars.product_name_long%> uses the JVM heap. <%=vars.product_name_long%> also offers an option to store data off heap. This section describes how to manage heap and off-heap memory to best support your application.
 
--   **[Disk Storage](../managing/disk_storage/chapter_overview.html)**
+-   **[Disk Storage](disk_storage/chapter_overview.html)**
 
-    With Apache Geode disk stores, you can persist data to disk as a backup to your in-memory copy and overflow data to disk when memory use gets too high.
+    With <%=vars.product_name_long%> disk stores, you can persist data to disk as a backup to your in-memory copy and overflow data to disk when memory use gets too high.
 
--   **[Cache and Region Snapshots](../managing/cache_snapshots/chapter_overview.html)**
+-   **[Cache and Region Snapshots](cache_snapshots/chapter_overview.html)**
 
     Snapshots allow you to save region data and reload it later. A typical use case is loading data from one environment into another, such as capturing data from a production system and moving it into a smaller QA or development system.
 
--   **[Region Compression](../managing/region_compression/region_compression.html)**
+-   **[Region Compression](region_compression.html)**
 
     This section describes region compression, its benefits and usage.
 
--   **[Network Partitioning](../managing/network_partitioning/chapter_overview.html)**
+-   **[Network Partitioning](network_partitioning/chapter_overview.html)**
 
-    Apache Geode architecture and management features help detect and resolve network partition problems.
+    <%=vars.product_name_long%> architecture and management features help detect and resolve network partition problems.
 
--   **[Security](../managing/security/chapter_overview.html)**
+-   **[Security](security/chapter_overview.html)**
 
     The security framework establishes trust by authenticating components 
     and members upon connection. It facilitates the authorization of operations.
 
--   **[Performance Tuning and Configuration](../managing/monitor_tune/chapter_overview.html)**
+-   **[Performance Tuning and Configuration](monitor_tune/chapter_overview.html)**
 
-    A collection of tools and controls allow you to monitor and adjust Apache Geode performance.
+    A collection of tools and controls allow you to monitor and adjust <%=vars.product_name_long%> performance.
 
--   **[Logging](../managing/logging/logging.html)**
+-   **[Logging](logging/logging.html)**
 
     Comprehensive logging messages help you confirm system configuration and debug problems in configuration and code.
 
--   **[Statistics](../managing/statistics/chapter_overview.html)**
+-   **[Statistics](statistics/chapter_overview.html)**
 
-    Every application and server in a distributed system can access statistical data about Apache Geode operations. You can configure the gathering of statistics by using the `alter runtime` command of `gfsh` or in the `gemfire.properties` file to facilitate system analysis and troubleshooting.
+    Every application and server in a distributed system can access statistical data about <%=vars.product_name_long%> operations. You can configure the gathering of statistics by using the `alter runtime` command of `gfsh` or in the `gemfire.properties` file to facilitate system analysis and troubleshooting.
 
--   **[Troubleshooting and System Recovery](../managing/troubleshooting/chapter_overview.html)**
+-   **[Troubleshooting and System Recovery](troubleshooting/chapter_overview.html)**
 
     This section provides strategies for handling common errors and failure situations.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/cache_snapshots/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/cache_snapshots/chapter_overview.html.md.erb b/geode-docs/managing/cache_snapshots/chapter_overview.html.md.erb
index 1439348..1fae438 100644
--- a/geode-docs/managing/cache_snapshots/chapter_overview.html.md.erb
+++ b/geode-docs/managing/cache_snapshots/chapter_overview.html.md.erb
@@ -28,23 +28,23 @@ The snapshot file is a binary file that contains all data from a particular regi
 **Note:**
 The previous `Region.loadSnapshot` and `Region.saveSnapshot` APIs have been deprecated. Data written in this format is not compatible with the new APIs.
 
--   **[Usage and Performance Notes](../../managing/cache_snapshots/using_cache_and_region_snapshots.html)**
+-   **[Usage and Performance Notes](using_cache_and_region_snapshots.html)**
 
     Optimize the cache and region snapshot feature by understanding how it performs.
 
--   **[Exporting Cache and Region Snapshots](../../managing/cache_snapshots/exporting_a_snapshot.html)**
+-   **[Exporting Cache and Region Snapshots](exporting_a_snapshot.html)**
 
-    To save Geode cache or region data to a snapshot that you can later load into another distributed system or region, use the `cache.getSnapshotService.save` API, `region.getSnapshotService.save` API, or the `gfsh` command-line interface (`export data`).
+    To save <%=vars.product_name%> cache or region data to a snapshot that you can later load into another distributed system or region, use the `cache.getSnapshotService.save` API, `region.getSnapshotService.save` API, or the `gfsh` command-line interface (`export data`).
 
--   **[Importing Cache and Region Snapshots](../../managing/cache_snapshots/importing_a_snapshot.html)**
+-   **[Importing Cache and Region Snapshots](importing_a_snapshot.html)**
 
-    To import a Geode cache or region data snapshot that you previously exported into another distributed system or region, use the `cache.getSnapshotService.load` API, `region.getSnapshotService.load` API, or the `gfsh` command-line interface (`import data`).
+    To import a <%=vars.product_name%> cache or region data snapshot that you previously exported into another distributed system or region, use the `cache.getSnapshotService.load` API, `region.getSnapshotService.load` API, or the `gfsh` command-line interface (`import data`).
 
--   **[Filtering Entries During Import or Export](../../managing/cache_snapshots/filtering_snapshot_entries.html)**
+-   **[Filtering Entries During Import or Export](filtering_snapshot_entries.html)**
 
     You can customize your snapshot by filtering entries during the import or export of a region or a cache.
 
--   **[Reading Snapshots Programmatically](../../managing/cache_snapshots/read_snapshots_programmatically.html)**
+-   **[Reading Snapshots Programmatically](read_snapshots_programmatically.html)**
 
     You can read a snapshot entry-by-entry for further processing or transformation into other formats.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/cache_snapshots/exporting_a_snapshot.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/cache_snapshots/exporting_a_snapshot.html.md.erb b/geode-docs/managing/cache_snapshots/exporting_a_snapshot.html.md.erb
index eaddd41..0963c6a 100644
--- a/geode-docs/managing/cache_snapshots/exporting_a_snapshot.html.md.erb
+++ b/geode-docs/managing/cache_snapshots/exporting_a_snapshot.html.md.erb
@@ -19,7 +19,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-To save Geode cache or region data to a snapshot that you can later load into another distributed system or region, use the `cache.getSnapshotService.save` API, `region.getSnapshotService.save` API, or the `gfsh` command-line interface (`export data`).
+To save <%=vars.product_name%> cache or region data to a snapshot that you can later load into another distributed system or region, use the `cache.getSnapshotService.save` API, `region.getSnapshotService.save` API, or the `gfsh` command-line interface (`export data`).
 
 If an error occurs during export, the export halts and the snapshot operation is canceled. Typical errors that halt an export include scenarios such as full disk, problems with file permissions, and network partitioning.
 
@@ -57,7 +57,7 @@ region.getSnapshotService().save(mySnapshot, SnapshotFormat.GEMFIRE);
 
 **gfsh:**
 
-Open a gfsh prompt. After connecting to a Geode distributed system, at the prompt type:
+Open a gfsh prompt. After connecting to a <%=vars.product_name%> distributed system, at the prompt type:
 
 ``` pre
 gfsh>export data --region=Region --file=filename.gfd 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/cache_snapshots/importing_a_snapshot.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/cache_snapshots/importing_a_snapshot.html.md.erb b/geode-docs/managing/cache_snapshots/importing_a_snapshot.html.md.erb
index f8296a8..b3670bf 100644
--- a/geode-docs/managing/cache_snapshots/importing_a_snapshot.html.md.erb
+++ b/geode-docs/managing/cache_snapshots/importing_a_snapshot.html.md.erb
@@ -19,7 +19,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-To import a Geode cache or region data snapshot that you previously exported into another distributed system or region, use the `cache.getSnapshotService.load` API, `region.getSnapshotService.load` API, or the `gfsh` command-line interface (`import data`).
+To import a <%=vars.product_name%> cache or region data snapshot that you previously exported into another distributed system or region, use the `cache.getSnapshotService.load` API, `region.getSnapshotService.load` API, or the `gfsh` command-line interface (`import data`).
 
 ## <a id="concept_62B1E24DA7F342E9BB16C0818F7A7B70__section_4B2C73CA1A734D9D96693A52BF99D75A" class="no-quick-link"></a>Import Requirements
 
@@ -62,7 +62,7 @@ region.getSnapshotService().load(mySnapshot, SnapshotFormat.GEMFIRE);
 
 **gfsh:**
 
-Open a gfsh prompt. After connecting to a Geode distributed system, at the prompt type:
+Open a gfsh prompt. After connecting to a <%=vars.product_name%> distributed system, at the prompt type:
 
 ``` pre
 gfsh>import data --region=Region --file=filename.gfd 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/chapter_overview.html.md.erb b/geode-docs/managing/disk_storage/chapter_overview.html.md.erb
index 68e089f..49fd6b8 100644
--- a/geode-docs/managing/disk_storage/chapter_overview.html.md.erb
+++ b/geode-docs/managing/disk_storage/chapter_overview.html.md.erb
@@ -19,37 +19,37 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-With Apache Geode disk stores, you can persist data to disk as a backup to your in-memory copy and overflow data to disk when memory use gets too high.
+With <%=vars.product_name_long%> disk stores, you can persist data to disk as a backup to your in-memory copy and overflow data to disk when memory use gets too high.
 
--   **[How Disk Stores Work](../../managing/disk_storage/how_disk_stores_work.html)**
+-   **[How Disk Stores Work](how_disk_stores_work.html)**
 
     Overflow and persistence use disk stores individually or together to store data.
 
--   **[Disk Store File Names and Extensions](../../managing/disk_storage/file_names_and_extensions.html)**
+-   **[Disk Store File Names and Extensions](file_names_and_extensions.html)**
 
     Disk store files include store management files, access control files, and the operation log, or oplog, files, consisting of one file for deletions and another for all other operations.
 
--   **[Disk Store Operation Logs](../../managing/disk_storage/operation_logs.html)**
+-   **[Disk Store Operation Logs](operation_logs.html)**
 
-    At creation, each operation log is initialized at the disk store's `max-oplog-size`, with the size divided between the `crf` and `drf` files. When the oplog is closed, Apache Geode shrinks the files to the space used in each file.
+    At creation, each operation log is initialized at the disk store's `max-oplog-size`, with the size divided between the `crf` and `drf` files. When the oplog is closed, <%=vars.product_name_long%> shrinks the files to the space used in each file.
 
--   **[Configuring Disk Stores](../../managing/disk_storage/overview_using_disk_stores.html)**
+-   **[Configuring Disk Stores](overview_using_disk_stores.html)**
 
-    In addition to the disk stores you specify, Apache Geode has a default disk store that it uses when disk use is configured with no disk store name specified. You can modify default disk store behavior.
+    In addition to the disk stores you specify, <%=vars.product_name_long%> has a default disk store that it uses when disk use is configured with no disk store name specified. You can modify default disk store behavior.
 
--   **[Optimizing a System with Disk Stores](../../managing/disk_storage/optimize_availability_and_performance.html)**
+-   **[Optimizing a System with Disk Stores](optimize_availability_and_performance.html)**
 
     Optimize availability and performance by following the guidelines in this section.
 
--   **[Start Up and Shut Down with Disk Stores](../../managing/disk_storage/starting_system_with_disk_stores.html)**
+-   **[Start Up and Shut Down with Disk Stores](starting_system_with_disk_stores.html)**
 
     This section describes what happens during startup and shutdown and provides procedures for those operations.
 
--   **[Disk Store Management](../../managing/disk_storage/managing_disk_stores.html)**
+-   **[Disk Store Management](managing_disk_stores.html)**
 
     The `gfsh` command-line tool has a number of options for examining and managing your disk stores. The `gfsh` tool, the `cache.xml` file and the DiskStore APIs are your management tools for online and offline disk stores.
 
--   **[Creating Backups for System Recovery and Operational Management](../../managing/disk_storage/backup_restore_disk_store.html)**
+-   **[Creating Backups for System Recovery and Operational Management](backup_restore_disk_store.html)**
 
     A backup is a copy of persisted data from a disk store. A backup is used to restore the disk store to the state it was in when the backup was made. The appropriate back up and restore procedures differ based upon whether the distributed system is online or offline. An online system has currently running members. An offline system does not have any running members.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/compacting_disk_stores.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/compacting_disk_stores.html.md.erb b/geode-docs/managing/disk_storage/compacting_disk_stores.html.md.erb
index 0a88811..17910b5 100644
--- a/geode-docs/managing/disk_storage/compacting_disk_stores.html.md.erb
+++ b/geode-docs/managing/disk_storage/compacting_disk_stores.html.md.erb
@@ -20,9 +20,9 @@ limitations under the License.
 -->
 
 <a id="compacting_disk_stores__section_64BA304595364E38A28098EB09494531"></a>
-When a cache operation is added to a disk store, any preexisting operation record for the same entry becomes obsolete, and Apache Geode marks it as garbage. For example, when you create an entry, the create operation is added to the store. If you update the entry later, the update operation is added and the create operation becomes garbage. Geode does not remove garbage records as it goes, but it tracks the percentage of garbage in each operation log, and provides mechanisms for removing garbage to compact your log files.
+When a cache operation is added to a disk store, any preexisting operation record for the same entry becomes obsolete, and <%=vars.product_name_long%> marks it as garbage. For example, when you create an entry, the create operation is added to the store. If you update the entry later, the update operation is added and the create operation becomes garbage. <%=vars.product_name%> does not remove garbage records as it goes, but it tracks the percentage of garbage in each operation log, and provides mechanisms for removing garbage to compact your log files.
 
-Geode compacts an old operation log by copying all non-garbage records into the current log and discarding the old files. As with logging, oplogs are rolled as needed during compaction to stay within the max oplog setting.
+<%=vars.product_name%> compacts an old operation log by copying all non-garbage records into the current log and discarding the old files. As with logging, oplogs are rolled as needed during compaction to stay within the max oplog setting.
 
 You can configure the system to automatically compact any closed operation log when its garbage content reaches a certain percentage. You can also manually request compaction for online and offline disk stores. For the online disk store, the current operation log is not available for compaction, no matter how much garbage it contains.
 
@@ -36,10 +36,10 @@ Offline compaction runs essentially in the same way, but without the incoming ca
 
 Old log files become eligible for online compaction when their garbage content surpasses a configured percentage of the total file. A record is garbage when its operation is superseded by a more recent operation for the same object. During compaction, the non-garbage records are added to the current log along with new cache operations. Online compaction does not block current system operations.
 
--   **Automatic compaction**. When `auto-compact` is true, Geode automatically compacts each oplog when its garbage content surpasses the `compaction-threshold`. This takes cycles from your other operations, so you may want to disable this and only do manual compaction, to control the timing.
+-   **Automatic compaction**. When `auto-compact` is true, <%=vars.product_name%> automatically compacts each oplog when its garbage content surpasses the `compaction-threshold`. This takes cycles from your other operations, so you may want to disable this and only do manual compaction, to control the timing.
 -   **Manual compaction**. To run manual compaction:
-    -   Set the disk store attribute `allow-force-compaction` to true. This causes Geode to maintain extra data about the files so it can compact on demand. This is disabled by default to save space. You can run manual online compaction at any time while the system is running. Oplogs eligible for compaction based on the `compaction-threshold` are compacted into the current oplog.
-    -   Run manual compaction as needed. Geode has two types of manual compaction:
+    -   Set the disk store attribute `allow-force-compaction` to true. This causes <%=vars.product_name%> to maintain extra data about the files so it can compact on demand. This is disabled by default to save space. You can run manual online compaction at any time while the system is running. Oplogs eligible for compaction based on the `compaction-threshold` are compacted into the current oplog.
+    -   Run manual compaction as needed. <%=vars.product_name%> has two types of manual compaction:
         -   Compact the logs for a single online disk store through the API, with the `forceCompaction` method. This method first rolls the oplogs and then compacts them. Example:
 
             ``` pre
@@ -73,7 +73,7 @@ gfsh>compact offline-disk-store --name=Disk2 --disk-dirs=/Disks/Disk2
 **Note:**
 Do not perform offline compaction on the baseline directory of an incremental backup.
 
-You must provide all of the directories in the disk store. If no oplog max size is specified, Geode uses the system default.
+You must provide all of the directories in the disk store. If no oplog max size is specified, <%=vars.product_name%> uses the system default.
 
 Offline compaction can take a lot of memory. If you get a `java.lang.OutOfMemory` error while running this, you may need to increase your heap size with the `-J=-Xmx` parameter.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/disk_store_configuration_params.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/disk_store_configuration_params.html.md.erb b/geode-docs/managing/disk_storage/disk_store_configuration_params.html.md.erb
index 939028e..25f2083 100644
--- a/geode-docs/managing/disk_storage/disk_store_configuration_params.html.md.erb
+++ b/geode-docs/managing/disk_storage/disk_store_configuration_params.html.md.erb
@@ -118,6 +118,6 @@ Example:
 ```
 
 **Note:**
-The directories must exist when the disk store is created or the system throws an exception. Geode does not create directories.
+The directories must exist when the disk store is created or the system throws an exception. <%=vars.product_name%> does not create directories.
 
 Use different disk-dir specifications for different disk stores. You cannot use the same directory for the same named disk store in two different members.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/how_disk_stores_work.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/how_disk_stores_work.html.md.erb b/geode-docs/managing/disk_storage/how_disk_stores_work.html.md.erb
index ee75b98..5d8a89e 100644
--- a/geode-docs/managing/disk_storage/how_disk_stores_work.html.md.erb
+++ b/geode-docs/managing/disk_storage/how_disk_stores_work.html.md.erb
@@ -27,7 +27,7 @@ Disk storage is available for these items:
 -   **Regions**. Persist and/or overflow data from regions.
 -   **Server’s client subscription queues**. Overflow the messaging queues to control memory use.
 -   **Gateway sender queues**. Persist these for high availability. These queues always overflow.
--   **PDX serialization metadata**. Persist metadata about objects you serialize using Geode PDX serialization.
+-   **PDX serialization metadata**. Persist metadata about objects you serialize using <%=vars.product_name%> PDX serialization.
 
 Each member has its own set of disk stores, and they are completely separate from the disk stores of any other member. For each disk store, define where and how the data is stored to disk. You can store data from multiple regions and queues in a single disk store.
 
@@ -35,9 +35,9 @@ This figure shows a member with disk stores D through R defined. The member has
 
 <img src="../../images/diskStores-1.gif" id="how_disk_stores_work__image_CB7972998C4A40B2A02550B97A723536" class="image" />
 
-## <a id="how_disk_stores_work__section_433EEEA1560D40DD9842200181EB1D0A" class="no-quick-link"></a>What Geode Writes to the Disk Store
+## <a id="how_disk_stores_work__section_433EEEA1560D40DD9842200181EB1D0A" class="no-quick-link"></a>What <%=vars.product_name%> Writes to the Disk Store
 
-This list describes the items that Geode comprise the disk store:
+This list describes the items that <%=vars.product_name%> comprise the disk store:
 
 -   The members that host the store, and information on their status, such as which members are online and which members are offline and time stamps.
 -   A disk store identifier.
@@ -45,11 +45,11 @@ This list describes the items that Geode comprise the disk store:
 -   Colocated regions that the regions in the disk store are dependent upon.
 -   A set of files that specify all keys for the regions, as well as all operations on the regions. Given both keys and operations, a region can be recreated when a member is restarted.
 
-Geode does not write indexes to disk.
+<%=vars.product_name%> does not write indexes to disk.
 
 ## <a id="how_disk_stores_work__section_C1A047CD5518499D94A0E9A0328F6DB8" class="no-quick-link"></a>Disk Store State
 
-The files for a disk store are used by Geode as a group. Treat them as a single entity. If you copy them, copy them all together. Do not change the file names.
+The files for a disk store are used by <%=vars.product_name%> as a group. Treat them as a single entity. If you copy them, copy them all together. Do not change the file names.
 
 Disk store access and management differs according to whether the member is online or offline.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/keeping_offline_disk_store_in_sync.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/keeping_offline_disk_store_in_sync.html.md.erb b/geode-docs/managing/disk_storage/keeping_offline_disk_store_in_sync.html.md.erb
index 1d32d8c..cd1bce4 100644
--- a/geode-docs/managing/disk_storage/keeping_offline_disk_store_in_sync.html.md.erb
+++ b/geode-docs/managing/disk_storage/keeping_offline_disk_store_in_sync.html.md.erb
@@ -79,4 +79,4 @@ You can remove the region from the disk store in one of two ways:
     --disk-dirs=/firstDiskStoreDir,/secondDiskStoreDir,/thirdDiskStoreDir --remove
     ```
 
-To guard against unintended data loss, Geode maintains the region in the disk store until you manually remove it. Regions in the disk stores that are not associated with any region in your application are still loaded into temporary regions in memory and kept there for the life of the member. The system has no way of detecting whether the cache region will be created by your API at some point, so it keeps the temporary region loaded and available.
+To guard against unintended data loss, <%=vars.product_name%> maintains the region in the disk store until you manually remove it. Regions in the disk stores that are not associated with any region in your application are still loaded into temporary regions in memory and kept there for the life of the member. The system has no way of detecting whether the cache region will be created by your API at some point, so it keeps the temporary region loaded and available.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/managing_disk_buffer_flushes.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/managing_disk_buffer_flushes.html.md.erb b/geode-docs/managing/disk_storage/managing_disk_buffer_flushes.html.md.erb
index 7238843..87076f7 100644
--- a/geode-docs/managing/disk_storage/managing_disk_buffer_flushes.html.md.erb
+++ b/geode-docs/managing/disk_storage/managing_disk_buffer_flushes.html.md.erb
@@ -19,19 +19,19 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-You can configure Geode to write immediately to disk and you may be able to modify your operating system behavior to perform buffer flushes more frequently.
+You can configure <%=vars.product_name%> to write immediately to disk and you may be able to modify your operating system behavior to perform buffer flushes more frequently.
 
-Typically, Geode writes disk data into the operating system's disk buffers and the operating system periodically flushes the buffers to disk. Increasing the frequency of writes to disk decreases the likelihood of data loss from application or machine crashes, but it impacts performance. Your other option, which may give you better performance, is to use Geode's in-memory data backups. Do this by storing your data in multiple replicated regions or in partitioned regions that are configured with redundant copies. See [Region Types](../../developing/region_options/region_types.html#region_types).
+Typically, <%=vars.product_name%> writes disk data into the operating system's disk buffers and the operating system periodically flushes the buffers to disk. Increasing the frequency of writes to disk decreases the likelihood of data loss from application or machine crashes, but it impacts performance. Your other option, which may give you better performance, is to use <%=vars.product_name%>'s in-memory data backups. Do this by storing your data in multiple replicated regions or in partitioned regions that are configured with redundant copies. See [Region Types](../../developing/region_options/region_types.html#region_types).
 
 ## <a id="disk_buffer_flushes__section_448348BD28B14F478D81CC2EDC6C7049" class="no-quick-link"></a>Modifying Disk Flushes for the Operating System
 
 You may be able to change the operating system settings for periodic flushes. You may also be able to perform explicit disk flushes from your application code. For information on these options, see your operating system's documentation. For example, in Linux you can change the disk flush interval by modifying the setting `/proc/sys/vm/dirty_expire_centiseconds`. It defaults to 30 seconds. To alter this setting, see the Linux documentation for `dirty_expire_centiseconds`.
 
-## <a id="disk_buffer_flushes__section_D1068505581A43EE8395DBE97297C60F" class="no-quick-link"></a>Modifying Geode to Flush Buffers on Disk Writes
+## <a id="disk_buffer_flushes__section_D1068505581A43EE8395DBE97297C60F" class="no-quick-link"></a>Modifying <%=vars.product_name%> to Flush Buffers on Disk Writes
 
-You can have Geode flush the disk buffers on every disk write. Do this by setting the system property `gemfire.syncWrites` to true at the command line when you start your Geode member. You can only modify this setting when you start a member. When this is set, Geode uses a Java `RandomAccessFile` with the flags "rwd", which causes every file update to be written synchronously to the storage device. This only guarantees your data if your disk stores are on a local device. See the Java documentation for `java.IO.RandomAccessFile`.
+You can have <%=vars.product_name%> flush the disk buffers on every disk write. Do this by setting the system property `gemfire.syncWrites` to true at the command line when you start your <%=vars.product_name%> member. You can only modify this setting when you start a member. When this is set, <%=vars.product_name%> uses a Java `RandomAccessFile` with the flags "rwd", which causes every file update to be written synchronously to the storage device. This only guarantees your data if your disk stores are on a local device. See the Java documentation for `java.IO.RandomAccessFile`.
 
-To modify the setting for a Geode application, add this to the java command line when you start the member:
+To modify the setting for a <%=vars.product_name%> application, add this to the java command line when you start the member:
 
 ``` pre
 -Dgemfire.syncWrites=true

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/managing_disk_stores.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/managing_disk_stores.html.md.erb b/geode-docs/managing/disk_storage/managing_disk_stores.html.md.erb
index 5262be1..138ca25 100644
--- a/geode-docs/managing/disk_storage/managing_disk_stores.html.md.erb
+++ b/geode-docs/managing/disk_storage/managing_disk_stores.html.md.erb
@@ -23,20 +23,20 @@ The `gfsh` command-line tool has a number of options for examining and managing
 
 See [Disk Store Commands](../../tools_modules/gfsh/quick_ref_commands_by_area.html#topic_1ACC91B493EE446E89EC7DBFBBAE00EA) for a list of available commands.
 
--   **[Disk Store Management Commands and Operations](../../managing/disk_storage/managing_disk_stores_cmds.html)**
+-   **[Disk Store Management Commands and Operations](managing_disk_stores_cmds.html)**
 
--   **[Validating a Disk Store](../../managing/disk_storage/validating_disk_store.html)**
+-   **[Validating a Disk Store](validating_disk_store.html)**
 
--   **[Running Compaction on Disk Store Log Files](../../managing/disk_storage/compacting_disk_stores.html)**
+-   **[Running Compaction on Disk Store Log Files](compacting_disk_stores.html)**
 
--   **[Keeping a Disk Store Synchronized with the Cache](../../managing/disk_storage/keeping_offline_disk_store_in_sync.html)**
+-   **[Keeping a Disk Store Synchronized with the Cache](keeping_offline_disk_store_in_sync.html)**
 
--   **[Configuring Disk Free Space Monitoring](../../managing/disk_storage/disk_free_space_monitoring.html)**
+-   **[Configuring Disk Free Space Monitoring](disk_free_space_monitoring.html)**
 
--   **[Handling Missing Disk Stores](../../managing/disk_storage/handling_missing_disk_stores.html)**
+-   **[Handling Missing Disk Stores](handling_missing_disk_stores.html)**
 
--   **[Altering When Buffers Are Flushed to Disk](../../managing/disk_storage/managing_disk_buffer_flushes.html)**
+-   **[Altering When Buffers Are Flushed to Disk](managing_disk_buffer_flushes.html)**
 
-    You can configure Geode to write immediately to disk and you may be able to modify your operating system behavior to perform buffer flushes more frequently.
+    You can configure <%=vars.product_name%> to write immediately to disk and you may be able to modify your operating system behavior to perform buffer flushes more frequently.
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/operation_logs.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/operation_logs.html.md.erb b/geode-docs/managing/disk_storage/operation_logs.html.md.erb
index b8d4211..db0fd09 100644
--- a/geode-docs/managing/disk_storage/operation_logs.html.md.erb
+++ b/geode-docs/managing/disk_storage/operation_logs.html.md.erb
@@ -19,15 +19,15 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-At creation, each operation log is initialized at the disk store's `max-oplog-size`, with the size divided between the `crf` and `drf` files. When the oplog is closed, Apache Geode shrinks the files to the space used in each file.
+At creation, each operation log is initialized at the disk store's `max-oplog-size`, with the size divided between the `crf` and `drf` files. When the oplog is closed, <%=vars.product_name_long%> shrinks the files to the space used in each file.
 
 <a id="operation_logs__section_C0B1391492394A908577C29772902A42"></a>
-After the oplog is closed, Geode also attempts to create a `krf` file, which contains the key names as well as the offset for the value within the `crf` file. Although this file is not required for startup, if it is available, it will improve startup performance by allowing Geode to load the entry values in the background after the entry keys are loaded.
+After the oplog is closed, <%=vars.product_name%> also attempts to create a `krf` file, which contains the key names as well as the offset for the value within the `crf` file. Although this file is not required for startup, if it is available, it will improve startup performance by allowing <%=vars.product_name%> to load the entry values in the background after the entry keys are loaded.
 
-When an operation log is full, Geode automatically closes it and creates a new log with the next sequence number. This is called *oplog rolling*. You can also request an oplog rolling through the API call `DiskStore.forceRoll`. You may want to do this immediately before compacting your disk stores, so the latest oplog is available for compaction.
+When an operation log is full, <%=vars.product_name%> automatically closes it and creates a new log with the next sequence number. This is called *oplog rolling*. You can also request an oplog rolling through the API call `DiskStore.forceRoll`. You may want to do this immediately before compacting your disk stores, so the latest oplog is available for compaction.
 
 **Note:**
-Log compaction can change the names of the disk store files. File number sequencing is usually altered, with some existing logs removed or replaced by newer logs with higher numbering. Geode always starts a new log at a number higher than any existing number.
+Log compaction can change the names of the disk store files. File number sequencing is usually altered, with some existing logs removed or replaced by newer logs with higher numbering. <%=vars.product_name%> always starts a new log at a number higher than any existing number.
 
 This example listing shows the logs in a system with only one disk directory specified for the store. The first log (`BACKUPCacheOverflow_1.crf` and `BACKUPCacheOverflow_1.drf`) has been closed and the system is writing to the second log.
 
@@ -47,9 +47,9 @@ The system rotates through all available disk directories to write its logs. The
 
 ## <a id="operation_logs__section_8431984F4E6644D79292850CCA60E6E3" class="no-quick-link"></a>When Disk Store Oplogs Reach the Configured Disk Capacity
 
-If no directory exists that is within its capacity limits, how Geode handles this depends on whether automatic compaction is enabled.
+If no directory exists that is within its capacity limits, how <%=vars.product_name%> handles this depends on whether automatic compaction is enabled.
 
--   If auto-compaction is enabled, Geode creates a new oplog in one of the directories, going over the limit, and logs a warning that reports:
+-   If auto-compaction is enabled, <%=vars.product_name%> creates a new oplog in one of the directories, going over the limit, and logs a warning that reports:
 
     ``` pre
     Even though the configured directory size limit has been exceeded a 
@@ -58,9 +58,9 @@ If no directory exists that is within its capacity limits, how Geode handles thi
     ```
 
     **Note:**
-    When auto-compaction is enabled, `dir-size` does not limit how much disk space is used. Geode will perform auto-compaction, which should free space, but the system may go over the configured disk limits.
+    When auto-compaction is enabled, `dir-size` does not limit how much disk space is used. <%=vars.product_name%> will perform auto-compaction, which should free space, but the system may go over the configured disk limits.
 
--   If auto-compaction is disabled, Geode does not create a new oplog, operations in the regions attached to the disk store block, and Geode logs this error:
+-   If auto-compaction is disabled, <%=vars.product_name%> does not create a new oplog, operations in the regions attached to the disk store block, and <%=vars.product_name%> logs this error:
 
     ``` pre
     Disk is full and rolling is disabled. No space can be created

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/optimize_availability_and_performance.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/optimize_availability_and_performance.html.md.erb b/geode-docs/managing/disk_storage/optimize_availability_and_performance.html.md.erb
index 5443d93..f8f93d4 100644
--- a/geode-docs/managing/disk_storage/optimize_availability_and_performance.html.md.erb
+++ b/geode-docs/managing/disk_storage/optimize_availability_and_performance.html.md.erb
@@ -21,7 +21,7 @@ limitations under the License.
 
 Optimize availability and performance by following the guidelines in this section.
 
-1.  Apache Geode recommends the use of `ext4` filesystems when operating on Linux or Solaris platforms. The `ext4` filesystem supports preallocation, which benefits disk startup performance. If you are using `ext3` filesystems in latency-sensitive environments with high write throughput, you can improve disk startup performance by setting the `maxOplogSize` (see the `DiskStoreFactory.setMaxOplogSize`) to a value lower than the default 1 GB and by disabling preallocation by specifying the system property `gemfire.preAllocateDisk=false` upon Geode process startup.
+1.  <%=vars.product_name_long%> recommends the use of `ext4` filesystems when operating on Linux or Solaris platforms. The `ext4` filesystem supports preallocation, which benefits disk startup performance. If you are using `ext3` filesystems in latency-sensitive environments with high write throughput, you can improve disk startup performance by setting the `maxOplogSize` (see the `DiskStoreFactory.setMaxOplogSize`) to a value lower than the default 1 GB and by disabling preallocation by specifying the system property `gemfire.preAllocateDisk=false` upon <%=vars.product_name%> process startup.
 2.  When you start your system, start all the members that have persistent regions at roughly the same time. Create and use startup scripts for consistency and completeness.
 3.  Shut down your system using the gfsh `shutdown` command. This is an ordered shutdown that positions your disk stores for a faster startup.
 4.  Configure critical usage thresholds (`disk-usage-warning-percentage` and `disk-usage-critical-percentage`) for the disk. By default, these are set to 80% for warning and 99% for errors that will shut down the cache.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/overview_using_disk_stores.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/overview_using_disk_stores.html.md.erb b/geode-docs/managing/disk_storage/overview_using_disk_stores.html.md.erb
index 74c1b96..ce5e2cf 100644
--- a/geode-docs/managing/disk_storage/overview_using_disk_stores.html.md.erb
+++ b/geode-docs/managing/disk_storage/overview_using_disk_stores.html.md.erb
@@ -19,17 +19,17 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-In addition to the disk stores you specify, Apache Geode has a default disk store that it uses when disk use is configured with no disk store name specified. You can modify default disk store behavior.
+In addition to the disk stores you specify, <%=vars.product_name_long%> has a default disk store that it uses when disk use is configured with no disk store name specified. You can modify default disk store behavior.
 
--   **[Designing and Configuring Disk Stores](../../managing/disk_storage/using_disk_stores.html)**
+-   **[Designing and Configuring Disk Stores](using_disk_stores.html)**
 
     You define disk stores in your cache, then you assign them to your regions and queues by setting the `disk-store-name` attribute in your region and queue configurations.
 
--   **[Disk Store Configuration Parameters](../../managing/disk_storage/disk_store_configuration_params.html)**
+-   **[Disk Store Configuration Parameters](disk_store_configuration_params.html)**
 
     You define your disk stores by using the `gfsh create disk-store` command or in `<disk-store>` subelements of your cache declaration in `cache.xml`. All disk stores are available for use by all of your regions and queues.
 
--   **[Modifying the Default Disk Store](../../managing/disk_storage/using_the_default_disk_store.html)**
+-   **[Modifying the Default Disk Store](using_the_default_disk_store.html)**
 
     You can modify the behavior of the default disk store by specifying the attributes you want for the disk store named "DEFAULT".
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/starting_system_with_disk_stores.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/starting_system_with_disk_stores.html.md.erb b/geode-docs/managing/disk_storage/starting_system_with_disk_stores.html.md.erb
index d4a8cbc..977185b 100644
--- a/geode-docs/managing/disk_storage/starting_system_with_disk_stores.html.md.erb
+++ b/geode-docs/managing/disk_storage/starting_system_with_disk_stores.html.md.erb
@@ -58,7 +58,7 @@ If the member's disk store has data for a region that is never created, the data
 Each member’s persistent regions load and go online as quickly as possible, not waiting unnecessarily for other members to complete. For performance reasons, these actions occur asynchronously:
 
 -   Once at least one copy of each and every bucket is recovered from disk, the region is available. Secondary buckets will load asynchronously.
--   Entry keys are loaded from the key file in the disk store before considering entry values. Once all keys are loaded, Geode loads the entry values asynchronously. If a value is requested before it has loaded, the value will immediately be fetched from the disk store.
+-   Entry keys are loaded from the key file in the disk store before considering entry values. Once all keys are loaded, <%=vars.product_name%> loads the entry values asynchronously. If a value is requested before it has loaded, the value will immediately be fetched from the disk store.
 
 ## <a id="starting_system_with_disk_stores__section_D0A7403707B847749A22BF9221A2C823" class="no-quick-link"></a>Start Up Procedure
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/using_disk_stores.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/using_disk_stores.html.md.erb b/geode-docs/managing/disk_storage/using_disk_stores.html.md.erb
index 4835533..bcf7b5b 100644
--- a/geode-docs/managing/disk_storage/using_disk_stores.html.md.erb
+++ b/geode-docs/managing/disk_storage/using_disk_stores.html.md.erb
@@ -22,7 +22,7 @@ limitations under the License.
 You define disk stores in your cache, then you assign them to your regions and queues by setting the `disk-store-name` attribute in your region and queue configurations.
 
 **Note:**
-Besides the disk stores you specify, Apache Geode has a default disk store that it uses when disk use is configured with no disk store name specified. By default, this disk store is saved to the application’s working directory. You can change its behavior, as indicated in [Create and Configure Your Disk Stores](using_disk_stores.html#defining_disk_stores__section_37BC5A4D84B34DB49E489DD4141A4884) and [Modifying the Default Disk Store](using_the_default_disk_store.html#using_the_default_disk_store).
+Besides the disk stores you specify, <%=vars.product_name_long%> has a default disk store that it uses when disk use is configured with no disk store name specified. By default, this disk store is saved to the application’s working directory. You can change its behavior, as indicated in [Create and Configure Your Disk Stores](using_disk_stores.html#defining_disk_stores__section_37BC5A4D84B34DB49E489DD4141A4884) and [Modifying the Default Disk Store](using_the_default_disk_store.html#using_the_default_disk_store).
 
 -   [Design Your Disk Stores](using_disk_stores.html#defining_disk_stores__section_0CD724A12EE4418587046AAD9EEC59C5)
 -   [Create and Configure Your Disk Stores](using_disk_stores.html#defining_disk_stores__section_37BC5A4D84B34DB49E489DD4141A4884)
@@ -31,11 +31,11 @@ Besides the disk stores you specify, Apache Geode has a default disk store that
 
 ## <a id="defining_disk_stores__section_0CD724A12EE4418587046AAD9EEC59C5" class="no-quick-link"></a>Design Your Disk Stores
 
-Before you begin, you should understand Geode [Basic Configuration and Programming](../../basic_config/book_intro.html).
+Before you begin, you should understand <%=vars.product_name%> [Basic Configuration and Programming](../../basic_config/book_intro.html).
 
 1.  Work with your system designers and developers to plan for anticipated disk storage requirements in your testing and production caching systems. Take into account space and functional requirements.
     -   For efficiency, separate data that is only overflowed in separate disk stores from data that is persisted or persisted and overflowed. Regions can be overflowed, persisted, or both. Server subscription queues are only overflowed.
-    -   When calculating your disk requirements, figure in your data modification patterns and your compaction strategy. Geode creates each oplog file at the max-oplog-size, which defaults to 1 GB. Obsolete operations are only removed from the oplogs during compaction, so you need enough space to store all operations that are done between compactions. For regions where you are doing a mix of updates and deletes, if you use automatic compaction, a good upper bound for the required disk space is
+    -   When calculating your disk requirements, figure in your data modification patterns and your compaction strategy. <%=vars.product_name%> creates each oplog file at the max-oplog-size, which defaults to 1 GB. Obsolete operations are only removed from the oplogs during compaction, so you need enough space to store all operations that are done between compactions. For regions where you are doing a mix of updates and deletes, if you use automatic compaction, a good upper bound for the required disk space is
 
         ``` pre
         (1 / (1 - (compaction_threshold/100)) ) * data size
@@ -49,7 +49,7 @@ Before you begin, you should understand Geode [Basic Configuration and Programmi
 
 ## <a id="defining_disk_stores__section_37BC5A4D84B34DB49E489DD4141A4884" class="no-quick-link"></a>Create and Configure Your Disk Stores
 
-1.  In the locations you have chosen, create all directories you will specify for your disk stores to use. Geode throws an exception if the specified directories are not available when a disk store is created. You do not need to populate these directories with anything.
+1.  In the locations you have chosen, create all directories you will specify for your disk stores to use. <%=vars.product_name%> throws an exception if the specified directories are not available when a disk store is created. You do not need to populate these directories with anything.
 2.  Open a `gfsh` prompt and connect to the distributed system.
 3.  At the `gfsh` prompt, create and configure a disk store:
     -  Specify the name (`--name`) of the disk-store.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/disk_storage/using_the_default_disk_store.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/disk_storage/using_the_default_disk_store.html.md.erb b/geode-docs/managing/disk_storage/using_the_default_disk_store.html.md.erb
index 2618290..b2f37de 100644
--- a/geode-docs/managing/disk_storage/using_the_default_disk_store.html.md.erb
+++ b/geode-docs/managing/disk_storage/using_the_default_disk_store.html.md.erb
@@ -22,7 +22,7 @@ limitations under the License.
 You can modify the behavior of the default disk store by specifying the attributes you want for the disk store named "DEFAULT".
 
 <a id="using_the_default_disk_store__section_7D6E1A05D28840AC8606EF0D88E9B373"></a>
-Whenever you use disk stores without specifying the disk store to use, Geode uses the disk store named "DEFAULT".
+Whenever you use disk stores without specifying the disk store to use, <%=vars.product_name%> uses the disk store named "DEFAULT".
 
 For example, these region and queue configurations specify persistence and/or overflow, but do not specify the disk-store-name. Because no disk store is specified, these use the disk store named "DEFAULT".
 
@@ -50,7 +50,7 @@ Example of using the default disk store for server subscription queue overflow (
 
 ## <a id="using_the_default_disk_store__section_671AED6EAFEE485D837411DEBE0C6BC6" class="no-quick-link"></a>Change the Behavior of the Default Disk Store
 
-Geode initializes the default disk store with the default disk store configuration settings. You can modify the behavior of the default disk store by specifying the attributes you want for the disk store named "DEFAULT". The only thing you can’t change about the default disk store is the name.
+<%=vars.product_name%> initializes the default disk store with the default disk store configuration settings. You can modify the behavior of the default disk store by specifying the attributes you want for the disk store named "DEFAULT". The only thing you can’t change about the default disk store is the name.
 
 The following example changes the default disk store to allow manual compaction and to use multiple, non-default directories:
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/heap_use/heap_management.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/heap_use/heap_management.html.md.erb b/geode-docs/managing/heap_use/heap_management.html.md.erb
index 7fab249..f4d934d 100644
--- a/geode-docs/managing/heap_use/heap_management.html.md.erb
+++ b/geode-docs/managing/heap_use/heap_management.html.md.erb
@@ -19,17 +19,17 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-By default, Apache Geode uses the JVM heap. Apache Geode also offers an option to store data off heap. This section describes how to manage heap and off-heap memory to best support your application.
+By default, <%=vars.product_name_long%> uses the JVM heap. <%=vars.product_name_long%> also offers an option to store data off heap. This section describes how to manage heap and off-heap memory to best support your application.
 
 ## <a id="section_590DA955523246ED980E4E351FF81F71" class="no-quick-link"></a>Tuning the JVM's Garbage Collection Parameters
 
-Because Apache Geode is specifically designed to manipulate data held in memory, you can optimize your application's performance by tuning the way Apache Geode uses the JVM heap.
+Because <%=vars.product_name_long%> is specifically designed to manipulate data held in memory, you can optimize your application's performance by tuning the way <%=vars.product_name_long%> uses the JVM heap.
 
 See your JVM documentation for all JVM-specific settings that can be used to improve garbage collection (GC) response. At a minimum, do the following:
 
 1.  Set the initial and maximum heap switches, `-Xms` and `-Xmx`, to the same values. The `gfsh start server` options `--initial-heap` and `--max-heap` accomplish the same purpose, with the added value of providing resource manager defaults such as eviction threshold and critical threshold.
 2.  Configure your JVM for concurrent mark-sweep (CMS) garbage collection.
-3.  If your JVM allows, configure it to initiate CMS collection when heap use is at least 10% lower than your setting for the resource manager `eviction-heap-percentage`. You want the collector to be working when Geode is evicting or the evictions will not result in more free memory. For example, if the `eviction-heap-percentage` is set to 65, set your garbage collection to start when the heap use is no higher than 55%.
+3.  If your JVM allows, configure it to initiate CMS collection when heap use is at least 10% lower than your setting for the resource manager `eviction-heap-percentage`. You want the collector to be working when <%=vars.product_name%> is evicting or the evictions will not result in more free memory. For example, if the `eviction-heap-percentage` is set to 65, set your garbage collection to start when the heap use is no higher than 55%.
 
 | JVM         | CMS switch flag           | CMS initiation (begin at heap % N)     |
 |-------------|---------------------------|----------------------------------------|
@@ -54,12 +54,12 @@ $ gfsh start server --name=app.MyApplication --initial-heap=30m --max-heap=30m \
 --J=-XX:+UseConcMarkSweepGC --J=-XX:CMSInitiatingOccupancyFraction=60
 ```
 
-## <a id="how_the_resource_manager_works" class="no-quick-link"></a>Using the Geode Resource Manager
+## <a id="how_the_resource_manager_works" class="no-quick-link"></a>Using the <%=vars.product_name%> Resource Manager
 
-The Geode resource manager works with your JVM's tenured garbage collector to control heap use and protect your member from hangs and crashes due to memory overload.
+The <%=vars.product_name%> resource manager works with your JVM's tenured garbage collector to control heap use and protect your member from hangs and crashes due to memory overload.
 
 <a id="how_the_resource_manager_works__section_53E80B61991447A2915E8A754383B32D"></a>
-The Geode resource manager prevents the cache from consuming too much memory by evicting old data. If the garbage collector is unable to keep up, the resource manager refuses additions to the cache until the collector has freed an adequate amount of memory.
+The <%=vars.product_name%> resource manager prevents the cache from consuming too much memory by evicting old data. If the garbage collector is unable to keep up, the resource manager refuses additions to the cache until the collector has freed an adequate amount of memory.
 
 The resource manager has two threshold settings, each expressed as a percentage of the total tenured heap. Both are disabled by default.
 
@@ -75,7 +75,7 @@ The resource manager has two threshold settings, each expressed as a percentage
 
 When heap use passes the eviction threshold in either direction, the manager logs an info-level message.
 
-When heap use exceeds the critical threshold, the manager logs an error-level message. Avoid exceeding the critical threshold. Once identified as critical, the Geode member becomes a read-only member that refuses cache updates for all of its regions, including incoming distributed updates.
+When heap use exceeds the critical threshold, the manager logs an error-level message. Avoid exceeding the critical threshold. Once identified as critical, the <%=vars.product_name%> member becomes a read-only member that refuses cache updates for all of its regions, including incoming distributed updates.
 
 For more information, see `org.apache.geode.cache.control.ResourceManager` in the online API documentation.
 
@@ -98,13 +98,13 @@ Resource manager behavior is closely tied to the triggering of Garbage Collectio
 <a id="configuring_resource_manager__section_B47A78E7BA0048C89FBBDB7441C308BE"></a>
 The recommendations provided here for using the manager assume you have a solid understanding of your Java VM's heap management and garbage collection service.
 
-The resource manager is available for use in any Apache Geode member, but you may not want to activate it everywhere. For some members it might be better to occasionally restart after a hang or OME crash than to evict data and/or refuse distributed caching activities. Also, members that do not risk running past their memory limits would not benefit from the overhead the resource manager consumes. Cache servers are often configured to use the manager because they generally host more data and have more data activity than other members, requiring greater responsiveness in data cleanup and collection.
+The resource manager is available for use in any <%=vars.product_name_long%> member, but you may not want to activate it everywhere. For some members it might be better to occasionally restart after a hang or OME crash than to evict data and/or refuse distributed caching activities. Also, members that do not risk running past their memory limits would not benefit from the overhead the resource manager consumes. Cache servers are often configured to use the manager because they generally host more data and have more data activity than other members, requiring greater responsiveness in data cleanup and collection.
 
 For the members where you want to activate the resource manager:
 
-1.  Configure Geode for heap LRU management.
+1.  Configure <%=vars.product_name%> for heap LRU management.
 
-2.  Set the JVM GC tuning parameters to handle heap and garbage collection in conjunction with the Geode manager.
+2.  Set the JVM GC tuning parameters to handle heap and garbage collection in conjunction with the <%=vars.product_name%> manager.
 
 3.  Monitor and tune heap LRU configurations and your GC configurations.
 
@@ -112,7 +112,7 @@ For the members where you want to activate the resource manager:
 
 5.  In production, keep monitoring and tuning to meet changing needs.
 
-## <a id="configuring_resource_manager__section_4949882892DA46F6BB8588FA97037F45" class="no-quick-link"></a>Configure Geode for Heap LRU Management
+## <a id="configuring_resource_manager__section_4949882892DA46F6BB8588FA97037F45" class="no-quick-link"></a>Configure <%=vars.product_name%> for Heap LRU Management
 
 The configuration terms used here are `cache.xml` elements and attributes, but you can also configure through `gfsh` and the `org.apache.geode.cache.control.ResourceManager` and `Region` APIs.
 
@@ -149,15 +149,15 @@ cache.xml example:
 
 ## <a id="set_jvm_gc_tuning_params" class="no-quick-link"></a>Set the JVM GC Tuning Parameters
 
-If your JVM allows, configure it to initiate concurrent mark-sweep (CMS) garbage collection when heap use is at least 10% lower than your setting for the resource manager `eviction-heap-percentage`. You want the collector to be working when Geode is evicting or the evictions will not result in more free memory. For example, if the `eviction-heap-percentage` is set to 65, set your garbage collection to start when the heap use is no higher than 55%.
+If your JVM allows, configure it to initiate concurrent mark-sweep (CMS) garbage collection when heap use is at least 10% lower than your setting for the resource manager `eviction-heap-percentage`. You want the collector to be working when <%=vars.product_name%> is evicting or the evictions will not result in more free memory. For example, if the `eviction-heap-percentage` is set to 65, set your garbage collection to start when the heap use is no higher than 55%.
 
 ## <a id="configuring_resource_manager__section_DE1CC494C2B547B083AA00821250972A" class="no-quick-link"></a>Monitor and Tune Heap LRU Configurations
 
-In tuning the resource manager, your central focus should be keeping the member below the critical threshold. The critical threshold is provided to avoid member hangs and crashes, but because of its exception-throwing behavior for distributed updates, the time spent in critical negatively impacts the entire distributed system. To stay below critical, tune so that the Geode eviction and the JVM's GC respond adequately when the eviction threshold is reached.
+In tuning the resource manager, your central focus should be keeping the member below the critical threshold. The critical threshold is provided to avoid member hangs and crashes, but because of its exception-throwing behavior for distributed updates, the time spent in critical negatively impacts the entire distributed system. To stay below critical, tune so that the <%=vars.product_name%> eviction and the JVM's GC respond adequately when the eviction threshold is reached.
 
 Use the statistics provided by your JVM to make sure your memory and GC settings are sufficient for your needs.
 
-The Geode `ResourceManagerStats` provide information about memory use and the manager thresholds and eviction activities.
+The <%=vars.product_name%> `ResourceManagerStats` provide information about memory use and the manager thresholds and eviction activities.
 
 If your application spikes above the critical threshold on a regular basis, try lowering the eviction threshold. If the application never goes near critical, you might raise the eviction threshold to gain more usable memory without the overhead of unneeded evictions or GC cycles.
 


[22/25] geode git commit: Closing pull request for GEODE-3406. Closes #716

Posted by ud...@apache.org.
Closing pull request for GEODE-3406. Closes #716


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/fa29ec13
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/fa29ec13
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/fa29ec13

Branch: refs/heads/feature/GEODE-3503
Commit: fa29ec13f049e39c13ff4e711364e55708ede94d
Parents: 1b84ecb
Author: Hitesh Khamesra <hk...@pivotal.io>
Authored: Wed Aug 23 11:08:51 2017 -0700
Committer: Hitesh Khamesra <hk...@pivotal.io>
Committed: Wed Aug 23 11:08:51 2017 -0700

----------------------------------------------------------------------

----------------------------------------------------------------------



[02/25] geode git commit: GEODE-3406: Locator accepts Protobuf requests

Posted by ud...@apache.org.
GEODE-3406: Locator accepts Protobuf requests

Also addresses GEODE-3400, GEODE-3399
This allows the locator to respond to Protobuf requests. Currently it
will only be able to respond to getAvailableServers.

To enable this we are introducing a new value of "0" that will be sent
in place of the Gossip version. After it we expect the same magic byte
("110") as in AcceptorImpl.

This also is gated by the `geode.feature-protobuf-protocol` system
property.

The getAvailableServers request handler now uses the locator directly,
since we are on the locator.

Signed-off-by: Brian Rowe <br...@pivotal.io>


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/530f48f3
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/530f48f3
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/530f48f3

Branch: refs/heads/feature/GEODE-3503
Commit: 530f48f35a96c4f8af7e51ed03b1ee2e5e150ebd
Parents: be45511
Author: Alexander Murmann <am...@pivotal.io>
Authored: Mon Aug 14 15:08:14 2017 -0700
Committer: Hitesh Khamesra <hk...@pivotal.io>
Committed: Tue Aug 22 10:52:50 2017 -0700

----------------------------------------------------------------------
 .../distributed/internal/InternalLocator.java   |   4 +-
 .../distributed/internal/ServerLocator.java     |   4 +
 .../internal/tcpserver/TcpServer.java           |  77 +++++++----
 .../geode/internal/cache/InternalCache.java     |   4 +-
 .../ClientProtoclMessageHandlerLoader.java      |  64 +++++++++
 .../sockets/ClientProtocolMessageHandler.java   |   7 +-
 .../ClientProtocolMessageHandlerLoader.java     |  64 +++++++++
 .../cache/tier/sockets/ExecutionContext.java    |  54 ++++++++
 .../GenericProtocolServerConnection.java        |   3 +-
 .../InvalidExecutionContextException.java       |  33 +++++
 .../AutoConnectionSourceImplJUnitTest.java      |   8 +-
 .../tcpserver/TCPServerSSLJUnitTest.java        |   2 +-
 .../internal/tcpserver/TcpServerJUnitTest.java  |   2 +-
 .../protocol/operations/OperationHandler.java   |   6 +-
 .../protocol/protobuf/ProtobufOpsProcessor.java |  17 ++-
 .../protobuf/ProtobufStreamProcessor.java       |  29 ++--
 .../protocol/protobuf/ProtocolErrorCode.java    |   1 +
 .../GetAllRequestOperationHandler.java          |   8 +-
 .../GetAvailableServersOperationHandler.java    |  65 ++-------
 .../GetRegionNamesRequestOperationHandler.java  |   8 +-
 .../GetRegionRequestOperationHandler.java       |   8 +-
 .../operations/GetRequestOperationHandler.java  |   8 +-
 .../PutAllRequestOperationHandler.java          |   8 +-
 .../operations/PutRequestOperationHandler.java  |   8 +-
 .../RemoveRequestOperationHandler.java          |   9 +-
 .../protocol/GetAvailableServersDUnitTest.java  | 108 ---------------
 .../RoundTripLocatorConnectionJUnitTest.java    | 132 +++++++++++++++++++
 .../protobuf/ProtobufStreamProcessorTest.java   |   4 +-
 .../GetAllRequestOperationHandlerJUnitTest.java |  18 +--
 ...ailableServersOperationHandlerJUnitTest.java |  97 ++++----------
 ...onNamesRequestOperationHandlerJUnitTest.java |  26 ++--
 ...tRegionRequestOperationHandlerJUnitTest.java |  16 ++-
 .../GetRequestOperationHandlerJUnitTest.java    |  33 ++---
 .../PutAllRequestOperationHandlerJUnitTest.java |  13 +-
 .../PutRequestOperationHandlerJUnitTest.java    |  39 +++---
 .../RemoveRequestOperationHandlerJUnitTest.java |  27 ++--
 36 files changed, 618 insertions(+), 396 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
index 4725518..8d2daf6 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
@@ -315,7 +315,6 @@ public class InternalLocator extends Locator implements ConnectListener {
 
       // TODO:GEODE-1243: this.server is now a TcpServer and it should store or return its non-zero
       // port in a variable to use here
-
       try {
         newLocator.startPeerLocation(startDistributedSystem);
         if (startDistributedSystem) {
@@ -500,7 +499,7 @@ public class InternalLocator extends Locator implements ConnectListener {
     this.stats = new LocatorStats();
 
     this.server = new TcpServer(port, this.bindAddress, null, this.config, this.handler,
-        new DelayedPoolStatHelper(), group, this.toString());
+        new DelayedPoolStatHelper(), group, this.toString(), this);
   }
 
   // Reset the file names with the correct port number if startLocatorAndDS was called with port
@@ -636,7 +635,6 @@ public class InternalLocator extends Locator implements ConnectListener {
    */
   private void startDistributedSystem() throws UnknownHostException {
     InternalDistributedSystem existing = InternalDistributedSystem.getConnectedInstance();
-
     if (existing != null) {
       // LOG: changed from config to info
       logger.info(LocalizedMessage

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/main/java/org/apache/geode/distributed/internal/ServerLocator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/ServerLocator.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/ServerLocator.java
index fb66b4c..27c557c 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/ServerLocator.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/ServerLocator.java
@@ -103,6 +103,10 @@ public class ServerLocator implements TcpHandler, DistributionAdvisee {
     this.stats = null;
   }
 
+  public LocatorLoadSnapshot getLoadSnapshot() {
+    return loadSnapshot;
+  }
+
   public ServerLocator(int port, InetAddress bindAddress, String hostNameForClients, File logFile,
       ProductUseLog productUseLogWriter, String memberName, InternalDistributedSystem ds,
       LocatorStats stats) throws IOException {

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
index 976f504..c3d51c1 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
@@ -22,6 +22,7 @@ import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.distributed.internal.DistributionConfigImpl;
 import org.apache.geode.distributed.internal.DistributionStats;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.InternalLocator;
 import org.apache.geode.distributed.internal.PoolStatHelper;
 import org.apache.geode.distributed.internal.PooledExecutorWithDMStats;
 import org.apache.geode.internal.DSFIDFactory;
@@ -31,6 +32,10 @@ import org.apache.geode.internal.VersionedDataInputStream;
 import org.apache.geode.internal.VersionedDataOutputStream;
 import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.internal.cache.tier.Acceptor;
+import org.apache.geode.internal.cache.tier.sockets.AcceptorImpl;
+import org.apache.geode.internal.cache.tier.sockets.ClientProtocolMessageHandlerLoader;
+import org.apache.geode.internal.cache.tier.sockets.ClientProtocolMessageHandler;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.HandShake;
 import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.internal.net.SocketCreator;
@@ -49,7 +54,6 @@ import java.net.ServerSocket;
 import java.net.Socket;
 import java.net.SocketAddress;
 import java.net.URL;
-import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -78,6 +82,7 @@ public class TcpServer {
    * <p>
    * This should be incremented if the gossip message structures change
    * <p>
+   * 0 - special indicator of a non-gossip message from a client<br>
    * 1000 - gemfire 5.5 - using java serialization<br>
    * 1001 - 5.7 - using DataSerializable and supporting server locator messages.<br>
    * 1002 - 7.1 - sending GemFire version along with GOSSIP_VERSION in each request.
@@ -86,6 +91,7 @@ public class TcpServer {
    * version number
    */
   public final static int GOSSIPVERSION = 1002;
+  public final static int NON_GOSSIP_REQUEST_VERSION = 0;
   // Don't change it ever. We did NOT send GemFire version in a Gossip request till 1001 version.
   // This GOSSIPVERSION is used in _getVersionForAddress request for getting GemFire version of a
   // GossipServer.
@@ -120,6 +126,7 @@ public class TcpServer {
   private InetAddress bind_address;
   private volatile boolean shuttingDown = false; // GemStoneAddition
   private final PoolStatHelper poolHelper;
+  private InternalLocator internalLocator;
   private final TcpHandler handler;
 
   private PooledExecutorWithDMStats executor;
@@ -143,11 +150,12 @@ public class TcpServer {
 
   public TcpServer(int port, InetAddress bind_address, Properties sslConfig,
       DistributionConfigImpl cfg, TcpHandler handler, PoolStatHelper poolHelper,
-      ThreadGroup threadGroup, String threadName) {
+      ThreadGroup threadGroup, String threadName, InternalLocator internalLocator) {
     this.port = port;
     this.bind_address = bind_address;
     this.handler = handler;
     this.poolHelper = poolHelper;
+    this.internalLocator = internalLocator;
     // register DSFID types first; invoked explicitly so that all message type
     // initializations do not happen in first deserialization on a possibly
     // "precious" thread
@@ -334,42 +342,46 @@ public class TcpServer {
    * fix for bug 33711 - client requests are spun off to another thread for processing. Requests are
    * synchronized in processGossip.
    */
-  private void processRequest(final Socket sock) {
+  private void processRequest(final Socket socket) {
     executor.execute(() -> {
       long startTime = DistributionStats.getStatTime();
       DataInputStream input = null;
       Object request, response;
       try {
 
-        sock.setSoTimeout(READ_TIMEOUT);
-        getSocketCreator().configureServerSSLSocket(sock);
+        socket.setSoTimeout(READ_TIMEOUT);
+        getSocketCreator().configureServerSSLSocket(socket);
 
         try {
-          input = new DataInputStream(sock.getInputStream());
+          input = new DataInputStream(socket.getInputStream());
         } catch (StreamCorruptedException e) {
           // Some garbage can be left on the socket stream
           // if a peer disappears at exactly the wrong moment.
           log.debug("Discarding illegal request from "
-              + (sock.getInetAddress().getHostAddress() + ":" + sock.getPort()), e);
+              + (socket.getInetAddress().getHostAddress() + ":" + socket.getPort()), e);
           return;
         }
-        int gossipVersion = readGossipVersion(sock, input);
+        int gossipVersion = readGossipVersion(socket, input);
 
         short versionOrdinal;
+        if (gossipVersion == NON_GOSSIP_REQUEST_VERSION) {
+          if (input.readUnsignedByte() == AcceptorImpl.PROTOBUF_CLIENT_SERVER_PROTOCOL
+              && Boolean.getBoolean("geode.feature-protobuf-protocol")) {
+            ClientProtocolMessageHandler messageHandler = ClientProtocolMessageHandlerLoader.load();
+            messageHandler.receiveMessage(input, socket.getOutputStream(),
+                new ExecutionContext(internalLocator));
+          } else {
+            rejectUnknownProtocolConnection(socket, gossipVersion);
+            return;
+          }
+        }
         if (gossipVersion <= getCurrentGossipVersion()
             && GOSSIP_TO_GEMFIRE_VERSION_MAP.containsKey(gossipVersion)) {
           // Create a versioned stream to remember sender's GemFire version
           versionOrdinal = (short) GOSSIP_TO_GEMFIRE_VERSION_MAP.get(gossipVersion);
         } else {
           // Close the socket. We can not accept requests from a newer version
-          try {
-            sock.getOutputStream().write("unknown protocol version".getBytes());
-            sock.getOutputStream().flush();
-          } catch (IOException e) {
-            log.debug(
-                "exception in sending reply to process using unknown protocol " + gossipVersion, e);
-          }
-          sock.close();
+          rejectUnknownProtocolConnection(socket, gossipVersion);
           return;
         }
         if (Version.GFE_71.compareTo(versionOrdinal) <= 0) {
@@ -378,13 +390,13 @@ public class TcpServer {
         }
 
         if (log.isDebugEnabled() && versionOrdinal != Version.CURRENT_ORDINAL) {
-          log.debug("Locator reading request from " + sock.getInetAddress() + " with version "
+          log.debug("Locator reading request from " + socket.getInetAddress() + " with version "
               + Version.fromOrdinal(versionOrdinal, false));
         }
         input = new VersionedDataInputStream(input, Version.fromOrdinal(versionOrdinal, false));
         request = DataSerializer.readObject(input);
         if (log.isDebugEnabled()) {
-          log.debug("Locator received request " + request + " from " + sock.getInetAddress());
+          log.debug("Locator received request " + request + " from " + socket.getInetAddress());
         }
         if (request instanceof ShutdownRequest) {
           shuttingDown = true;
@@ -405,7 +417,7 @@ public class TcpServer {
 
         startTime = DistributionStats.getStatTime();
         if (response != null) {
-          DataOutputStream output = new DataOutputStream(sock.getOutputStream());
+          DataOutputStream output = new DataOutputStream(socket.getOutputStream());
           if (versionOrdinal != Version.CURRENT_ORDINAL) {
             output =
                 new VersionedDataOutputStream(output, Version.fromOrdinal(versionOrdinal, false));
@@ -422,19 +434,19 @@ public class TcpServer {
         // ignore
       } catch (ClassNotFoundException ex) {
         String sender = null;
-        if (sock != null) {
-          sender = sock.getInetAddress().getHostAddress();
+        if (socket != null) {
+          sender = socket.getInetAddress().getHostAddress();
         }
         log.info("Unable to process request from " + sender + " exception=" + ex.getMessage());
       } catch (Exception ex) {
         String sender = null;
-        if (sock != null) {
-          sender = sock.getInetAddress().getHostAddress();
+        if (socket != null) {
+          sender = socket.getInetAddress().getHostAddress();
         }
         if (ex instanceof IOException) {
           // IOException could be caused by a client failure. Don't
           // log with severe.
-          if (!sock.isClosed()) {
+          if (!socket.isClosed()) {
             log.info("Exception in processing request from " + sender, ex);
           }
         } else {
@@ -447,8 +459,8 @@ public class TcpServer {
       } catch (Throwable ex) {
         SystemFailure.checkFailure();
         String sender = null;
-        if (sock != null) {
-          sender = sock.getInetAddress().getHostAddress();
+        if (socket != null) {
+          sender = socket.getInetAddress().getHostAddress();
         }
         try {
           log.fatal("Exception in processing request from " + sender, ex);
@@ -461,7 +473,7 @@ public class TcpServer {
         }
       } finally {
         try {
-          sock.close();
+          socket.close();
         } catch (IOException ignore) {
           // ignore
         }
@@ -469,6 +481,17 @@ public class TcpServer {
     });
   }
 
+  private void rejectUnknownProtocolConnection(Socket socket, int gossipVersion)
+      throws IOException {
+    try {
+      socket.getOutputStream().write("unknown protocol version".getBytes());
+      socket.getOutputStream().flush();
+    } catch (IOException e) {
+      log.debug("exception in sending reply to process using unknown protocol " + gossipVersion, e);
+    }
+    socket.close();
+  }
+
   private int readGossipVersion(Socket sock, DataInputStream input) throws Exception {
     // read the first byte & check for an improperly configured client pool trying
     // to contact a cache server

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
index 84aa66e..4c7a6ef 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
@@ -75,7 +75,9 @@ import org.apache.geode.pdx.internal.TypeRegistry;
  */
 public interface InternalCache extends Cache, Extensible<Cache>, CacheTime {
 
-  InternalDistributedMember getMyId();
+  default InternalDistributedMember getMyId() {
+    return null;
+  }
 
   Collection<DiskStore> listDiskStores();
 

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtoclMessageHandlerLoader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtoclMessageHandlerLoader.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtoclMessageHandlerLoader.java
new file mode 100644
index 0000000..6654757
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtoclMessageHandlerLoader.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.internal.cache.tier.sockets;
+
+import java.io.IOException;
+import java.net.Socket;
+import java.util.Iterator;
+import java.util.ServiceLoader;
+
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.internal.cache.tier.Acceptor;
+import org.apache.geode.internal.cache.tier.CachedRegionHelper;
+import org.apache.geode.internal.security.SecurityService;
+
+/**
+ * Creates instances of ServerConnection based on the connection mode provided.
+ */
+public class ClientProtoclMessageHandlerLoader {
+  private static ClientProtocolMessageHandler protobufProtocolHandler;
+  private static final Object protocolLoadLock = new Object();
+
+  public static ClientProtocolMessageHandler load() {
+    if (protobufProtocolHandler != null) {
+      return protobufProtocolHandler;
+    }
+
+    synchronized (protocolLoadLock) {
+      if (protobufProtocolHandler != null) {
+        return protobufProtocolHandler;
+      }
+
+      ServiceLoader<ClientProtocolMessageHandler> loader =
+          ServiceLoader.load(ClientProtocolMessageHandler.class);
+      Iterator<ClientProtocolMessageHandler> iterator = loader.iterator();
+
+      if (!iterator.hasNext()) {
+        throw new ServiceLoadingFailureException(
+            "ClientProtocolMessageHandler implementation not found in JVM");
+      }
+
+      ClientProtocolMessageHandler returnValue = iterator.next();
+
+      if (iterator.hasNext()) {
+        throw new ServiceLoadingFailureException(
+            "Multiple service implementations found for ClientProtocolMessageHandler");
+      }
+
+      return returnValue;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandler.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandler.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandler.java
index 32e9e4b..38ab73e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandler.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandler.java
@@ -15,12 +15,11 @@
 
 package org.apache.geode.internal.cache.tier.sockets;
 
-import org.apache.geode.internal.cache.InternalCache;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 
+
 /**
  * This is an interface that other modules can implement to hook into
  * {@link GenericProtocolServerConnection} to handle messages sent to Geode.
@@ -30,6 +29,6 @@ import java.io.OutputStream;
  * {@link GenericProtocolServerConnection}.
  */
 public interface ClientProtocolMessageHandler {
-  void receiveMessage(InputStream inputStream, OutputStream outputStream, InternalCache cache)
-      throws IOException;
+  void receiveMessage(InputStream inputStream, OutputStream outputStream,
+      ExecutionContext executionContext) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandlerLoader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandlerLoader.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandlerLoader.java
new file mode 100644
index 0000000..1dc6129
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandlerLoader.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.internal.cache.tier.sockets;
+
+import java.io.IOException;
+import java.net.Socket;
+import java.util.Iterator;
+import java.util.ServiceLoader;
+
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.internal.cache.tier.Acceptor;
+import org.apache.geode.internal.cache.tier.CachedRegionHelper;
+import org.apache.geode.internal.security.SecurityService;
+
+/**
+ * Creates instances of ServerConnection based on the connection mode provided.
+ */
+public class ClientProtocolMessageHandlerLoader {
+  private static ClientProtocolMessageHandler protobufProtocolHandler;
+  private static final Object protocolLoadLock = new Object();
+
+  public static ClientProtocolMessageHandler load() {
+    if (protobufProtocolHandler != null) {
+      return protobufProtocolHandler;
+    }
+
+    synchronized (protocolLoadLock) {
+      if (protobufProtocolHandler != null) {
+        return protobufProtocolHandler;
+      }
+
+      ServiceLoader<ClientProtocolMessageHandler> loader =
+          ServiceLoader.load(ClientProtocolMessageHandler.class);
+      Iterator<ClientProtocolMessageHandler> iterator = loader.iterator();
+
+      if (!iterator.hasNext()) {
+        throw new ServiceLoadingFailureException(
+            "ClientProtocolMessageHandler implementation not found in JVM");
+      }
+
+      ClientProtocolMessageHandler returnValue = iterator.next();
+
+      if (iterator.hasNext()) {
+        throw new ServiceLoadingFailureException(
+            "Multiple service implementations found for ClientProtocolMessageHandler");
+      }
+
+      return returnValue;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ExecutionContext.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ExecutionContext.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ExecutionContext.java
new file mode 100644
index 0000000..27da205
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ExecutionContext.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.internal.cache.tier.sockets;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.distributed.internal.InternalLocator;
+
+public class ExecutionContext {
+  private Cache cache;
+  private InternalLocator locator;
+
+  public ExecutionContext(Cache cache) {
+    this.cache = cache;
+  }
+
+  public ExecutionContext(InternalLocator locator) {
+    this.locator = locator;
+  }
+
+  // This throws if the cache isn't present because we know that none of the callers can take any
+  // reasonable action if the cache is not present
+  public Cache getCache() throws InvalidExecutionContextException {
+    if (cache != null) {
+      return cache;
+    } else {
+      throw new InvalidExecutionContextException(
+          "Execution context's cache was accessed but isn't present. Did this happen on a locator? Operations on the locator should not try to operate on a cache");
+    }
+  }
+
+  // This throws if the locator isn't present because we know that none of the callers can take any
+  // reasonable action if the locator is not present
+  public InternalLocator getLocator() throws InvalidExecutionContextException {
+    if (locator != null) {
+      return locator;
+    } else {
+      throw new InvalidExecutionContextException(
+          "Execution context's locator was accessed but isn't present. Did this happen on a server? Operations on the locator should not try to operate on a cache");
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/GenericProtocolServerConnection.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/GenericProtocolServerConnection.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/GenericProtocolServerConnection.java
index 93a7f6f..8f6720e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/GenericProtocolServerConnection.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/GenericProtocolServerConnection.java
@@ -61,7 +61,8 @@ public class GenericProtocolServerConnection extends ServerConnection {
       if (!authenticator.isAuthenticated()) {
         authenticator.receiveMessage(inputStream, outputStream, securityManager);
       } else {
-        messageHandler.receiveMessage(inputStream, outputStream, this.getCache());
+        messageHandler.receiveMessage(inputStream, outputStream,
+            new ExecutionContext(this.getCache()));
       }
     } catch (IOException e) {
       logger.warn(e);

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/InvalidExecutionContextException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/InvalidExecutionContextException.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/InvalidExecutionContextException.java
new file mode 100644
index 0000000..919e301
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/InvalidExecutionContextException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.tier.sockets;
+
+import org.apache.geode.annotations.Experimental;
+
+/*
+ * Indicates that an ExecutionContext was missing required data. This will typically happen if an
+ * operation that is supposed to run on a server runs on a locator and receives a locator in its
+ * context instead of a cache. The reverse case applies as well.
+ */
+@Experimental
+public class InvalidExecutionContextException extends Exception {
+  public InvalidExecutionContextException(String message) {
+    super(message);
+  }
+
+  public InvalidExecutionContextException(String message, Throwable cause) {
+    super(message, cause);
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
index 5c33468..802620c 100644
--- a/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
@@ -18,11 +18,8 @@ import org.apache.geode.CancelCriterion;
 import org.apache.geode.cache.*;
 import org.apache.geode.cache.client.NoAvailableLocatorsException;
 import org.apache.geode.cache.client.SubscriptionNotEnabledException;
-import org.apache.geode.cache.client.internal.AutoConnectionSourceImpl.UpdateLocatorListTask;
-import org.apache.geode.cache.client.internal.PoolImpl.PoolTask;
 import org.apache.geode.cache.client.internal.locator.ClientConnectionRequest;
 import org.apache.geode.cache.client.internal.locator.ClientConnectionResponse;
-import org.apache.geode.cache.client.internal.locator.LocatorListRequest;
 import org.apache.geode.cache.client.internal.locator.LocatorListResponse;
 import org.apache.geode.cache.query.QueryService;
 import org.apache.geode.distributed.DistributedSystem;
@@ -63,7 +60,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
-import java.util.concurrent.Callable;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
@@ -308,7 +304,7 @@ public class AutoConnectionSourceImplJUnitTest {
     startFakeLocator();
     int secondPort = AvailablePortHelper.getRandomAvailableTCPPort();
     TcpServer server2 = new TcpServer(secondPort, InetAddress.getLocalHost(), null, null, handler,
-        new FakeHelper(), Thread.currentThread().getThreadGroup(), "tcp server");
+        new FakeHelper(), Thread.currentThread().getThreadGroup(), "tcp server", null);
     server2.start();
 
     try {
@@ -392,7 +388,7 @@ public class AutoConnectionSourceImplJUnitTest {
 
   private void startFakeLocator() throws UnknownHostException, IOException, InterruptedException {
     server = new TcpServer(port, InetAddress.getLocalHost(), null, null, handler, new FakeHelper(),
-        Thread.currentThread().getThreadGroup(), "Tcp Server");
+        Thread.currentThread().getThreadGroup(), "Tcp Server", null);
     server.start();
     Thread.sleep(500);
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TCPServerSSLJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TCPServerSSLJUnitTest.java b/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TCPServerSSLJUnitTest.java
index 8a25aaf..229fbb9 100644
--- a/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TCPServerSSLJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TCPServerSSLJUnitTest.java
@@ -138,7 +138,7 @@ public class TCPServerSSLJUnitTest {
     public DummyTcpServer(int port, InetAddress bind_address, Properties sslConfig,
         DistributionConfigImpl cfg, TcpHandler handler, PoolStatHelper poolHelper,
         ThreadGroup threadGroup, String threadName) {
-      super(port, bind_address, sslConfig, cfg, handler, poolHelper, threadGroup, threadName);
+      super(port, bind_address, sslConfig, cfg, handler, poolHelper, threadGroup, threadName, null);
       if (cfg == null) {
         cfg = new DistributionConfigImpl(sslConfig);
       }

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TcpServerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TcpServerJUnitTest.java b/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TcpServerJUnitTest.java
index eda0641..9d20e8c 100644
--- a/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TcpServerJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TcpServerJUnitTest.java
@@ -69,7 +69,7 @@ public class TcpServerJUnitTest {
 
     stats = new SimpleStats();
     server = new TcpServer(port, localhost, new Properties(), null, handler, stats,
-        Thread.currentThread().getThreadGroup(), "server thread");
+        Thread.currentThread().getThreadGroup(), "server thread", null);
     server.start();
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/operations/OperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/operations/OperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/operations/OperationHandler.java
index aa6d79e..5d9012f 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/operations/OperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/operations/OperationHandler.java
@@ -15,7 +15,8 @@
 package org.apache.geode.protocol.operations;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.cache.Cache;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.ProtobufOpsProcessor;
 import org.apache.geode.protocol.protobuf.Result;
 import org.apache.geode.serialization.SerializationService;
@@ -32,6 +33,7 @@ public interface OperationHandler<Req, Resp> {
    * Decode the message, deserialize contained values using the serialization service, do the work
    * indicated on the provided cache, and return a response.
    */
-  Result<Resp> process(SerializationService serializationService, Req request, Cache cache);
+  Result<Resp> process(SerializationService serializationService, Req request,
+      ExecutionContext executionContext) throws InvalidExecutionContextException;
 }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufOpsProcessor.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufOpsProcessor.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufOpsProcessor.java
index 7d75b4a..76f81e7 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufOpsProcessor.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufOpsProcessor.java
@@ -15,8 +15,10 @@
 package org.apache.geode.protocol.protobuf;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.cache.Cache;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.registry.OperationContextRegistry;
+import org.apache.geode.protocol.protobuf.utilities.ProtobufResponseUtilities;
 import org.apache.geode.serialization.SerializationService;
 
 /**
@@ -35,12 +37,19 @@ public class ProtobufOpsProcessor {
     this.operationContextRegistry = operationContextRegistry;
   }
 
-  public ClientProtocol.Response process(ClientProtocol.Request request, Cache cache) {
+  public ClientProtocol.Response process(ClientProtocol.Request request, ExecutionContext context) {
     ClientProtocol.Request.RequestAPICase requestType = request.getRequestAPICase();
     OperationContext operationContext = operationContextRegistry.getOperationContext(requestType);
     ClientProtocol.Response.Builder builder;
-    Result result = operationContext.getOperationHandler().process(serializationService,
-        operationContext.getFromRequest().apply(request), cache);
+    Result result;
+    try {
+      result = operationContext.getOperationHandler().process(serializationService,
+          operationContext.getFromRequest().apply(request), context);
+    } catch (InvalidExecutionContextException e) {
+      result = Failure.of(ProtobufResponseUtilities.makeErrorResponse(
+          ProtocolErrorCode.UNSUPPORTED_OPERATION.codeValue,
+          "Invalid execution context found for operation."));
+    }
 
     builder = (ClientProtocol.Response.Builder) result.map(operationContext.getToResponse(),
         operationContext.getToErrorResponse());

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessor.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessor.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessor.java
index 648ab3c..d04e49e 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessor.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessor.java
@@ -20,9 +20,8 @@ import java.io.InputStream;
 import java.io.OutputStream;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.cache.Cache;
-import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.internal.cache.tier.sockets.ClientProtocolMessageHandler;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
 import org.apache.geode.protocol.exception.InvalidProtocolMessageException;
 import org.apache.geode.protocol.protobuf.registry.OperationContextRegistry;
 import org.apache.geode.protocol.protobuf.serializer.ProtobufProtocolSerializer;
@@ -45,29 +44,29 @@ public class ProtobufStreamProcessor implements ClientProtocolMessageHandler {
         new OperationContextRegistry());
   }
 
-  public void processOneMessage(InputStream inputStream, OutputStream outputStream, Cache cache)
-      throws InvalidProtocolMessageException, IOException {
+  @Override
+  public void receiveMessage(InputStream inputStream, OutputStream outputStream,
+      ExecutionContext executionContext) throws IOException {
+    try {
+      processOneMessage(inputStream, outputStream, executionContext);
+    } catch (InvalidProtocolMessageException e) {
+      throw new IOException(e);
+    }
+  }
+
+  private void processOneMessage(InputStream inputStream, OutputStream outputStream,
+      ExecutionContext executionContext) throws InvalidProtocolMessageException, IOException {
     ClientProtocol.Message message = protobufProtocolSerializer.deserialize(inputStream);
     if (message == null) {
       throw new EOFException("Tried to deserialize protobuf message at EOF");
     }
 
     ClientProtocol.Request request = message.getRequest();
-    ClientProtocol.Response response = protobufOpsProcessor.process(request, cache);
+    ClientProtocol.Response response = protobufOpsProcessor.process(request, executionContext);
     ClientProtocol.MessageHeader responseHeader =
         ProtobufUtilities.createMessageHeaderForRequest(message);
     ClientProtocol.Message responseMessage =
         ProtobufUtilities.createProtobufResponse(responseHeader, response);
     protobufProtocolSerializer.serialize(responseMessage, outputStream);
   }
-
-  @Override
-  public void receiveMessage(InputStream inputStream, OutputStream outputStream,
-      InternalCache cache) throws IOException {
-    try {
-      processOneMessage(inputStream, outputStream, cache);
-    } catch (InvalidProtocolMessageException e) {
-      throw new IOException(e);
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtocolErrorCode.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtocolErrorCode.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtocolErrorCode.java
index e3b262d..6a6f605 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtocolErrorCode.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtocolErrorCode.java
@@ -18,6 +18,7 @@ public enum ProtocolErrorCode {
   GENERIC_FAILURE(1000),
   VALUE_ENCODING_ERROR(1100),
   UNSUPPORTED_VERSION(1101),
+  UNSUPPORTED_OPERATION(1102),
   AUTHENTICATION_FAILED(1200),
   AUTHORIZATION_FAILED(1201),
   UNAUTHORIZED_REQUEST(1202),

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandler.java
index 607d1d2..75274c1 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandler.java
@@ -19,8 +19,9 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.Failure;
@@ -40,9 +41,10 @@ public class GetAllRequestOperationHandler
 
   @Override
   public Result<RegionAPI.GetAllResponse> process(SerializationService serializationService,
-      RegionAPI.GetAllRequest request, Cache cache) {
+      RegionAPI.GetAllRequest request, ExecutionContext executionContext)
+      throws InvalidExecutionContextException {
     String regionName = request.getRegionName();
-    Region region = cache.getRegion(regionName);
+    Region region = executionContext.getCache().getRegion(regionName);
     if (region == null) {
       return Failure.of(ProtobufResponseUtilities
           .makeErrorResponse(ProtocolErrorCode.REGION_NOT_FOUND.codeValue, "Region not found"));

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandler.java
index 239d9f7..e7c18cd 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandler.java
@@ -14,33 +14,20 @@
  */
 package org.apache.geode.protocol.protobuf.operations;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashSet;
-import java.util.Properties;
-import java.util.StringTokenizer;
 import java.util.stream.Collectors;
 
-import org.apache.commons.lang.StringUtils;
-
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.cache.Cache;
-import org.apache.geode.cache.client.internal.locator.GetAllServersRequest;
-import org.apache.geode.cache.client.internal.locator.GetAllServersResponse;
-import org.apache.geode.distributed.ConfigurationProperties;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.InternalLocator;
 import org.apache.geode.distributed.internal.ServerLocation;
-import org.apache.geode.distributed.internal.tcpserver.TcpClient;
-import org.apache.geode.internal.admin.remote.DistributionLocatorId;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
-import org.apache.geode.protocol.protobuf.Failure;
-import org.apache.geode.protocol.protobuf.ProtocolErrorCode;
 import org.apache.geode.protocol.protobuf.Result;
 import org.apache.geode.protocol.protobuf.ServerAPI;
 import org.apache.geode.protocol.protobuf.Success;
-import org.apache.geode.protocol.protobuf.utilities.ProtobufResponseUtilities;
 import org.apache.geode.serialization.SerializationService;
 
 @Experimental
@@ -50,51 +37,19 @@ public class GetAvailableServersOperationHandler implements
   @Override
   public Result<ServerAPI.GetAvailableServersResponse> process(
       SerializationService serializationService, ServerAPI.GetAvailableServersRequest request,
-      Cache cache) {
-
-    InternalDistributedSystem distributedSystem =
-        (InternalDistributedSystem) cache.getDistributedSystem();
-    Properties properties = distributedSystem.getProperties();
-    String locatorsString = properties.getProperty(ConfigurationProperties.LOCATORS);
-
-    HashSet<DistributionLocatorId> locators = new HashSet();
-    StringTokenizer stringTokenizer = new StringTokenizer(locatorsString, ",");
-    while (stringTokenizer.hasMoreTokens()) {
-      String locator = stringTokenizer.nextToken();
-      if (StringUtils.isNotEmpty(locator)) {
-        locators.add(new DistributionLocatorId(locator));
-      }
-    }
+      ExecutionContext executionContext) throws InvalidExecutionContextException {
 
-    TcpClient tcpClient = getTcpClient();
-    for (DistributionLocatorId locator : locators) {
-      try {
-        return getGetAvailableServersFromLocator(tcpClient, locator.getHost());
-      } catch (IOException | ClassNotFoundException e) {
-        // try the next locator
-      }
-    }
-    return Failure.of(ProtobufResponseUtilities.makeErrorResponse(
-        ProtocolErrorCode.DATA_UNREACHABLE.codeValue, "Unable to find a locator"));
-  }
+    InternalLocator locator = executionContext.getLocator();
+    ArrayList servers2 = locator.getServerLocatorAdvisee().getLoadSnapshot().getServers(null);
 
-  private Result<ServerAPI.GetAvailableServersResponse> getGetAvailableServersFromLocator(
-      TcpClient tcpClient, InetSocketAddress address) throws IOException, ClassNotFoundException {
-    GetAllServersResponse getAllServersResponse = (GetAllServersResponse) tcpClient
-        .requestToServer(address, new GetAllServersRequest(), 1000, true);
-    Collection<BasicTypes.Server> servers =
-        (Collection<BasicTypes.Server>) getAllServersResponse.getServers().stream()
-            .map(serverLocation -> getServerProtobufMessage((ServerLocation) serverLocation))
-            .collect(Collectors.toList());
+    Collection<BasicTypes.Server> servers = (Collection<BasicTypes.Server>) servers2.stream()
+        .map(serverLocation -> getServerProtobufMessage((ServerLocation) serverLocation))
+        .collect(Collectors.toList());
     ServerAPI.GetAvailableServersResponse.Builder builder =
         ServerAPI.GetAvailableServersResponse.newBuilder().addAllServers(servers);
     return Success.of(builder.build());
   }
 
-  protected TcpClient getTcpClient() {
-    return new TcpClient();
-  }
-
   private BasicTypes.Server getServerProtobufMessage(ServerLocation serverLocation) {
     BasicTypes.Server.Builder serverBuilder = BasicTypes.Server.newBuilder();
     serverBuilder.setHostname(serverLocation.getHostName()).setPort(serverLocation.getPort());

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandler.java
index e5d216a..53898ed 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandler.java
@@ -17,8 +17,9 @@ package org.apache.geode.protocol.protobuf.operations;
 import java.util.Set;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.RegionAPI;
 import org.apache.geode.protocol.protobuf.Result;
@@ -32,8 +33,9 @@ public class GetRegionNamesRequestOperationHandler
 
   @Override
   public Result<RegionAPI.GetRegionNamesResponse> process(SerializationService serializationService,
-      RegionAPI.GetRegionNamesRequest request, Cache cache) {
-    Set<Region<?, ?>> regions = cache.rootRegions();
+      RegionAPI.GetRegionNamesRequest request, ExecutionContext executionContext)
+      throws InvalidExecutionContextException {
+    Set<Region<?, ?>> regions = executionContext.getCache().rootRegions();
     return Success.of(ProtobufResponseUtilities.createGetRegionNamesResponse(regions));
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandler.java
index b563a5d..007f96b 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandler.java
@@ -15,8 +15,9 @@
 package org.apache.geode.protocol.protobuf.operations;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.Failure;
@@ -34,10 +35,11 @@ public class GetRegionRequestOperationHandler
 
   @Override
   public Result<RegionAPI.GetRegionResponse> process(SerializationService serializationService,
-      RegionAPI.GetRegionRequest request, Cache cache) {
+      RegionAPI.GetRegionRequest request, ExecutionContext executionContext)
+      throws InvalidExecutionContextException {
     String regionName = request.getRegionName();
 
-    Region region = cache.getRegion(regionName);
+    Region region = executionContext.getCache().getRegion(regionName);
     if (region == null) {
       return Failure.of(
           ProtobufResponseUtilities.makeErrorResponse(ProtocolErrorCode.REGION_NOT_FOUND.codeValue,

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandler.java
index 96c0282..8f0fef7 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandler.java
@@ -15,8 +15,9 @@
 package org.apache.geode.protocol.protobuf.operations;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.Failure;
@@ -36,9 +37,10 @@ public class GetRequestOperationHandler
 
   @Override
   public Result<RegionAPI.GetResponse> process(SerializationService serializationService,
-      RegionAPI.GetRequest request, Cache cache) {
+      RegionAPI.GetRequest request, ExecutionContext executionContext)
+      throws InvalidExecutionContextException {
     String regionName = request.getRegionName();
-    Region region = cache.getRegion(regionName);
+    Region region = executionContext.getCache().getRegion(regionName);
     if (region == null) {
       return Failure.of(ProtobufResponseUtilities
           .makeErrorResponse(ProtocolErrorCode.REGION_NOT_FOUND.codeValue, "Region not found"));

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandler.java
index 253a95d..e0ebc41 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandler.java
@@ -21,8 +21,9 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.Failure;
@@ -43,8 +44,9 @@ public class PutAllRequestOperationHandler
 
   @Override
   public Result<RegionAPI.PutAllResponse> process(SerializationService serializationService,
-      RegionAPI.PutAllRequest putAllRequest, Cache cache) {
-    Region region = cache.getRegion(putAllRequest.getRegionName());
+      RegionAPI.PutAllRequest putAllRequest, ExecutionContext executionContext)
+      throws InvalidExecutionContextException {
+    Region region = executionContext.getCache().getRegion(putAllRequest.getRegionName());
 
     if (region == null) {
       return Failure.of(ProtobufResponseUtilities.createAndLogErrorResponse(

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandler.java
index c24fb29..cf5afb4 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandler.java
@@ -15,8 +15,9 @@
 package org.apache.geode.protocol.protobuf.operations;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.Failure;
@@ -36,9 +37,10 @@ public class PutRequestOperationHandler
 
   @Override
   public Result<RegionAPI.PutResponse> process(SerializationService serializationService,
-      RegionAPI.PutRequest request, Cache cache) {
+      RegionAPI.PutRequest request, ExecutionContext executionContext)
+      throws InvalidExecutionContextException {
     String regionName = request.getRegionName();
-    Region region = cache.getRegion(regionName);
+    Region region = executionContext.getCache().getRegion(regionName);
     if (region == null) {
       return Failure.of(
           ProtobufResponseUtilities.makeErrorResponse(ProtocolErrorCode.REGION_NOT_FOUND.codeValue,

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandler.java
index 59236be..052efcf 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandler.java
@@ -18,10 +18,10 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
-import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.Failure;
 import org.apache.geode.protocol.protobuf.ProtocolErrorCode;
 import org.apache.geode.protocol.protobuf.RegionAPI;
@@ -40,10 +40,11 @@ public class RemoveRequestOperationHandler
 
   @Override
   public Result<RegionAPI.RemoveResponse> process(SerializationService serializationService,
-      RegionAPI.RemoveRequest request, Cache cache) {
+      RegionAPI.RemoveRequest request, ExecutionContext executionContext)
+      throws InvalidExecutionContextException {
 
     String regionName = request.getRegionName();
-    Region region = cache.getRegion(regionName);
+    Region region = executionContext.getCache().getRegion(regionName);
     if (region == null) {
       return Failure.of(ProtobufResponseUtilities
           .makeErrorResponse(ProtocolErrorCode.REGION_NOT_FOUND.codeValue, "Region not found"));

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/test/java/org/apache/geode/protocol/GetAvailableServersDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/GetAvailableServersDUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/GetAvailableServersDUnitTest.java
deleted file mode 100644
index 4d6390b..0000000
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/GetAvailableServersDUnitTest.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.protocol;
-
-import org.apache.geode.cache.server.CacheServer;
-import org.apache.geode.internal.cache.InternalCache;
-import org.apache.geode.protocol.exception.InvalidProtocolMessageException;
-import org.apache.geode.protocol.protobuf.ClientProtocol;
-import org.apache.geode.protocol.protobuf.ServerAPI;
-import org.apache.geode.protocol.protobuf.serializer.ProtobufProtocolSerializer;
-import org.apache.geode.protocol.protobuf.utilities.ProtobufRequestUtilities;
-import org.apache.geode.protocol.protobuf.utilities.ProtobufUtilities;
-import org.apache.geode.test.dunit.DistributedTestUtils;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.rules.DistributedRestoreSystemProperties;
-import org.apache.geode.test.junit.categories.DistributedTest;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.io.IOException;
-import java.net.Socket;
-
-import static org.junit.Assert.assertEquals;
-
-@Category(DistributedTest.class)
-public class GetAvailableServersDUnitTest extends JUnit4CacheTestCase {
-
-  @Rule
-  public DistributedRestoreSystemProperties distributedRestoreSystemProperties =
-      new DistributedRestoreSystemProperties();
-
-  @Before
-  public void setup() {
-
-  }
-
-  @Test
-  public void testGetAllAvailableServersRequest()
-      throws IOException, InvalidProtocolMessageException {
-    Host host = Host.getHost(0);
-    VM vm0 = host.getVM(0);
-    VM vm1 = host.getVM(1);
-    VM vm2 = host.getVM(2);
-
-    int locatorPort = DistributedTestUtils.getDUnitLocatorPort();
-
-    // int cacheServer1Port = vm0.invoke("Start Cache1", () -> startCacheWithCacheServer());
-    int cacheServer1Port = startCacheWithCacheServer();
-    int cacheServer2Port = vm1.invoke("Start Cache2", () -> startCacheWithCacheServer());
-    int cacheServer3Port = vm2.invoke("Start Cache3", () -> startCacheWithCacheServer());
-
-    vm0.invoke(() -> {
-      Socket socket = new Socket(host.getHostName(), cacheServer1Port);
-      socket.getOutputStream().write(110);
-
-      ClientProtocol.Request.Builder protobufRequestBuilder =
-          ProtobufUtilities.createProtobufRequestBuilder();
-      ClientProtocol.Message getAvailableServersRequestMessage =
-          ProtobufUtilities.createProtobufMessage(ProtobufUtilities.createMessageHeader(1233445),
-              protobufRequestBuilder.setGetAvailableServersRequest(
-                  ProtobufRequestUtilities.createGetAvailableServersRequest()).build());
-
-      ProtobufProtocolSerializer protobufProtocolSerializer = new ProtobufProtocolSerializer();
-      protobufProtocolSerializer.serialize(getAvailableServersRequestMessage,
-          socket.getOutputStream());
-
-      ClientProtocol.Message getAvailableServersResponseMessage =
-          protobufProtocolSerializer.deserialize(socket.getInputStream());
-      assertEquals(1233445,
-          getAvailableServersResponseMessage.getMessageHeader().getCorrelationId());
-      assertEquals(ClientProtocol.Message.MessageTypeCase.RESPONSE,
-          getAvailableServersResponseMessage.getMessageTypeCase());
-      ClientProtocol.Response messageResponse = getAvailableServersResponseMessage.getResponse();
-      assertEquals(ClientProtocol.Response.ResponseAPICase.GETAVAILABLESERVERSRESPONSE,
-          messageResponse.getResponseAPICase());
-      ServerAPI.GetAvailableServersResponse getAvailableServersResponse =
-          messageResponse.getGetAvailableServersResponse();
-      assertEquals(3, getAvailableServersResponse.getServersCount());
-    });
-  }
-
-  private Integer startCacheWithCacheServer() throws IOException {
-    System.setProperty("geode.feature-protobuf-protocol", "true");
-
-    InternalCache cache = getCache();
-    CacheServer cacheServer = cache.addCacheServer();
-    cacheServer.setPort(0);
-    cacheServer.start();
-    return cacheServer.getPort();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/test/java/org/apache/geode/protocol/RoundTripLocatorConnectionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/RoundTripLocatorConnectionJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/RoundTripLocatorConnectionJUnitTest.java
new file mode 100644
index 0000000..799c55c
--- /dev/null
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/RoundTripLocatorConnectionJUnitTest.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.protocol;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.net.Socket;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.contrib.java.lang.system.RestoreSystemProperties;
+import org.junit.experimental.categories.Category;
+
+import org.apache.geode.cache.server.CacheServer;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.protocol.exception.InvalidProtocolMessageException;
+import org.apache.geode.protocol.protobuf.ClientProtocol;
+import org.apache.geode.protocol.protobuf.ProtocolErrorCode;
+import org.apache.geode.protocol.protobuf.ServerAPI;
+import org.apache.geode.protocol.protobuf.serializer.ProtobufProtocolSerializer;
+import org.apache.geode.protocol.protobuf.utilities.ProtobufRequestUtilities;
+import org.apache.geode.protocol.protobuf.utilities.ProtobufUtilities;
+import org.apache.geode.test.dunit.DistributedTestUtils;
+import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
+import org.apache.geode.test.junit.categories.DistributedTest;
+
+@Category(DistributedTest.class)
+public class RoundTripLocatorConnectionJUnitTest extends JUnit4CacheTestCase {
+
+  private Socket socket;
+  private DataOutputStream dataOutputStream;
+
+  @Rule
+  public final RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties();
+
+  @Before
+  public void setup() throws IOException {
+    Host host = Host.getHost(0);
+    int locatorPort = DistributedTestUtils.getDUnitLocatorPort();
+    int cacheServer1Port = startCacheWithCacheServer();
+
+    Host.getLocator().invoke(() -> System.setProperty("geode.feature-protobuf-protocol", "true"));
+
+    socket = new Socket(host.getHostName(), locatorPort);
+    dataOutputStream = new DataOutputStream(socket.getOutputStream());
+    dataOutputStream.writeInt(0);
+    dataOutputStream.writeByte(110);
+  }
+
+  @Test
+  public void testEchoProtobufMessageFromLocator()
+      throws IOException, InvalidProtocolMessageException {
+    ClientProtocol.Request.Builder protobufRequestBuilder =
+        ProtobufUtilities.createProtobufRequestBuilder();
+    ClientProtocol.Message getAvailableServersRequestMessage =
+        ProtobufUtilities.createProtobufMessage(ProtobufUtilities.createMessageHeader(1233445),
+            protobufRequestBuilder.setGetAvailableServersRequest(
+                ProtobufRequestUtilities.createGetAvailableServersRequest()).build());
+
+    ProtobufProtocolSerializer protobufProtocolSerializer = new ProtobufProtocolSerializer();
+    protobufProtocolSerializer.serialize(getAvailableServersRequestMessage,
+        socket.getOutputStream());
+
+    ClientProtocol.Message getAvailableServersResponseMessage =
+        protobufProtocolSerializer.deserialize(socket.getInputStream());
+    assertEquals(1233445, getAvailableServersResponseMessage.getMessageHeader().getCorrelationId());
+    assertEquals(ClientProtocol.Message.MessageTypeCase.RESPONSE,
+        getAvailableServersResponseMessage.getMessageTypeCase());
+    ClientProtocol.Response messageResponse = getAvailableServersResponseMessage.getResponse();
+    assertEquals(ClientProtocol.Response.ResponseAPICase.GETAVAILABLESERVERSRESPONSE,
+        messageResponse.getResponseAPICase());
+    ServerAPI.GetAvailableServersResponse getAvailableServersResponse =
+        messageResponse.getGetAvailableServersResponse();
+    assertEquals(1, getAvailableServersResponse.getServersCount());
+  }
+
+  @Test
+  public void testInvalidOperationReturnsFailure()
+      throws IOException, InvalidProtocolMessageException {
+    ClientProtocol.Request.Builder protobufRequestBuilder =
+        ProtobufUtilities.createProtobufRequestBuilder();
+    ClientProtocol.Message getAvailableServersRequestMessage =
+        ProtobufUtilities.createProtobufMessage(ProtobufUtilities.createMessageHeader(1233445),
+            protobufRequestBuilder
+                .setGetRegionNamesRequest(ProtobufRequestUtilities.createGetRegionNamesRequest())
+                .build());
+
+    ProtobufProtocolSerializer protobufProtocolSerializer = new ProtobufProtocolSerializer();
+    protobufProtocolSerializer.serialize(getAvailableServersRequestMessage,
+        socket.getOutputStream());
+
+    ClientProtocol.Message getAvailableServersResponseMessage =
+        protobufProtocolSerializer.deserialize(socket.getInputStream());
+    assertEquals(1233445, getAvailableServersResponseMessage.getMessageHeader().getCorrelationId());
+    assertEquals(ClientProtocol.Message.MessageTypeCase.RESPONSE,
+        getAvailableServersResponseMessage.getMessageTypeCase());
+    ClientProtocol.Response messageResponse = getAvailableServersResponseMessage.getResponse();
+    assertEquals(ClientProtocol.Response.ResponseAPICase.ERRORRESPONSE,
+        messageResponse.getResponseAPICase());
+    assertEquals(ProtocolErrorCode.UNSUPPORTED_OPERATION.codeValue,
+        messageResponse.getErrorResponse().getError().getErrorCode());
+  }
+
+  private Integer startCacheWithCacheServer() throws IOException {
+    System.setProperty("geode.feature-protobuf-protocol", "true");
+
+    InternalCache cache = getCache();
+    CacheServer cacheServer = cache.addCacheServer();
+    cacheServer.setPort(0);
+    cacheServer.start();
+    return cacheServer.getPort();
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessorTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessorTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessorTest.java
index 87bfd52..2185b15 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessorTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessorTest.java
@@ -26,6 +26,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
 import org.apache.geode.test.junit.categories.UnitTest;
 
 @Category(UnitTest.class)
@@ -37,6 +38,7 @@ public class ProtobufStreamProcessorTest {
 
     ProtobufStreamProcessor protobufStreamProcessor = new ProtobufStreamProcessor();
     InternalCache mockInternalCache = mock(InternalCache.class);
-    protobufStreamProcessor.receiveMessage(inputStream, outputStream, mockInternalCache);
+    protobufStreamProcessor.receiveMessage(inputStream, outputStream,
+        new ExecutionContext(mockInternalCache));
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
index f2e3199..f4d098c 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
@@ -17,7 +17,6 @@ package org.apache.geode.protocol.protobuf.operations;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
-import java.nio.charset.Charset;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -28,13 +27,14 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.RegionAPI;
 import org.apache.geode.protocol.protobuf.Result;
 import org.apache.geode.protocol.protobuf.Success;
 import org.apache.geode.protocol.protobuf.utilities.ProtobufRequestUtilities;
 import org.apache.geode.protocol.protobuf.utilities.ProtobufUtilities;
-import org.apache.geode.serialization.SerializationService;
 import org.apache.geode.serialization.codec.StringCodec;
 import org.apache.geode.serialization.exception.UnsupportedEncodingTypeException;
 import org.apache.geode.serialization.registry.exception.CodecAlreadyRegisteredForTypeException;
@@ -80,9 +80,9 @@ public class GetAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
   @Test
   public void processReturnsExpectedValuesForValidKeys()
       throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
-      CodecNotRegisteredForTypeException {
-    Result<RegionAPI.GetAllResponse> result =
-        operationHandler.process(serializationServiceStub, generateTestRequest(true), cacheStub);
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
+    Result<RegionAPI.GetAllResponse> result = operationHandler.process(serializationServiceStub,
+        generateTestRequest(true), new ExecutionContext(cacheStub));
 
     Assert.assertTrue(result instanceof Success);
 
@@ -99,10 +99,10 @@ public class GetAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
   }
 
   @Test
-  public void processReturnsNoEntriesForNoKeysRequested()
-      throws UnsupportedEncodingTypeException, CodecNotRegisteredForTypeException {
-    Result<RegionAPI.GetAllResponse> result =
-        operationHandler.process(serializationServiceStub, generateTestRequest(false), cacheStub);
+  public void processReturnsNoEntriesForNoKeysRequested() throws UnsupportedEncodingTypeException,
+      CodecNotRegisteredForTypeException, InvalidExecutionContextException {
+    Result<RegionAPI.GetAllResponse> result = operationHandler.process(serializationServiceStub,
+        generateTestRequest(false), new ExecutionContext(cacheStub));
 
     Assert.assertTrue(result instanceof Success);
 

http://git-wip-us.apache.org/repos/asf/geode/blob/530f48f3/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandlerJUnitTest.java
index 77b088d..cff6ddc 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandlerJUnitTest.java
@@ -14,14 +14,12 @@
  */
 package org.apache.geode.protocol.protobuf.operations;
 
-import org.apache.geode.cache.client.internal.locator.GetAllServersResponse;
-import org.apache.geode.distributed.ConfigurationProperties;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.InternalLocator;
+import org.apache.geode.distributed.internal.LocatorLoadSnapshot;
 import org.apache.geode.distributed.internal.ServerLocation;
-import org.apache.geode.distributed.internal.tcpserver.TcpClient;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.distributed.internal.ServerLocator;
+import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
 import org.apache.geode.protocol.protobuf.BasicTypes;
-import org.apache.geode.protocol.protobuf.Failure;
 import org.apache.geode.protocol.protobuf.Result;
 import org.apache.geode.protocol.protobuf.ServerAPI;
 import org.apache.geode.protocol.protobuf.ServerAPI.GetAvailableServersResponse;
@@ -32,75 +30,48 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Properties;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 @Category(UnitTest.class)
 public class GetAvailableServersOperationHandlerJUnitTest extends OperationHandlerJUnitTest {
 
-  private TcpClient mockTCPClient;
+  public static final String HOSTNAME_1 = "hostname1";
+  public static final int PORT_1 = 12345;
+
+  public static final String HOSTNAME_2 = "hostname2";
+  public static final int PORT_2 = 23456;
+
+  private InternalLocator internalLocatorMock;
 
   @Before
   public void setUp() throws Exception {
     super.setUp();
 
-    operationHandler = mock(GetAvailableServersOperationHandler.class);
-    cacheStub = mock(GemFireCacheImpl.class);
-    when(operationHandler.process(any(), any(), any())).thenCallRealMethod();
-    InternalDistributedSystem mockDistributedSystem = mock(InternalDistributedSystem.class);
-    when(cacheStub.getDistributedSystem()).thenReturn(mockDistributedSystem);
-    Properties mockProperties = mock(Properties.class);
-    when(mockDistributedSystem.getProperties()).thenReturn(mockProperties);
-    String locatorString = "testLocator1Host[12345],testLocator2Host[23456]";
-    when(mockProperties.getProperty(ConfigurationProperties.LOCATORS)).thenReturn(locatorString);
-    mockTCPClient = mock(TcpClient.class);
-    when(((GetAvailableServersOperationHandler) operationHandler).getTcpClient())
-        .thenReturn(mockTCPClient);
-  }
-
-  @Test
-  public void testServerReturnedFromHandler() throws Exception {
-    when(mockTCPClient.requestToServer(any(), any(), anyInt(), anyBoolean()))
-        .thenReturn(new GetAllServersResponse(new ArrayList<ServerLocation>() {
-          {
-            add(new ServerLocation("hostname1", 12345));
-            add(new ServerLocation("hostname2", 23456));
-          }
-        }));
+    operationHandler = new GetAvailableServersOperationHandler();
+    internalLocatorMock = mock(InternalLocator.class);
+    ServerLocator serverLocatorAdviseeMock = mock(ServerLocator.class);
+    LocatorLoadSnapshot locatorLoadSnapshot = mock(LocatorLoadSnapshot.class);
+    ArrayList<Object> serverList = new ArrayList<>();
+    serverList.add(new ServerLocation(HOSTNAME_1, PORT_1));
+    serverList.add(new ServerLocation(HOSTNAME_2, PORT_2));
 
-    ServerAPI.GetAvailableServersRequest getAvailableServersRequest =
-        ProtobufRequestUtilities.createGetAvailableServersRequest();
-    Result operationHandlerResult =
-        operationHandler.process(serializationServiceStub, getAvailableServersRequest, cacheStub);
-    assertTrue(operationHandlerResult instanceof Success);
-    ValidateGetAvailableServersResponse(
-        (GetAvailableServersResponse) operationHandlerResult.getMessage());
+    when(internalLocatorMock.getServerLocatorAdvisee()).thenReturn(serverLocatorAdviseeMock);
+    when(serverLocatorAdviseeMock.getLoadSnapshot()).thenReturn(locatorLoadSnapshot);
+    when(locatorLoadSnapshot.getServers(null)).thenReturn(serverList);
   }
 
   @Test
-  public void testServerReturnedFromSecondLocatorIfFirstDown() throws Exception {
-    when(mockTCPClient.requestToServer(any(), any(), anyInt(), anyBoolean()))
-        .thenThrow(new IOException("BOOM!!!"))
-        .thenReturn(new GetAllServersResponse(new ArrayList<ServerLocation>() {
-          {
-            add(new ServerLocation("hostname1", 12345));
-            add(new ServerLocation("hostname2", 23456));
-          }
-        }));
-
+  public void testServerReturnedFromHandler() throws Exception {
     ServerAPI.GetAvailableServersRequest getAvailableServersRequest =
         ProtobufRequestUtilities.createGetAvailableServersRequest();
-    Result operationHandlerResult =
-        operationHandler.process(serializationServiceStub, getAvailableServersRequest, cacheStub);
+    Result operationHandlerResult = operationHandler.process(serializationServiceStub,
+        getAvailableServersRequest, new ExecutionContext(internalLocatorMock));
     assertTrue(operationHandlerResult instanceof Success);
     ValidateGetAvailableServersResponse(
         (GetAvailableServersResponse) operationHandlerResult.getMessage());
@@ -110,22 +81,10 @@ public class GetAvailableServersOperationHandlerJUnitTest extends OperationHandl
       GetAvailableServersResponse getAvailableServersResponse) {
     assertEquals(2, getAvailableServersResponse.getServersCount());
     BasicTypes.Server server = getAvailableServersResponse.getServers(0);
-    assertEquals("hostname1", server.getHostname());
-    assertEquals(12345, server.getPort());
+    assertEquals(HOSTNAME_1, server.getHostname());
+    assertEquals(PORT_1, server.getPort());
     server = getAvailableServersResponse.getServers(1);
-    assertEquals("hostname2", server.getHostname());
-    assertEquals(23456, server.getPort());
-  }
-
-  @Test
-  public void testProcessFailsIfNoLocatorsAvailable() throws Exception {
-    when(mockTCPClient.requestToServer(any(), any(), anyInt(), anyBoolean()))
-        .thenThrow(new IOException("BOOM!!!"));
-
-    ServerAPI.GetAvailableServersRequest getAvailableServersRequest =
-        ProtobufRequestUtilities.createGetAvailableServersRequest();
-    Result operationHandlerResult =
-        operationHandler.process(serializationServiceStub, getAvailableServersRequest, cacheStub);
-    assertTrue(operationHandlerResult instanceof Failure);
+    assertEquals(HOSTNAME_2, server.getHostname());
+    assertEquals(PORT_2, server.getPort());
   }
 }


[15/25] geode git commit: GEODE-3335: add FlakyTest category to testNavigationAPIS

Posted by ud...@apache.org.
GEODE-3335: add FlakyTest category to testNavigationAPIS


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/c95b32ee
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/c95b32ee
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/c95b32ee

Branch: refs/heads/feature/GEODE-3503
Commit: c95b32ee8c95f2a95767ce771eb6b2f5ee8492c7
Parents: a1ac45d
Author: Kirk Lund <kl...@apache.org>
Authored: Tue Aug 22 14:23:09 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue Aug 22 15:37:35 2017 -0700

----------------------------------------------------------------------
 .../geode/management/RegionManagementDUnitTest.java  | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/c95b32ee/geode-core/src/test/java/org/apache/geode/management/RegionManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/RegionManagementDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/RegionManagementDUnitTest.java
index cb35663..ee7cb95 100644
--- a/geode-core/src/test/java/org/apache/geode/management/RegionManagementDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/RegionManagementDUnitTest.java
@@ -30,11 +30,14 @@ package org.apache.geode.management;
 
 import static java.lang.management.ManagementFactory.getPlatformMBeanServer;
 import static java.util.concurrent.TimeUnit.MINUTES;
-import static org.apache.geode.cache.Region.*;
-import static org.apache.geode.management.internal.MBeanJMXAdapter.*;
-import static org.apache.geode.test.dunit.Host.*;
+import static org.apache.geode.cache.Region.SEPARATOR;
+import static org.apache.geode.management.internal.MBeanJMXAdapter.getDistributedRegionMbeanName;
+import static org.apache.geode.management.internal.MBeanJMXAdapter.getMemberMBeanName;
+import static org.apache.geode.management.internal.MBeanJMXAdapter.getRegionMBeanName;
+import static org.apache.geode.test.dunit.Host.getHost;
 import static org.apache.geode.test.dunit.Invoke.invokeInEveryVM;
-import static org.assertj.core.api.Assertions.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -47,7 +50,6 @@ import javax.management.Notification;
 import javax.management.NotificationListener;
 import javax.management.ObjectName;
 
-import org.apache.geode.test.junit.categories.FlakyTest;
 import org.awaitility.Awaitility;
 import org.awaitility.core.ConditionFactory;
 import org.junit.After;
@@ -80,6 +82,7 @@ import org.apache.geode.management.internal.MBeanJMXAdapter;
 import org.apache.geode.management.internal.SystemManagementService;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.categories.FlakyTest;
 
 /**
  * This class checks and verifies various data and operations exposed through RegionMXBean
@@ -91,7 +94,6 @@ import org.apache.geode.test.junit.categories.DistributedTest;
  * TODO: complete refactoring this test to use ManagementTestRule
  */
 @Category(DistributedTest.class)
-@SuppressWarnings({"serial", "unused"})
 public class RegionManagementDUnitTest extends ManagementTestBase {
 
   private static final String REGION_NAME = "MANAGEMENT_TEST_REGION";
@@ -263,6 +265,7 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
   }
 
   @Test
+  @Category(FlakyTest.class) // GEODE-3335
   public void testNavigationAPIS() throws Exception {
     createManagersAndThenMembers_tmp();
 


[19/25] geode git commit: GEODE-3395 Variable-ize product version and name in user guide - Managing

Posted by ud...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/mm_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/mm_overview.html.md.erb b/geode-docs/managing/management/mm_overview.html.md.erb
index 21967cb..af4220e 100644
--- a/geode-docs/managing/management/mm_overview.html.md.erb
+++ b/geode-docs/managing/management/mm_overview.html.md.erb
@@ -1,6 +1,4 @@
----
-title:  Overview of Geode Management and Monitoring Tools
----
+<% set_title("Overview of", product_name, "Management and Monitoring Tools") %>
 
 <!--
 Licensed to the Apache Software Foundation (ASF) under one or more
@@ -19,33 +17,33 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Geode provides a variety of management tools you can use to manage a Geode distributed system.
+<%=vars.product_name%> provides a variety of management tools you can use to manage a <%=vars.product_name%> distributed system.
 
-The Geode management and monitoring tools allow you to configure all members and processes of a distributed system, monitor operations in the system, and start and stop the members. Internally, Geode uses Java MBeans, specifically MXBeans, to expose management controls and monitoring features. You can monitor and control Geode by writing Java programs that use these MXBeans, or you can use one of several tools provided with Geode to monitor and manage your distributed system. The primary tool for these tasks is the gfsh command-line tool, as described in this section.
+The <%=vars.product_name%> management and monitoring tools allow you to configure all members and processes of a distributed system, monitor operations in the system, and start and stop the members. Internally, <%=vars.product_name%> uses Java MBeans, specifically MXBeans, to expose management controls and monitoring features. You can monitor and control <%=vars.product_name%> by writing Java programs that use these MXBeans, or you can use one of several tools provided with <%=vars.product_name%> to monitor and manage your distributed system. The primary tool for these tasks is the gfsh command-line tool, as described in this section.
 
-Geode provides the following tools to manage a Geode installation:
+<%=vars.product_name%> provides the following tools to manage a <%=vars.product_name%> installation:
 
 ## gfsh Command-line tool
 
-The gfsh command line tool provides a set of commands you use to configure, manage, and monitor a Geode distributed system. gfsh is the recommended tool for managing your distributed system.
+The gfsh command line tool provides a set of commands you use to configure, manage, and monitor a <%=vars.product_name%> distributed system. gfsh is the recommended tool for managing your distributed system.
 
 Use gfsh to:
 
--   Start and stop Geode processes, such as locators and cache servers
+-   Start and stop <%=vars.product_name%> processes, such as locators and cache servers
 -   Deploy applications
 -   Create and destroy regions
 -   Execute functions
 -   Manage disk stores
 -   Import and export data
--   Monitor Geode processes
--   Launch Geode monitoring tools
+-   Monitor <%=vars.product_name%> processes
+-   Launch <%=vars.product_name%> monitoring tools
 -   Shut down a distributed system
--   Script various operations involving Geode members
+-   Script various operations involving <%=vars.product_name%> members
 -   Save the configuration for all members of a distributed system
 
 gfsh runs in its own shell, or you can [execute gfsh commands directly from the OS command line](../../tools_modules/gfsh/os_command_line_execution.html#topic_fpf_y1g_tp). gfsh can interact with remote systems [using the http protocol](../../configuring/cluster_config/gfsh_remote.html). You can also [write scripts that run in a gfsh shell](../../tools_modules/gfsh/command_scripting.html#concept_9B2F7550F16C4717831AD40A56922259) to automate system startup.
 
-You can use gfsh to create shared cluster configurations for your distributed system. You can define configurations that apply to the entire cluster, or that apply only to groups of similar members that all share a common configuration. Geode locators maintain these configurations as a hidden region and distribute the configuration to all locators in the distributed system. The locator also persists the shared configurations on disk as `cluster.xml` and `cluster.properties` files. You can use those shared cluster configuration files to re-start your system, migrate the system to a new environment, add new members to a distributed system, or to restore existing members after a failure.
+You can use gfsh to create shared cluster configurations for your distributed system. You can define configurations that apply to the entire cluster, or that apply only to groups of similar members that all share a common configuration. <%=vars.product_name%> locators maintain these configurations as a hidden region and distribute the configuration to all locators in the distributed system. The locator also persists the shared configurations on disk as `cluster.xml` and `cluster.properties` files. You can use those shared cluster configuration files to re-start your system, migrate the system to a new environment, add new members to a distributed system, or to restore existing members after a failure.
 
 A basic cluster configuration consists of:
 
@@ -55,40 +53,40 @@ A basic cluster configuration consists of:
 
 See [Overview of the Cluster Configuration Service](../../configuring/cluster_config/gfsh_persist.html) and [Cluster Configuration Files and Troubleshooting](../../configuring/cluster_config/gfsh_config_troubleshooting.html#concept_ylt_2cb_y4) for additional details on gfsh cluster configuration files.
 
-Using the gfsh tool, you can easily migrate a Geode-based application from a development environment into a testing or production environment.
+Using the gfsh tool, you can easily migrate a <%=vars.product_name%>-based application from a development environment into a testing or production environment.
 
 ## Executing gfsh commands with the management API
 
-You can also use Geode's management APIs to execute gfsh commands in a Java class. See [Executing gfsh Commands through the Management API](gfsh_and_management_api.html#concept_451F0978285245E69C3E8DE795BD8635).
+You can also use <%=vars.product_name%>'s management APIs to execute gfsh commands in a Java class. See [Executing gfsh Commands through the Management API](gfsh_and_management_api.html#concept_451F0978285245E69C3E8DE795BD8635).
 
 ## Member Configuration Management
 
-When you issue gfsh commands and have the cluster configuration service enabled (on a locator), Geode saves the configurations created within gfsh by building a `cluster.xml` and `cluster.properties` files for the entire cluster, or group of members.
+When you issue gfsh commands and have the cluster configuration service enabled (on a locator), <%=vars.product_name%> saves the configurations created within gfsh by building a `cluster.xml` and `cluster.properties` files for the entire cluster, or group of members.
 
 You can also directly create configurations using `cache.xml` and `gemfire.properties` files and manage the members individually.
 
 ## Java Management Extension (JMX) MBeans
 
-Geode uses a federated Open MBean strategy to manage and monitor all members of the distributed system. Your Java classes interact with a single MBeanServer that aggregates MBeans from other local and remote members. Using this strategy gives you a consolidated, single-agent view of the distributed system.
+<%=vars.product_name%> uses a federated Open MBean strategy to manage and monitor all members of the distributed system. Your Java classes interact with a single MBeanServer that aggregates MBeans from other local and remote members. Using this strategy gives you a consolidated, single-agent view of the distributed system.
 
-Geode's implementation of JMX is industry-standard and friendly to generic JMX clients. You can monitor or manage the distributed system by using any third-party tool that is compliant with JMX. For example, JConsole.
+<%=vars.product_name%>'s implementation of JMX is industry-standard and friendly to generic JMX clients. You can monitor or manage the distributed system by using any third-party tool that is compliant with JMX. For example, JConsole.
 
-See [Apache Geode Management and Monitoring](management_and_monitoring.html)
+See [<%=vars.product_name_long%> Management and Monitoring](management_and_monitoring.html)
 
-## Geode Java API
+## <%=vars.product_name%> Java API
 
-The Geode API provides a set of Java classes you can use to manage and monitor a distributed system. See the <span class="keyword apiname">org.apache.geode.management</span> package in the Geode JavaDocs .
+The <%=vars.product_name%> API provides a set of Java classes you can use to manage and monitor a distributed system. See the <span class="keyword apiname">org.apache.geode.management</span> package in the <%=vars.product_name%> JavaDocs .
 
-## Geode Pulse
+## <%=vars.product_name%> Pulse
 
-Geode Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of Geode clusters, members, and regions.
+<%=vars.product_name%> Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of <%=vars.product_name%> clusters, members, and regions.
 
-Use Pulse to examine total memory, CPU, and disk space used by members, uptime statistics, client connections, and critical notifications. Pulse communicates with a Geode JMX manager to provide a complete view of your Geode deployment.
+Use Pulse to examine total memory, CPU, and disk space used by members, uptime statistics, client connections, and critical notifications. Pulse communicates with a <%=vars.product_name%> JMX manager to provide a complete view of your <%=vars.product_name%> deployment.
 
-See [Geode Pulse](../../tools_modules/pulse/pulse-overview.html).
+See [<%=vars.product_name%> Pulse](../../tools_modules/pulse/pulse-overview.html).
 
 ## JConsole
 
-JConsole is a JMX monitoring utility provided with a Java Development Kit (JDK). You use gfsh to connect to Geode, and then launch JConsole with a gfsh command. The JConsole application allows you to browse MBeans, attributes, operations, and notifications. See [Browsing Geode MBeans through JConsole](mbeans_jconsole.html).
+JConsole is a JMX monitoring utility provided with a Java Development Kit (JDK). You use gfsh to connect to <%=vars.product_name%>, and then launch JConsole with a gfsh command. The JConsole application allows you to browse MBeans, attributes, operations, and notifications. See [Browsing <%=vars.product_name%> MBeans through JConsole](mbeans_jconsole.html).
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/notification_federation_and_alerts.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/notification_federation_and_alerts.html.md.erb b/geode-docs/managing/management/notification_federation_and_alerts.html.md.erb
index b8ad3f6..154bff2 100644
--- a/geode-docs/managing/management/notification_federation_and_alerts.html.md.erb
+++ b/geode-docs/managing/management/notification_federation_and_alerts.html.md.erb
@@ -41,7 +41,7 @@ JMX Managers will emit notifications for all distributed system members with two
 
 ## <a id="topic_212EE5A2ABAB4E8E8EF71807C9ECEF1A__section_7463D13112D54406953416356835E290" class="no-quick-link"></a>System Alert Notifications
 
-System alerts are Geode alerts wrapped within a JMX notification. The JMX Manager registers itself as an alert listener with each member of the system, and by default, it receives all messages logged with the SEVERE alert level by any node in the distributed system. Consequently, the DistributedSystemMXBean will then emit notifications for these alerts on behalf of the DistributedSystem.
+System alerts are <%=vars.product_name%> alerts wrapped within a JMX notification. The JMX Manager registers itself as an alert listener with each member of the system, and by default, it receives all messages logged with the SEVERE alert level by any node in the distributed system. Consequently, the DistributedSystemMXBean will then emit notifications for these alerts on behalf of the DistributedSystem.
 
 By default, the JMX Manager registers itself to send notifications only for SEVERE level alerts. To change the alert level that the JMX Manager will send notifications for, use the `DistributedMXBean.changeAlertLevel` method. Possible alert levels to set are WARNING, ERROR, SEVERE, and NONE. After changing the level, the JMX Manager will only emit that level of log message as notifications.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/member-reconnect.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/member-reconnect.html.md.erb b/geode-docs/managing/member-reconnect.html.md.erb
new file mode 100644
index 0000000..16717ce
--- /dev/null
+++ b/geode-docs/managing/member-reconnect.html.md.erb
@@ -0,0 +1,83 @@
+---
+title:  Handling Forced Cache Disconnection Using Autoreconnect
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+A <%=vars.product_name%> member may be forcibly disconnected from a <%=vars.product_name%> distributed system if the member is unresponsive for a period of time, or if a network partition separates one or more members into a group that is too small to act as the distributed system.
+
+## How the Autoreconnection Process Works
+
+After being disconnected from a distributed system,
+a <%=vars.product_name%> member shuts down and, by default, automatically restarts into 
+a "reconnecting" state,
+while periodically attempting to rejoin the distributed system 
+by contacting a list of known locators.
+If the member succeeds in reconnecting to a known locator, the member rebuilds its view of the distributed system from existing members and receives a new distributed member ID.
+
+If the member cannot connect to a known locator, the member will then check to see if it itself is a locator (or hosting an embedded locator process). If the member is a locator, then the member does a quorum-based reconnect; it will attempt to contact a quorum of the members that were in the membership view just before it became disconnected. If a quorum of members can be contacted, then startup of the distributed system is allowed to begin. Since the reconnecting member does not know which members survived the network partition event, all members that are in a reconnecting state will keep their UDP unicast ports open and respond to ping requests.
+
+Membership quorum is determined using the same member weighting system used in network partition detection. See [Membership Coordinators, Lead Members and Member Weighting](network_partitioning/membership_coordinators_lead_members_and_weighting.html#concept_23C2606D59754106AFBFE17515DF4330).
+
+Note that when a locator is in the reconnecting state,
+it provides no discovery services for the distributed system.
+
+The default settings for reconfiguration of the cache once
+reconnected assume that the cluster configuration service has
+a valid (XML) configuration.
+This will not be the case if the cluster was configured using
+API calls.
+To handle this case,
+either disable autoreconnect by setting the property to
+
+```
+disable-auto-reconnect = true
+```
+
+or, disable the cluster configuration service by setting the property to
+
+```
+enable-cluster-configuration = false
+```
+
+After the cache has reconnected, applications must fetch a reference to the new Cache, Regions, DistributedSystem and other artifacts. Old references will continue to throw cancellation exceptions like `CacheClosedException(cause=ForcedDisconnectException)`.
+
+See the <%=vars.product_name%> `DistributedSystem` and `Cache` Java API documentation for more information.
+
+## Managing the Autoreconnection Process
+
+By default a <%=vars.product_name%> member will try to reconnect until it is told to stop by using the `DistributedSystem.stopReconnecting()` or `Cache.stopReconnecting()` method. You can disable automatic reconnection entirely by setting `disable-auto-reconnect` <%=vars.product_name%> property to "true."
+
+You can use `DistributedSystem` and `Cache` callback methods to perform actions during the reconnect process, or to cancel the reconnect process if necessary.
+
+The `DistributedSystem` and `Cache` API provide several methods you can use to take actions while a member is reconnecting to the distributed system:
+
+-   `DistributedSystem.isReconnecting()` returns true if the member is in the process of reconnecting and recreating the cache after having been removed from the system by other members.
+-   `DistributedSystem.waitUntilReconnected(long, TimeUnit)` waits for a period of time, and then returns a boolean value to indicate whether the member has reconnected to the DistributedSystem. Use a value of -1 seconds to wait indefinitely until the reconnect completes or the member shuts down. Use a value of 0 seconds as a quick probe to determine if the member has reconnected.
+-   `DistributedSystem.getReconnectedSystem()` returns the reconnected DistributedSystem.
+-   `DistributedSystem.stopReconnecting()` stops the reconnection process and ensures that the DistributedSystem stays in a disconnected state.
+-   `Cache.isReconnecting()` returns true if the cache is attempting to reconnect to a distributed system.
+-   `Cache.waitForReconnect(long, TimeUnit)` waits for a period of time, and then returns a boolean value to indicate whether the DistributedSystem has reconnected. Use a value of -1 seconds to wait indefinitely until the reconnect completes or the cache shuts down. Use a value of 0 seconds as a quick probe to determine if the member has reconnected.
+-   `Cache.getReconnectedCache()` returns the reconnected Cache.
+-   `Cache.stopReconnecting()` stops the reconnection process and ensures that the DistributedSystem stays in a disconnected state.
+
+## Operator Intervention
+
+You may need to intervene in the autoreconnection process if processes or hardware have crashed or are otherwise shut down before the network connection is healed. In this case the members in a "reconnecting" state will not be able to find the lost processes through UDP probes and will not rejoin the system until they are able to contact a locator.
+
+

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/cache_consistency.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/cache_consistency.html.md.erb b/geode-docs/managing/monitor_tune/cache_consistency.html.md.erb
index adc10c9..86fbc52 100644
--- a/geode-docs/managing/monitor_tune/cache_consistency.html.md.erb
+++ b/geode-docs/managing/monitor_tune/cache_consistency.html.md.erb
@@ -19,7 +19,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Maintaining data consistency between caches in a distributed Geode system is vital for ensuring its functional integrity and preventing data loss.
+Maintaining data consistency between caches in a distributed <%=vars.product_name%> system is vital for ensuring its functional integrity and preventing data loss.
 
 ## <a id="cache_const__section_lf3_lvn_nr" class="no-quick-link"></a>General Guidelines
 
@@ -28,7 +28,7 @@ Maintaining data consistency between caches in a distributed Geode system is vit
 **Note:**
 If you revoke a member’s disk store, do not restart that member with its disk stores—in isolation—at a later time.
 
-Geode stores information about your persisted data and prevents you from starting a member with a revoked disk store in the running system. But Geode cannot stop you from starting a revoked member in isolation, and running with its revoked data. This is an unlikely situation, but it is possible to do:
+<%=vars.product_name%> stores information about your persisted data and prevents you from starting a member with a revoked disk store in the running system. But <%=vars.product_name%> cannot stop you from starting a revoked member in isolation, and running with its revoked data. This is an unlikely situation, but it is possible to do:
 
 1.  Members A and B are running, both storing Region data to disk.
 2.  Member A goes down.
@@ -43,7 +43,7 @@ Geode stores information about your persisted data and prevents you from startin
 
 **Understand Cache Transactions**
 
-Understanding the operation of Geode transactions can help you minimize situations where the cache could get out of sync.
+Understanding the operation of <%=vars.product_name%> transactions can help you minimize situations where the cache could get out of sync.
 
 Transactions do not work in distributed regions with global scope.
 
@@ -59,7 +59,7 @@ If a cache writer exists during a transaction, then each transaction write opera
 
 A region in a cache with transactions may not stay in sync with a region of the same name in another cache without transactions.
 
-Two applications running the same sequence of operations in their transactions may get different results. This could occur because operations happening outside a transaction in one of the members can overwrite the transaction, even in the process of committing. This could also occur if the results of a large transaction exceed the machine’s memory or the capacity of Geode. Those limits can vary by machine, so the two members may not be in sync.
+Two applications running the same sequence of operations in their transactions may get different results. This could occur because operations happening outside a transaction in one of the members can overwrite the transaction, even in the process of committing. This could also occur if the results of a large transaction exceed the machine’s memory or the capacity of <%=vars.product_name%>. Those limits can vary by machine, so the two members may not be in sync.
 
 ## <a id="cache_const__section_qxx_kvn_nr" class="no-quick-link"></a>Guidelines for Multi-Site Deployments
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/chapter_overview.html.md.erb b/geode-docs/managing/monitor_tune/chapter_overview.html.md.erb
index 34f88f9..8f7e921 100644
--- a/geode-docs/managing/monitor_tune/chapter_overview.html.md.erb
+++ b/geode-docs/managing/monitor_tune/chapter_overview.html.md.erb
@@ -19,42 +19,42 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-A collection of tools and controls allow you to monitor and adjust Apache Geode performance.
+A collection of tools and controls allow you to monitor and adjust <%=vars.product_name_long%> performance.
 
--   **[Improving Performance on vSphere](../../managing/monitor_tune/performance_on_vsphere.html)**
+-   **[Improving Performance on vSphere](performance_on_vsphere.html)**
 
-    This topic provides guidelines for tuning vSphere virtualized environments that host Apache Geode deployments.
+    This topic provides guidelines for tuning vSphere virtualized environments that host <%=vars.product_name_long%> deployments.
 
--   **[Performance Controls](../../managing/monitor_tune/performance_controls.html)**
+-   **[Performance Controls](performance_controls.html)**
 
     This topic provides tuning suggestions of particular interest to developers, primarily programming techniques and cache configuration.
 
--   **[System Member Performance](../../managing/monitor_tune/system_member_performance.html)**
+-   **[System Member Performance](system_member_performance.html)**
 
     You can modify some configuration parameters to improve system member performance.
 
--   **[Slow Receivers with TCP/IP](../../managing/monitor_tune/slow_receivers.html)**
+-   **[Slow Receivers with TCP/IP](slow_receivers.html)**
 
     You have several options for preventing situations that can cause slow receivers of data distributions. The slow receiver options control only peer-to-peer communication using TCP/IP. This discussion does not apply to client/server or multi-site communication, or to communication using the UDP unicast or multicast protocols.
 
--   **[Slow distributed-ack Messages](../../managing/monitor_tune/slow_messages.html)**
+-   **[Slow distributed-ack Messages](slow_messages.html)**
 
     In systems with distributed-ack regions, a sudden large number of distributed-no-ack operations can cause distributed-ack operations to take a long time to complete.
 
--   **[Socket Communication](../../managing/monitor_tune/socket_communication.html)**
+-   **[Socket Communication](socket_communication.html)**
 
-    Geode processes communicate using TCP/IP and UDP unicast and multicast protocols. In all cases, communication uses sockets that you can tune to optimize performance.
+    <%=vars.product_name%> processes communicate using TCP/IP and UDP unicast and multicast protocols. In all cases, communication uses sockets that you can tune to optimize performance.
 
--   **[UDP Communication](../../managing/monitor_tune/udp_communication.html)**
+-   **[UDP Communication](udp_communication.html)**
 
     You can make configuration adjustments to improve multicast and unicast UDP performance of peer-to-peer communication.
 
--   **[Multicast Communication](../../managing/monitor_tune/multicast_communication.html)**
+-   **[Multicast Communication](multicast_communication.html)**
 
-    You can make configuration adjustments to improve the UDP multicast performance of peer-to-peer communication in your Geode system.
+    You can make configuration adjustments to improve the UDP multicast performance of peer-to-peer communication in your <%=vars.product_name%> system.
 
--   **[Maintaining Cache Consistency](../../managing/monitor_tune/cache_consistency.html)**
+-   **[Maintaining Cache Consistency](cache_consistency.html)**
 
-    Maintaining data consistency between caches in a distributed Geode system is vital for ensuring its functional integrity and preventing data loss.
+    Maintaining data consistency between caches in a distributed <%=vars.product_name%> system is vital for ensuring its functional integrity and preventing data loss.
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/multicast_communication.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/multicast_communication.html.md.erb b/geode-docs/managing/monitor_tune/multicast_communication.html.md.erb
index ba823c6..9b87b9a 100644
--- a/geode-docs/managing/monitor_tune/multicast_communication.html.md.erb
+++ b/geode-docs/managing/monitor_tune/multicast_communication.html.md.erb
@@ -19,27 +19,27 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-You can make configuration adjustments to improve the UDP multicast performance of peer-to-peer communication in your Geode system.
+You can make configuration adjustments to improve the UDP multicast performance of peer-to-peer communication in your <%=vars.product_name%> system.
 
-Before you begin, you should understand Geode [Basic Configuration and Programming](../../basic_config/book_intro.html). See also the general communication tuning and UDP tuning covered in [Socket Communication](socket_communication.html) and [UDP Communication](udp_communication.html#udp_comm).
+Before you begin, you should understand <%=vars.product_name%> [Basic Configuration and Programming](../../basic_config/book_intro.html). See also the general communication tuning and UDP tuning covered in [Socket Communication](socket_communication.html) and [UDP Communication](udp_communication.html#udp_comm).
 
--   **[Provisioning Bandwidth for Multicast](../../managing/monitor_tune/multicast_communication_provisioning_bandwidth.html)**
+-   **[Provisioning Bandwidth for Multicast](multicast_communication_provisioning_bandwidth.html)**
 
     Multicast installations require more planning and configuration than TCP installations. With IP multicast, you gain scalability but lose the administrative convenience of TCP.
 
--   **[Testing Multicast Speed Limits](../../managing/monitor_tune/multicast_communication_testing_multicast_speed_limits.html)**
+-   **[Testing Multicast Speed Limits](multicast_communication_testing_multicast_speed_limits.html)**
 
     TCP automatically adjusts its speed to the capability of the processes using it and enforces bandwidth sharing so that every process gets a turn. With multicast, you must determine and explicitly set those limits.
 
--   **[Configuring Multicast Speed Limits](../../managing/monitor_tune/multicast_communication_configuring_speed_limits.html)**
+-   **[Configuring Multicast Speed Limits](multicast_communication_configuring_speed_limits.html)**
 
     After you determine the maximum transmission rate, configure and tune your production system.
 
--   **[Run-time Considerations for Multicast](../../managing/monitor_tune/multicast_communication_runtime_considerations.html)**
+-   **[Run-time Considerations for Multicast](multicast_communication_runtime_considerations.html)**
 
     When you use multicast for messaging and data distribution, you need to understand how the health monitoring setting works and how to control memory use.
 
--   **[Troubleshooting the Multicast Tuning Process](../../managing/monitor_tune/multicast_communication_troubleshooting.html)**
+-   **[Troubleshooting the Multicast Tuning Process](multicast_communication_troubleshooting.html)**
 
     Several problems may arise during the initial testing and tuning process for multicasting.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/multicast_communication_configuring_speed_limits.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/multicast_communication_configuring_speed_limits.html.md.erb b/geode-docs/managing/monitor_tune/multicast_communication_configuring_speed_limits.html.md.erb
index a6cb090..5de92bd 100644
--- a/geode-docs/managing/monitor_tune/multicast_communication_configuring_speed_limits.html.md.erb
+++ b/geode-docs/managing/monitor_tune/multicast_communication_configuring_speed_limits.html.md.erb
@@ -42,7 +42,7 @@ For best performance, the producer and the consumers should run on different mac
 -   Monitor the members that receive data for signs of data loss. A few data loss messages can happen normally during region creation. Multicast retransmit requests and unicast retransmits can also be monitored to detect data loss. Even when you see data loss, the cause of the problem may have nothing to do with the network. However, if it happens constantly then you should try testing the flow control rate again.
 -   If necessary, reconfigure all the `gemfire.properties` files and repeat with lower flow control maximum credits until you find the maximum useful rate for your installation.
 -   Slow system performance might be helped by reducing how far your multicast messaging goes in your network.
--   Reduce multicast latency by disabling batching. By default, Geode uses batching for operations when the region’s scope is distributed-no-ack. Set the `disableBatching` property to true on the application or when starting a cache server process through the `gfsh` command line:
+-   Reduce multicast latency by disabling batching. By default, <%=vars.product_name%> uses batching for operations when the region’s scope is distributed-no-ack. Set the `disableBatching` property to true on the application or when starting a cache server process through the `gfsh` command line:
 
     ``` pre
     gfsh>start server --name=server_name --J=-Dp2p.disableBatching=true

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/multicast_communication_runtime_considerations.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/multicast_communication_runtime_considerations.html.md.erb b/geode-docs/managing/monitor_tune/multicast_communication_runtime_considerations.html.md.erb
index 2468e47..5e98c0b 100644
--- a/geode-docs/managing/monitor_tune/multicast_communication_runtime_considerations.html.md.erb
+++ b/geode-docs/managing/monitor_tune/multicast_communication_runtime_considerations.html.md.erb
@@ -23,7 +23,7 @@ When you use multicast for messaging and data distribution, you need to understa
 
 **Multicast Health Monitor**
 
-The Geode management and monitoring system is supplemented by a `maxRetransmissionRatio` health
+The <%=vars.product_name%> management and monitoring system is supplemented by a `maxRetransmissionRatio` health
 monitoring setting for distributed system members. This ratio is the number of retransmission
 requests received divided by the number of multicast datagrams written. If the ratio is at 1.0, the
 member is retransmitting as many packets as it originally sent. Retransmissions are point-to-point,
@@ -35,11 +35,11 @@ multicast to transmit cache updates. The new member is added, which is running o
 multicast enabled. As a result, there is a retransmission request for every cache update, and the
 `maxRetransmissionRatio` changes to 1.0.
 
-**Controlling Memory Use on Geode Hosts with Multicast**
+**Controlling Memory Use on <%=vars.product_name%> Hosts with Multicast**
 
 Running out of memory can impede a member’s performance and eventually lead to severe errors.
 
-When data is distributed over multicast, Geode incurs a fixed overhead of memory reserved for transmission buffers. A specified amount of memory is reserved for each distributed region. These producer-side buffers are used only when a receiver is not getting enough CPU to read from its own receiving buffer as quickly as the producer is sending. In this case, the receiver complains of lost data. The producer then retrieves the data, if it still exists in its buffer, and resends to the receiver.
+When data is distributed over multicast, <%=vars.product_name%> incurs a fixed overhead of memory reserved for transmission buffers. A specified amount of memory is reserved for each distributed region. These producer-side buffers are used only when a receiver is not getting enough CPU to read from its own receiving buffer as quickly as the producer is sending. In this case, the receiver complains of lost data. The producer then retrieves the data, if it still exists in its buffer, and resends to the receiver.
 
 Tuning the transmission buffers requires a careful balance. Larger buffers mean that more data remains available for retransmission, providing more protection in case of a problem. On the other hand, a larger amount of reserved memory means that less memory is available for caching.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/multicast_communication_testing_multicast_speed_limits.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/multicast_communication_testing_multicast_speed_limits.html.md.erb b/geode-docs/managing/monitor_tune/multicast_communication_testing_multicast_speed_limits.html.md.erb
index 1e8faa7..a2a432a 100644
--- a/geode-docs/managing/monitor_tune/multicast_communication_testing_multicast_speed_limits.html.md.erb
+++ b/geode-docs/managing/monitor_tune/multicast_communication_testing_multicast_speed_limits.html.md.erb
@@ -119,7 +119,7 @@ where:
 </table>
 
 **Note:**
-If your Geode distributed system runs across several subnets, start a receiver on each subnet.
+If your <%=vars.product_name%> distributed system runs across several subnets, start a receiver on each subnet.
 
 In the receiver’s output, look at the Lost/Total Datagrams columns for the number and percentage of lost packets out of the total sent.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/performance_controls.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/performance_controls.html.md.erb b/geode-docs/managing/monitor_tune/performance_controls.html.md.erb
index ddc713c..b8815c2 100644
--- a/geode-docs/managing/monitor_tune/performance_controls.html.md.erb
+++ b/geode-docs/managing/monitor_tune/performance_controls.html.md.erb
@@ -21,25 +21,25 @@ limitations under the License.
 
 This topic provides tuning suggestions of particular interest to developers, primarily programming techniques and cache configuration.
 
-Before you begin, you should understand Apache Geode [Basic Configuration and Programming](../../basic_config/book_intro.html).
+Before you begin, you should understand <%=vars.product_name_long%> [Basic Configuration and Programming](../../basic_config/book_intro.html).
 
--   **[Data Serialization](../../managing/monitor_tune/performance_controls_data_serialization.html)**
+-   **[Data Serialization](performance_controls_data_serialization.html)**
 
-    In addition to standard Java serialization, Geode offers serialization options that give you higher performance and greater flexibility for data storage, transfers, and language types.
+    In addition to standard Java serialization, <%=vars.product_name%> offers serialization options that give you higher performance and greater flexibility for data storage, transfers, and language types.
 
--   **[Setting Cache Timeouts](../../managing/monitor_tune/performance_controls_setting_cache_timeouts.html)**
+-   **[Setting Cache Timeouts](performance_controls_setting_cache_timeouts.html)**
 
     Cache timeout properties can be modified through the gfsh `alter runtime` command (or declared in the `cache.xml` file) and can also be set through methods of the interface, `org.apache.geode.cache.Cache`.
 
--   **[Controlling Socket Use](../../managing/monitor_tune/performance_controls_controlling_socket_use.html)**
+-   **[Controlling Socket Use](performance_controls_controlling_socket_use.html)**
 
     For peer-to-peer communication, you can manage socket use at the system member level and at the thread level.
 
--   **[Management of Slow Receivers](../../managing/monitor_tune/performance_controls_managing_slow_receivers.html)**
+-   **[Management of Slow Receivers](performance_controls_managing_slow_receivers.html)**
 
     You have several options for handling slow members that receive data distribution. The slow receiver options control only peer-to-peer communication between distributed regions using TCP/IP. This topic does not apply to client/server or multi-site communication, or to communication using the UDP unicast or IP multicast protocols.
 
--   **[Increasing the Ratio of Cache Hits](../../managing/monitor_tune/performance_controls_increasing_cache_hits.html)**
+-   **[Increasing the Ratio of Cache Hits](performance_controls_increasing_cache_hits.html)**
 
     The more frequently a get fails to find a valid value in the first cache and has to try a second cache, the more the overall performance is affected.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/performance_controls_data_serialization.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/performance_controls_data_serialization.html.md.erb b/geode-docs/managing/monitor_tune/performance_controls_data_serialization.html.md.erb
index 139f1bb..dd1d4f2 100644
--- a/geode-docs/managing/monitor_tune/performance_controls_data_serialization.html.md.erb
+++ b/geode-docs/managing/monitor_tune/performance_controls_data_serialization.html.md.erb
@@ -19,8 +19,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-In addition to standard Java serialization, Geode offers serialization options that give you higher performance and greater flexibility for data storage, transfers, and language types.
+In addition to standard Java serialization, <%=vars.product_name%> offers serialization options that give you higher performance and greater flexibility for data storage, transfers, and language types.
 
-Under *Developing with Apache Geode*, see [Data Serialization](../../developing/data_serialization/chapter_overview.html#data_serialization).
+Under *Developing with <%=vars.product_name_long%>*, see [Data Serialization](../../developing/data_serialization/chapter_overview.html#data_serialization).
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/performance_on_vsphere.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/performance_on_vsphere.html.md.erb b/geode-docs/managing/monitor_tune/performance_on_vsphere.html.md.erb
index ac6fa85..921a819 100644
--- a/geode-docs/managing/monitor_tune/performance_on_vsphere.html.md.erb
+++ b/geode-docs/managing/monitor_tune/performance_on_vsphere.html.md.erb
@@ -23,7 +23,7 @@ limitations under the License.
 
 Use the latest supported version of the guest OS, and use Java large paging.
 
--   **Use the latest supported version of the guest operating system**. This guideline is probably the most important. Upgrade the guest OS to a recent version supported by Geode. For example, for RHEL, use at least version 7.0 or for SLES, use at least 11.0. For Windows, use Windows Server 2012. For RedHat Linux users, it is particularly beneficial to use RHEL 7 since there are specific enhancements in the RHEL 7 release that improve virtualized latency sensitive workloads.
+-   **Use the latest supported version of the guest operating system**. This guideline is probably the most important. Upgrade the guest OS to a recent version supported by <%=vars.product_name%>. For example, for RHEL, use at least version 7.0 or for SLES, use at least 11.0. For Windows, use Windows Server 2012. For RedHat Linux users, it is particularly beneficial to use RHEL 7 since there are specific enhancements in the RHEL 7 release that improve virtualized latency sensitive workloads.
 -   **Use Java large paging in guest OS**. Configure Java on the guest OS to use large pages. Add the following command line option when launching Java:
 
     ``` pre
@@ -35,7 +35,7 @@ Use the latest supported version of the guest OS, and use Java large paging.
 This section provides VMware- recommended NUMA, CPU, and BIOS settings for your hardware and virtual machines.
 
 -   Always enable hyper-threading, and do not overcommit CPU.
--   For most production Apache Geode servers, always use virtual machines with at least two vCPUs .
+-   For most production <%=vars.product_name_long%> servers, always use virtual machines with at least two vCPUs.
 -   Apply non-uniform memory access (NUMA) locality by sizing virtual machines to fit within the NUMA node.
 -   VMware recommends the following BIOS settings:
     -   **BIOS Power Management Mode:** Maximum Performance.
@@ -82,45 +82,45 @@ These guidelines help you reduce latency.
 This topic discusses use limitations of vSphere vMotion, including the use of it with DRS.
 
 -   When you first commission the data management system, place VMware vSphere Distributed Resource Scheduler™ (DRS) in manual mode to prevent an automatic VMware vSphere vMotion® operation that can affect response times.
--   Reduce or eliminate the use of vMotion to migrate Geode virtual machines when they are under heavy load.
--   Do not allow vMotion migrations with Apache Geode locator processes, as the latency introduced to this process can cause other members of the Apache Geode servers to falsely suspect that other members are dead.
--   Use dedicated Apache Geode vSphere DRS clusters. This is especially important when you consider that the physical NIC and virtual NIC are specifically tuned to disable Interrupt Coalescing on every NIC of an ESXi host in the cluster. This type of tuning benefits Geode workloads, but it can hurt other non-Apache Geode workloads that are memory throughput-bound as opposed to latency sensitive as in the case of Apache Geode workloads.
--   If using a dedicated vSphere DRS cluster is not an option, and Apache Geode must run in a shared DRS cluster, make sure that DRS rules are set up not to perform vMotion migrations on Geode virtual machines.
--   If you must use vMotion for migration, VMware recommends that all vMotion migration activity of Apache Geode members occurs over 10GbE, during periods of low activity and scheduled maintenance windows.
+-   Reduce or eliminate the use of vMotion to migrate <%=vars.product_name%> virtual machines when they are under heavy load.
+-   Do not allow vMotion migrations with <%=vars.product_name_long%> locator processes, as the latency introduced to this process can cause other members of the <%=vars.product_name_long%> servers to falsely suspect that other members are dead.
+-   Use dedicated <%=vars.product_name_long%> vSphere DRS clusters. This is especially important when you consider that the physical NIC and virtual NIC are specifically tuned to disable Interrupt Coalescing on every NIC of an ESXi host in the cluster. This type of tuning benefits <%=vars.product_name%> workloads, but it can hurt other non-<%=vars.product_name_long%> workloads that are memory throughput-bound as opposed to latency sensitive as in the case of <%=vars.product_name_long%> workloads.
+-   If using a dedicated vSphere DRS cluster is not an option, and <%=vars.product_name_long%> must run in a shared DRS cluster, make sure that DRS rules are set up not to perform vMotion migrations on <%=vars.product_name%> virtual machines.
+-   If you must use vMotion for migration, VMware recommends that all vMotion migration activity of <%=vars.product_name_long%> members occurs over 10GbE, during periods of low activity and scheduled maintenance windows.
 
 ## <a id="topic_E53BBF3D09A54953B02DCE2BD00D51E0" class="no-quick-link"></a>Placement and Organization of Virtual Machines
 
 This section provides guidelines on JVM instances and placement of redundant copies of cached data.
 
 -   Have one JVM instance per virtual machine.
--   Increasing the heap space to service the demand for more data is better than installing a second instance of a JVM on a single virtual machine. If increasing the JVM heap size is not an option, consider placing the second JVM on a separate newly created virtual machine, thus promoting more effective horizontal scalability. As you increase the number of Apache Geode servers, also increase the number of virtual machines to maintain a 1:1:1 ratio among the Apache Geode server, the JVM, and the virtual machines.
--   Size for a minimum of four vCPU virtual machines with one Apache Geode server running in one JVM instance. This allows ample CPU cycles for the garbage collector, and the rest for user transactions.
--   Because Apache Geode can place redundant copies of cached data on any virtual machine, it is possible to inadvertently place two redundant data copies on the same ESX/ESXi host. This is not optimal if a host fails. To create a more robust configuration, use VM1-to-VM2 anti-affinity rules, to indicate to vSphere that VM1 and VM2 can never be placed on the same host because they hold redundant data copies.
+-   Increasing the heap space to service the demand for more data is better than installing a second instance of a JVM on a single virtual machine. If increasing the JVM heap size is not an option, consider placing the second JVM on a separate newly created virtual machine, thus promoting more effective horizontal scalability. As you increase the number of <%=vars.product_name_long%> servers, also increase the number of virtual machines to maintain a 1:1:1 ratio among the <%=vars.product_name_long%> server, the JVM, and the virtual machines.
+-   Size for a minimum of four vCPU virtual machines with one <%=vars.product_name_long%> server running in one JVM instance. This allows ample CPU cycles for the garbage collector, and the rest for user transactions.
+-   Because <%=vars.product_name_long%> can place redundant copies of cached data on any virtual machine, it is possible to inadvertently place two redundant data copies on the same ESX/ESXi host. This is not optimal if a host fails. To create a more robust configuration, use VM1-to-VM2 anti-affinity rules, to indicate to vSphere that VM1 and VM2 can never be placed on the same host because they hold redundant data copies.
 
 ## <a id="topic_567308E9DE07406BB5BF420BE77B6558" class="no-quick-link"></a>Virtual Machine Memory Reservation
 
 This section provides guidelines for sizing and setting memory.
 
 -   Set memory reservation at the virtual machine level so that ESXi provides and locks down the needed physical memory upon virtual machine startup. Once allocated, ESXi does not allow the memory to be taken away.
--   Do not overcommit memory for Geode hosts.
--   When sizing memory for a Geode server within one JVM on one virtual machine, the total reserved memory for the virtual machine should not exceed what is available within one NUMA node for optimal performance.
+-   Do not overcommit memory for <%=vars.product_name%> hosts.
+-   When sizing memory for a <%=vars.product_name%> server within one JVM on one virtual machine, the total reserved memory for the virtual machine should not exceed what is available within one NUMA node for optimal performance.
 
-## <a id="topic_424B940584044CF6A685E86802548A27" class="no-quick-link"></a>vSphere High Availability and Apache Geode
+## <a id="topic_424B940584044CF6A685E86802548A27" class="no-quick-link"></a>vSphere High Availability and <%=vars.product_name_long%>
 
-On Apache Geode virtual machines, disable vSphere High Availability (HA).
+On <%=vars.product_name_long%> virtual machines, disable vSphere High Availability (HA).
 
-If you are using a dedicated Apache Geode DRS cluster, then you can disable HA across the cluster. However, if you are using a shared cluster, exclude Geode virtual machines from vSphere HA.
+If you are using a dedicated <%=vars.product_name_long%> DRS cluster, then you can disable HA across the cluster. However, if you are using a shared cluster, exclude <%=vars.product_name%> virtual machines from vSphere HA.
 
-Additionally, to support high availability, you can also set up anti-affinity rules between the Apache Geode virtual machines to prevent two Apache Geode servers from running on the same ESXi host within the same DRS cluster.
+Additionally, to support high availability, you can also set up anti-affinity rules between the <%=vars.product_name_long%> virtual machines to prevent two <%=vars.product_name_long%> servers from running on the same ESXi host within the same DRS cluster.
 
 ## <a id="topic_913B15841C4249A68697F3D91281A645" class="no-quick-link"></a>Storage Guidelines
 
 This section provides storage guidelines for persistence files, binaries, logs, and more.
 
--   Use the PVSCSI driver for I/O intensive Apache Geode workloads.
+-   Use the PVSCSI driver for I/O intensive <%=vars.product_name_long%> workloads.
 -   Align disk partitions at the VMFS and guest operating system levels.
--   Provision VMDK files as eagerzeroedthick to avoid lazy zeroing for Apache Geode members.
--   Use separate VMDKs for Apache Geode persistence files, binaries, and logs.
+-   Provision VMDK files as eagerzeroedthick to avoid lazy zeroing for <%=vars.product_name_long%> members.
+-   Use separate VMDKs for <%=vars.product_name_long%> persistence files, binaries, and logs.
 -   Map a dedicated LUN to each VMDK.
 -   For Linux virtual machines, use NOOP scheduling as the I/O scheduler instead of Completely Fair Queuing (CFQ). Starting with the Linux kernel 2.6, CFQ is the default I/O scheduler in many Linux distributions. See [http://kb.vmware.com/kb/2011861](http://kb.vmware.com/kb/2011861) for more information.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/slow_messages.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/slow_messages.html.md.erb b/geode-docs/managing/monitor_tune/slow_messages.html.md.erb
index 7150802..20e8676 100644
--- a/geode-docs/managing/monitor_tune/slow_messages.html.md.erb
+++ b/geode-docs/managing/monitor_tune/slow_messages.html.md.erb
@@ -30,7 +30,7 @@ The main reasons why a large number of `distributed-no-ack` messages may delay `
 
 You can take these steps to reduce the impact of this problem:
 
-1.  If you’re using TCP, check whether you have socket conservation enabled for your members. It is configured by setting the Geode property `conserve-sockets` to true. If enabled, each application’s threads will share sockets unless you override the setting at the thread level. Work with your application programmers to see whether you might disable sharing entirely or at least for the threads that perform `distributed-ack` operations. These include operations on `distributed-ack` regions and also `netSearches` performed on regions of any distributed scope. (Note: `netSearch` is only performed on regions with a data-policy of empty, normal and preloaded.) If you give each thread that performs `distributed-ack` operations its own socket, you effectively let it scoot to the front of the line ahead of the `distributed-no-ack` operations that are being performed by other threads. The thread-level override is done by calling the `DistributedSystem.setThreadsSocketPolicy(false)` method.
+1.  If you’re using TCP, check whether you have socket conservation enabled for your members. It is configured by setting the <%=vars.product_name%> property `conserve-sockets` to true. If enabled, each application’s threads will share sockets unless you override the setting at the thread level. Work with your application programmers to see whether you might disable sharing entirely or at least for the threads that perform `distributed-ack` operations. These include operations on `distributed-ack` regions and also `netSearches` performed on regions of any distributed scope. (Note: `netSearch` is only performed on regions with a data-policy of empty, normal and preloaded.) If you give each thread that performs `distributed-ack` operations its own socket, you effectively let it scoot to the front of the line ahead of the `distributed-no-ack` operations that are being performed by other threads. The thread-level override is done by calling the `DistributedSystem.setThreadsSocketPolicy(false)` method.
 2.  Reduce your buffer sizes to slow down the distributed-no-ack operations. These changes slow down the threads performing distributed-no-ack operations and allow the thread doing the distributed-ack operations to be sent in a more timely manner.
     -   If you're using UDP (you either have multicast enabled regions or have set `disable-tcp` to true in gemfire.properties), consider reducing the byteAllowance of mcast-flow-control to something smaller than the default of 3.5 megabytes.
     -   If you're using TCP/IP, reduce the `socket-buffer-size` in gemfire.properties.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/slow_receivers.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/slow_receivers.html.md.erb b/geode-docs/managing/monitor_tune/slow_receivers.html.md.erb
index 5e4aebe..88e0ded 100644
--- a/geode-docs/managing/monitor_tune/slow_receivers.html.md.erb
+++ b/geode-docs/managing/monitor_tune/slow_receivers.html.md.erb
@@ -21,13 +21,13 @@ limitations under the License.
 
 You have several options for preventing situations that can cause slow receivers of data distributions. The slow receiver options control only peer-to-peer communication using TCP/IP. This discussion does not apply to client/server or multi-site communication, or to communication using the UDP unicast or multicast protocols.
 
-Before you begin, you should understand Geode [Basic Configuration and Programming](../../basic_config/book_intro.html).
+Before you begin, you should understand <%=vars.product_name%> [Basic Configuration and Programming](../../basic_config/book_intro.html).
 
--   **[Preventing Slow Receivers](../../managing/monitor_tune/slow_receivers_preventing_problems.html)**
+-   **[Preventing Slow Receivers](slow_receivers_preventing_problems.html)**
 
     During system integration, you can identify and eliminate potential causes of slow receivers in peer-to-peer communication.
 
--   **[Managing Slow Receivers](../../managing/monitor_tune/slow_receivers_managing.html)**
+-   **[Managing Slow Receivers](slow_receivers_managing.html)**
 
     If the receiver fails to receive a message, the sender continues to attempt to deliver the message as long as the receiving member is still in the distributed system.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/slow_receivers_managing.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/slow_receivers_managing.html.md.erb b/geode-docs/managing/monitor_tune/slow_receivers_managing.html.md.erb
index 49e93c4..de3bcfa 100644
--- a/geode-docs/managing/monitor_tune/slow_receivers_managing.html.md.erb
+++ b/geode-docs/managing/monitor_tune/slow_receivers_managing.html.md.erb
@@ -21,7 +21,7 @@ limitations under the License.
 
 If the receiver fails to receive a message, the sender continues to attempt to deliver the message as long as the receiving member is still in the distributed system.
 
-During the retry cycle, Geode throws warnings that include this string:
+During the retry cycle, <%=vars.product_name%> throws warnings that include this string:
 
 ``` pre
 will reattempt
@@ -69,7 +69,7 @@ When a process disconnects after receiving a request to do so by a producer, it
 
 These messages only appear in your logs if logging is enabled and the log level is set to a level that includes warning (which it does by default). See [Logging](../logging/logging.html#concept_30DB86B12B454E168B80BB5A71268865).
 
-If your consumer is unable to receive even high priority messages, only the producer’s warnings will appear in the logs. If you see only producer warnings, you can restart the consumer process. Otherwise, the Geode failure detection code will eventually cause the member to leave the distributed system on its own.
+If your consumer is unable to receive even high priority messages, only the producer’s warnings will appear in the logs. If you see only producer warnings, you can restart the consumer process. Otherwise, the <%=vars.product_name%> failure detection code will eventually cause the member to leave the distributed system on its own.
 
 **Use Cases**
 
@@ -83,7 +83,7 @@ These are the main use cases for the slow receiver specifications:
 
 When using a distribution scope other than distributed-no-ack, alerts are issued for slow receivers. A member that isn’t responding to messages may be sick, slow, or missing. Sick or slow members are detected in message transmission and reply-wait processing code, triggering a warning alert first. If a member still isn’t responding, a severe warning alert is issued, indicating that the member may be disconnected from the distributed system. This alert sequence is enabled by setting the ack-wait-threshold and the ack-severe-alert-threshold to some number of seconds.
 
-When ack-severe-alert-threshold is set, regions are configured to use ether distributed-ack or global scope, or use the partition data policy. Geode will wait for a total of ack-wait-threshold seconds for a response to a cache operation, then it logs a warning alert ("Membership: requesting removal of entry(\#). Disconnected as a slow-receiver"). After waiting an additional ack-severe-alert-threshold seconds after the first threshold is reached, the system also informs the failure detection mechanism that the receiver is suspect and may be disconnected, as shown in the following figure.
+When ack-severe-alert-threshold is set, regions are configured to use either distributed-ack or global scope, or use the partition data policy. <%=vars.product_name%> will wait for a total of ack-wait-threshold seconds for a response to a cache operation, then it logs a warning alert ("Membership: requesting removal of entry(\#). Disconnected as a slow-receiver"). After waiting an additional ack-severe-alert-threshold seconds after the first threshold is reached, the system also informs the failure detection mechanism that the receiver is suspect and may be disconnected, as shown in the following figure.
 
 <img src="../../images_svg/member_severe_alert.svg" id="slow_recv__image_BA474143B16744F28DE0AB1CAD00FB48" class="image" />
 The events occur in this order:

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/slow_receivers_preventing_problems.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/slow_receivers_preventing_problems.html.md.erb b/geode-docs/managing/monitor_tune/slow_receivers_preventing_problems.html.md.erb
index ec0c199..b7f8b80 100644
--- a/geode-docs/managing/monitor_tune/slow_receivers_preventing_problems.html.md.erb
+++ b/geode-docs/managing/monitor_tune/slow_receivers_preventing_problems.html.md.erb
@@ -27,19 +27,19 @@ Slowing is more likely to occur when applications run many threads, send large m
 
 **Host Resources**
 
-Make sure that the machines that run Geode members have enough CPU available to them. Do not run any other heavyweight processes on the same machine.
+Make sure that the machines that run <%=vars.product_name%> members have enough CPU available to them. Do not run any other heavyweight processes on the same machine.
 
-The machines that host Geode application and cache server processes should have comparable computing power and memory capacity. Otherwise, members on the less powerful machines tend to have trouble keeping up with the rest of the group.
+The machines that host <%=vars.product_name%> application and cache server processes should have comparable computing power and memory capacity. Otherwise, members on the less powerful machines tend to have trouble keeping up with the rest of the group.
 
 **Network Capacity**
 
-Eliminate congested areas on the network by rebalancing the traffic load. Work with your network administrator to identify and eliminate traffic bottlenecks, whether caused by the architecture of the distributed Geode system or by contention between the Geode traffic and other traffic on your network. Consider whether more subnets are needed to separate the Geode administrative traffic from Geode data transport and to separate all the Geode traffic from the rest of your network load.
+Eliminate congested areas on the network by rebalancing the traffic load. Work with your network administrator to identify and eliminate traffic bottlenecks, whether caused by the architecture of the distributed <%=vars.product_name%> system or by contention between the <%=vars.product_name%> traffic and other traffic on your network. Consider whether more subnets are needed to separate the <%=vars.product_name%> administrative traffic from <%=vars.product_name%> data transport and to separate all the <%=vars.product_name%> traffic from the rest of your network load.
 
 The network connections between hosts need to have equal bandwidth. If not, you can end up with a configuration like the multicast example in the following figure, which creates conflicts among the members. For example, if app1 sends out data at 7Mbps, app3 and app4 would be fine, but app2 would miss some data. In that case, app2 contacts app1 on the TCP channel and sends a log message that it’s dropping data.
 <img src="../../images_svg/unbalanced_network_capacity_probs.svg" id="slow_recv__image_F8C424AB97C444298993294000676150" class="image" />
 
 **Plan for Growth**
 
-Upgrade the infrastructure to the level required for acceptable performance. Analyze the expected Geode traffic in comparison to the network’s capacity. Build in extra capacity for growth and high-traffic spikes. Similarly, evaluate whether the machines that host Geode application and cache server processes can handle the expected load.
+Upgrade the infrastructure to the level required for acceptable performance. Analyze the expected <%=vars.product_name%> traffic in comparison to the network’s capacity. Build in extra capacity for growth and high-traffic spikes. Similarly, evaluate whether the machines that host <%=vars.product_name%> application and cache server processes can handle the expected load.
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/socket_communication.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/socket_communication.html.md.erb b/geode-docs/managing/monitor_tune/socket_communication.html.md.erb
index a97986a..2da40f9 100644
--- a/geode-docs/managing/monitor_tune/socket_communication.html.md.erb
+++ b/geode-docs/managing/monitor_tune/socket_communication.html.md.erb
@@ -19,29 +19,29 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Geode processes communicate using TCP/IP and UDP unicast and multicast protocols. In all cases, communication uses sockets that you can tune to optimize performance.
+<%=vars.product_name%> processes communicate using TCP/IP and UDP unicast and multicast protocols. In all cases, communication uses sockets that you can tune to optimize performance.
 
-The adjustments you make to tune your Geode communication may run up against operating system limits. If this happens, check with your system administrator about adjusting the operating system settings.
+The adjustments you make to tune your <%=vars.product_name%> communication may run up against operating system limits. If this happens, check with your system administrator about adjusting the operating system settings.
 
-All of the settings discussed here are listed as `gemfire.properties` and `cache.xml` settings. They can also be configured through the API and some can be configured at the command line. Before you begin, you should understand Geode [Basic Configuration and Programming](../../basic_config/book_intro.html).
+All of the settings discussed here are listed as `gemfire.properties` and `cache.xml` settings. They can also be configured through the API and some can be configured at the command line. Before you begin, you should understand <%=vars.product_name%> [Basic Configuration and Programming](../../basic_config/book_intro.html).
 
--   **[Setting Socket Buffer Sizes](../../managing/monitor_tune/socket_communication_setting_socket_buffer_sizes.html)**
+-   **[Setting Socket Buffer Sizes](socket_communication_setting_socket_buffer_sizes.html)**
 
     When you determine buffer size settings, you try to strike a balance between communication needs and other processing.
 
--   **[Ephemeral TCP Port Limits](../../managing/monitor_tune/socket_communication_ephemeral_tcp_port_limits.html)**
+-   **[Ephemeral TCP Port Limits](socket_communication_ephemeral_tcp_port_limits.html)**
 
     By default, Windows’ ephemeral ports are within the range 1024-4999, inclusive. You can increase the range.
 
--   **[Making Sure You Have Enough Sockets](../../managing/monitor_tune/socket_communication_have_enough_sockets.html)**
+-   **[Making Sure You Have Enough Sockets](socket_communication_have_enough_sockets.html)**
 
     The number of sockets available to your applications is governed by operating system limits.
 
--   **[TCP/IP KeepAlive Configuration](../../managing/monitor_tune/socket_tcp_keepalive.html)**
+-   **[TCP/IP KeepAlive Configuration](socket_tcp_keepalive.html)**
 
-    Geode supports TCP KeepAlive to prevent socket connections from being timed out.
+    <%=vars.product_name%> supports TCP KeepAlive to prevent socket connections from being timed out.
 
--   **[TCP/IP Peer-to-Peer Handshake Timeouts](../../managing/monitor_tune/socket_communication_tcpip_p2p_handshake_timeouts.html)**
+-   **[TCP/IP Peer-to-Peer Handshake Timeouts](socket_communication_tcpip_p2p_handshake_timeouts.html)**
 
     You can alleviate connection handshake timeouts for TCP/IP connections by increasing the connection handshake timeout interval with the system property p2p.handshakeTimeoutMs.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb b/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb
index 839e9be..abadaa8 100644
--- a/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb
+++ b/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb
@@ -21,7 +21,7 @@ limitations under the License.
 
 The number of sockets available to your applications is governed by operating system limits.
 
-Sockets use file descriptors and the operating system’s view of your application’s socket use is expressed in terms of file descriptors. There are two limits, one on the maximum descriptors available to a single application and the other on the total number of descriptors available in the system. If you get error messages telling you that you have too many files open, you might be hitting the operating system limits with your use of sockets. Your system administrator might be able to increase the system limits so that you have more available. You can also tune your members to use fewer sockets for their outgoing connections. This section discusses socket use in Geode and ways to limit socket consumption in your Geode members.
+Sockets use file descriptors and the operating system’s view of your application’s socket use is expressed in terms of file descriptors. There are two limits, one on the maximum descriptors available to a single application and the other on the total number of descriptors available in the system. If you get error messages telling you that you have too many files open, you might be hitting the operating system limits with your use of sockets. Your system administrator might be able to increase the system limits so that you have more available. You can also tune your members to use fewer sockets for their outgoing connections. This section discusses socket use in <%=vars.product_name%> and ways to limit socket consumption in your <%=vars.product_name%> members.
 
 ## <a id="socket_comm__section_31B4EFAD6F384AB1BEBCF148D3DEA514" class="no-quick-link"></a>Socket Sharing
 
@@ -158,7 +158,7 @@ In this table, M is the total number of members in the distributed system.
 </tbody>
 </table>
 
-With client/server installations, the number of client connections to any single server is undetermined, but Geode’s server load balancing and conditioning keeps the connections fairly evenly distributed among servers.
+With client/server installations, the number of client connections to any single server is undetermined, but <%=vars.product_name%>’s server load balancing and conditioning keeps the connections fairly evenly distributed among servers.
 
 Servers are peers in their own distributed system and have the additional socket requirements as noted in the Peer-to-Peer section above.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/socket_communication_setting_socket_buffer_sizes.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/socket_communication_setting_socket_buffer_sizes.html.md.erb b/geode-docs/managing/monitor_tune/socket_communication_setting_socket_buffer_sizes.html.md.erb
index 665b98b..c029d92 100644
--- a/geode-docs/managing/monitor_tune/socket_communication_setting_socket_buffer_sizes.html.md.erb
+++ b/geode-docs/managing/monitor_tune/socket_communication_setting_socket_buffer_sizes.html.md.erb
@@ -99,7 +99,7 @@ This table lists the settings for the various member relationships and protocols
 
 **TCP/IP Buffer Sizes**
 
-If possible, your TCP/IP buffer size settings should match across your Geode installation. At a minimum, follow the guidelines listed here.
+If possible, your TCP/IP buffer size settings should match across your <%=vars.product_name%> installation. At a minimum, follow the guidelines listed here.
 
 -   **Peer-to-peer**. The socket-buffer-size setting in `gemfire.properties` should be the same throughout your distributed system.
 -   **Client/server**. The client’s pool socket-buffer size-should match the setting for the servers the pool uses, as in these example `cache.xml` snippets:

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/socket_tcp_keepalive.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/socket_tcp_keepalive.html.md.erb b/geode-docs/managing/monitor_tune/socket_tcp_keepalive.html.md.erb
index f5512bf..f600d22 100644
--- a/geode-docs/managing/monitor_tune/socket_tcp_keepalive.html.md.erb
+++ b/geode-docs/managing/monitor_tune/socket_tcp_keepalive.html.md.erb
@@ -19,9 +19,9 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Geode supports TCP KeepAlive to prevent socket connections from being timed out.
+<%=vars.product_name%> supports TCP KeepAlive to prevent socket connections from being timed out.
 
-The `gemfire.enableTcpKeepAlive` system property prevents connections that appear idle from being timed out (for example, by a firewall.) When configured to true, Geode enables the SO\_KEEPALIVE option for individual sockets. This operating system-level setting allows the socket to send verification checks (ACK requests) to remote systems in order to determine whether or not to keep the socket connection alive.
+The `gemfire.enableTcpKeepAlive` system property prevents connections that appear idle from being timed out (for example, by a firewall.) When configured to true, <%=vars.product_name%> enables the SO\_KEEPALIVE option for individual sockets. This operating system-level setting allows the socket to send verification checks (ACK requests) to remote systems in order to determine whether or not to keep the socket connection alive.
 
 **Note:**
 The time intervals for sending the first ACK KeepAlive request, the subsequent ACK requests and the number of requests to send before closing the socket is configured on the operating system level.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb b/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb
index 1a3c543..b468bc8 100644
--- a/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb
+++ b/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb
@@ -78,7 +78,7 @@ If possible, your TCP/IP buffer size settings should match across your installat
     ```
 
 **Note:**
-WAN deployments increase the messaging demands on a Geode system. To avoid hangs related to WAN messaging, always set `conserve-sockets=false` for GemFire members that participate in a WAN deployment.
+WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always set `conserve-sockets=false` for <%=vars.product_name%> members that participate in a WAN deployment.
 
 ## <a id="socket_comm__section_4A7C60D4471A4339884AA5AAC97B4DAA" class="no-quick-link"></a>Multi-site (WAN) Socket Requirements
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/system_member_performance.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/system_member_performance.html.md.erb b/geode-docs/managing/monitor_tune/system_member_performance.html.md.erb
index 49b9f62..d5ed52f 100644
--- a/geode-docs/managing/monitor_tune/system_member_performance.html.md.erb
+++ b/geode-docs/managing/monitor_tune/system_member_performance.html.md.erb
@@ -23,19 +23,19 @@ You can modify some configuration parameters to improve system member performanc
 
 Before doing so, you should understand [Basic Configuration and Programming](../../basic_config/book_intro.html).
 
--   **[Distributed System Member Properties](../../managing/monitor_tune/system_member_performance_distributed_system_member.html)**
+-   **[Distributed System Member Properties](system_member_performance_distributed_system_member.html)**
 
     Several performance-related properties apply to a cache server or application that connects to the distributed system.
 
--   **[JVM Memory Settings and System Performance](../../managing/monitor_tune/system_member_performance_jvm_mem_settings.html)**
+-   **[JVM Memory Settings and System Performance](system_member_performance_jvm_mem_settings.html)**
 
     You configure JVM memory settings for the Java application by adding parameters to the java invocation. For the cache server, you add them to the command-line parameters for the gfsh `start server` command.
 
--   **[Garbage Collection and System Performance](../../managing/monitor_tune/system_member_performance_garbage.html)**
+-   **[Garbage Collection and System Performance](system_member_performance_garbage.html)**
 
     If your application exhibits unacceptably high latencies, you might improve performance by modifying your JVM’s garbage collection behavior.
 
--   **[Connection Thread Settings and Performance](../../managing/monitor_tune/system_member_performance_connection_thread_settings.html)**
+-   **[Connection Thread Settings and Performance](system_member_performance_connection_thread_settings.html)**
 
     When many peer processes are started concurrently, you can improve the distributed system connect time by setting the p2p.HANDSHAKE\_POOL\_SIZE system property value to the expected number of members.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/system_member_performance_jvm_mem_settings.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/system_member_performance_jvm_mem_settings.html.md.erb b/geode-docs/managing/monitor_tune/system_member_performance_jvm_mem_settings.html.md.erb
index 4440b25..41ebaeb 100644
--- a/geode-docs/managing/monitor_tune/system_member_performance_jvm_mem_settings.html.md.erb
+++ b/geode-docs/managing/monitor_tune/system_member_performance_jvm_mem_settings.html.md.erb
@@ -45,7 +45,7 @@ You configure JVM memory settings for the Java application by adding parameters
     gfsh>start server --name=server-name --J=-XX:MaxDirectMemorySize=256M
     ```
 
--   JVM stack size—Each thread in a Java application has its own stack. The stack is used to hold return addresses, arguments to functions and method calls, and so on. Since Geode is a highly multi-threaded system, at any given point in time there are multiple thread pools and threads that are in use. The default stack size setting for a thread in Java is 1MB. Stack size has to be allocated in contiguous blocks and if the machine is being used actively and there are many threads running in the system (Task Manager shows the number of active threads), you may encounter an `OutOfMemory error: unable to create new native thread`, even though your process has enough available heap. If this happens, consider reducing the stack size requirement for threads on the cache server. The following parameter added to the Java application startup limits the maximum size of the stack.
+-   JVM stack size—Each thread in a Java application has its own stack. The stack is used to hold return addresses, arguments to functions and method calls, and so on. Since <%=vars.product_name%> is a highly multi-threaded system, at any given point in time there are multiple thread pools and threads that are in use. The default stack size setting for a thread in Java is 1MB. Stack size has to be allocated in contiguous blocks and if the machine is being used actively and there are many threads running in the system (Task Manager shows the number of active threads), you may encounter an `OutOfMemory error: unable to create new native thread`, even though your process has enough available heap. If this happens, consider reducing the stack size requirement for threads on the cache server. The following parameter added to the Java application startup limits the maximum size of the stack.
 
     ``` pre
     -Xss384k

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/monitor_tune/udp_communication.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/monitor_tune/udp_communication.html.md.erb b/geode-docs/managing/monitor_tune/udp_communication.html.md.erb
index a42aa25..9b5b913 100644
--- a/geode-docs/managing/monitor_tune/udp_communication.html.md.erb
+++ b/geode-docs/managing/monitor_tune/udp_communication.html.md.erb
@@ -21,20 +21,20 @@ limitations under the License.
 
 You can make configuration adjustments to improve multicast and unicast UDP performance of peer-to-peer communication.
 
-You can tune your Geode UDP messaging to maximize throughput. There are two main tuning goals: to use the largest reasonable datagram packet sizes and to reduce retransmission rates. These actions reduce messaging overhead and overall traffic on your network while still getting your data where it needs to go. Geode also provides statistics to help you decide when to change your UDP messaging settings.
+You can tune your <%=vars.product_name%> UDP messaging to maximize throughput. There are two main tuning goals: to use the largest reasonable datagram packet sizes and to reduce retransmission rates. These actions reduce messaging overhead and overall traffic on your network while still getting your data where it needs to go. <%=vars.product_name%> also provides statistics to help you decide when to change your UDP messaging settings.
 
-Before you begin, you should understand Geode [Basic Configuration and Programming](../../basic_config/book_intro.html). See also the general communication tuning and multicast-specific tuning covered in [Socket Communication](socket_communication.html) and [Multicast Communication](multicast_communication.html#multicast).
+Before you begin, you should understand <%=vars.product_name%> [Basic Configuration and Programming](../../basic_config/book_intro.html). See also the general communication tuning and multicast-specific tuning covered in [Socket Communication](socket_communication.html) and [Multicast Communication](multicast_communication.html#multicast).
 
 ## <a id="udp_comm__section_4089ACC33AF34FA888BAE3CA3602A730" class="no-quick-link"></a>UDP Datagram Size
 
-You can change the UDP datagram size with the Geode property `udp-fragment-size`. This is the maximum packet size for transmission over UDP unicast or multicast sockets. When possible, smaller messages are combined into batches up to the size of this setting.
+You can change the UDP datagram size with the <%=vars.product_name%> property `udp-fragment-size`. This is the maximum packet size for transmission over UDP unicast or multicast sockets. When possible, smaller messages are combined into batches up to the size of this setting.
 
 Most operating systems set a maximum transmission size of 64k for UDP datagrams, so this setting should be kept under 60k to allow for communication headers. Setting the fragment size too high can result in extra network traffic if your network is subject to packet loss, as more data must be resent for each retransmission. If many UDP retransmissions appear in DistributionStats, you may achieve better throughput by lowering the fragment size.
 
 ## <a id="udp_comm__section_B9882A4EBA004599B2207B9CB1D3ADC9" class="no-quick-link"></a>UDP Flow Control
 
 UDP protocols typically have a flow-control protocol built into them to keep processes from being
-overrun by incoming no-ack messages. The Geode UDP flow-control protocol is a credit based system in
+overrun by incoming no-ack messages. The <%=vars.product_name%> UDP flow-control protocol is a credit based system in
 which the sender has a maximum number of bytes it can send before getting its byte credit count
 replenished, or recharged, by its receivers. While its byte credits are too low, the sender
 waits. The receivers do their best to anticipate the sender’s recharge requirements and provide
@@ -42,7 +42,7 @@ recharges before they are needed. If the sender's credits run too low, it explic
 recharge from its receivers.
 
 This flow-control protocol, which is used for all multicast and unicast no-ack messaging, is
-configured using a three-part Geode property `mcast-flow-control`. This property is composed of:
+configured using a three-part <%=vars.product_name%> property `mcast-flow-control`. This property is composed of:
 
 -   `byteAllowance`—Determines how many bytes (also referred to as credits) can be sent before receiving a recharge from the receiving processes.
 -   `rechargeThreshold`—Sets a lower limit on the ratio of the sender’s remaining credit to its `byteAllowance`. When the ratio goes below this limit, the receiver automatically sends a recharge. This reduces recharge request messaging from the sender and helps keep the sender from blocking while waiting for recharges.
@@ -52,7 +52,7 @@ In a well-tuned system, where consumers of cache events are keeping up with prod
 
 ## <a id="udp_comm__section_FB1F54A41D2643A29DB416D309ED4C56" class="no-quick-link"></a>UDP Retransmission Statistics
 
-Geode stores retransmission statistics for its senders and receivers. You can use these statistics to help determine whether your flow control and fragment size settings are appropriate for your system.
+<%=vars.product_name%> stores retransmission statistics for its senders and receivers. You can use these statistics to help determine whether your flow control and fragment size settings are appropriate for your system.
 
 The retransmission rates are stored in the DistributionStats `ucastRetransmits` and
 `mcastRetransmits`. For multicast, there is also a receiver-side statistic `mcastRetransmitRequests`


[03/25] geode git commit: GEODE-3406: Address PR feedback

Posted by ud...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandlerJUnitTest.java
index 64d9f67..d36ad41 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandlerJUnitTest.java
@@ -16,7 +16,7 @@ package org.apache.geode.protocol.protobuf.operations;
 
 import com.google.protobuf.ByteString;
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.Failure;
@@ -68,7 +68,7 @@ public class PutRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
       CodecAlreadyRegisteredForTypeException, InvalidExecutionContextException {
     PutRequestOperationHandler operationHandler = new PutRequestOperationHandler();
     Result<RegionAPI.PutResponse> result = operationHandler.process(serializationServiceStub,
-        generateTestRequest(), new ExecutionContext(cacheStub));
+        generateTestRequest(), new MessageExecutionContext(cacheStub));
 
     assertTrue(result instanceof Success);
 
@@ -99,7 +99,7 @@ public class PutRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
     RegionAPI.PutRequest putRequest =
         ProtobufRequestUtilities.createPutRequest(TEST_REGION, testEntry).getPutRequest();
     Result<RegionAPI.PutResponse> result = operationHandler.process(serializationServiceStub,
-        putRequest, new ExecutionContext(cacheStub));
+        putRequest, new MessageExecutionContext(cacheStub));
 
     assertTrue(result instanceof Failure);
     assertEquals(ProtocolErrorCode.VALUE_ENCODING_ERROR.codeValue,
@@ -113,7 +113,7 @@ public class PutRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
     when(cacheStub.getRegion(TEST_REGION)).thenReturn(null);
     PutRequestOperationHandler operationHandler = new PutRequestOperationHandler();
     Result<RegionAPI.PutResponse> result = operationHandler.process(serializationServiceStub,
-        generateTestRequest(), new ExecutionContext(cacheStub));
+        generateTestRequest(), new MessageExecutionContext(cacheStub));
 
     assertTrue(result instanceof Failure);
     assertEquals(ProtocolErrorCode.REGION_NOT_FOUND.codeValue,
@@ -128,7 +128,7 @@ public class PutRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
 
     PutRequestOperationHandler operationHandler = new PutRequestOperationHandler();
     Result<RegionAPI.PutResponse> result = operationHandler.process(serializationServiceStub,
-        generateTestRequest(), new ExecutionContext(cacheStub));
+        generateTestRequest(), new MessageExecutionContext(cacheStub));
 
     assertTrue(result instanceof Failure);
     assertEquals(ProtocolErrorCode.CONSTRAINT_VIOLATION.codeValue,

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
index 47d6231..4350ece 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
@@ -16,7 +16,7 @@ package org.apache.geode.protocol.protobuf.operations;
 
 import com.google.protobuf.ByteString;
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.ClientProtocol;
@@ -76,7 +76,7 @@ public class RemoveRequestOperationHandlerJUnitTest extends OperationHandlerJUni
       CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.RemoveRequest removeRequest = generateTestRequest(false, false).getRemoveRequest();
     Result<RegionAPI.RemoveResponse> result = operationHandler.process(serializationServiceStub,
-        removeRequest, new ExecutionContext(cacheStub));
+        removeRequest, new MessageExecutionContext(cacheStub));
 
     assertTrue(result instanceof Success);
     verify(regionStub).remove(TEST_KEY);
@@ -88,7 +88,7 @@ public class RemoveRequestOperationHandlerJUnitTest extends OperationHandlerJUni
       CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.RemoveRequest removeRequest = generateTestRequest(true, false).getRemoveRequest();
     Result<RegionAPI.RemoveResponse> result = operationHandler.process(serializationServiceStub,
-        removeRequest, new ExecutionContext(cacheStub));
+        removeRequest, new MessageExecutionContext(cacheStub));
 
     assertTrue(result instanceof Failure);
     assertEquals(ProtocolErrorCode.REGION_NOT_FOUND.codeValue,
@@ -101,7 +101,7 @@ public class RemoveRequestOperationHandlerJUnitTest extends OperationHandlerJUni
       CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.RemoveRequest removeRequest = generateTestRequest(false, true).getRemoveRequest();
     Result<RegionAPI.RemoveResponse> result = operationHandler.process(serializationServiceStub,
-        removeRequest, new ExecutionContext(cacheStub));
+        removeRequest, new MessageExecutionContext(cacheStub));
 
     assertTrue(result instanceof Success);
   }
@@ -124,7 +124,7 @@ public class RemoveRequestOperationHandlerJUnitTest extends OperationHandlerJUni
     RegionAPI.RemoveRequest removeRequest =
         ProtobufRequestUtilities.createRemoveRequest(TEST_REGION, encodedKey).getRemoveRequest();;
     Result<RegionAPI.RemoveResponse> result = operationHandler.process(serializationServiceStub,
-        removeRequest, new ExecutionContext(cacheStub));
+        removeRequest, new MessageExecutionContext(cacheStub));
 
     assertTrue(result instanceof Failure);
     assertEquals(ProtocolErrorCode.VALUE_ENCODING_ERROR.codeValue,


[23/25] geode git commit: GEODE-3184: Cleaned up Cargo tests

Posted by ud...@apache.org.
GEODE-3184: Cleaned up Cargo tests

This closes #722


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/a229933c
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/a229933c
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/a229933c

Branch: refs/heads/feature/GEODE-3503
Commit: a229933ce4dfa2db462f81fc864a4bb1b78e2d08
Parents: fa29ec1
Author: David Anuta <da...@gmail.com>
Authored: Fri Aug 18 09:41:24 2017 -0700
Committer: Jason Huynh <hu...@gmail.com>
Committed: Wed Aug 23 11:16:05 2017 -0700

----------------------------------------------------------------------
 .../modules/session/catalina/DeltaSession.java  |  46 +++----
 .../geode/session/tests/CargoTestBase.java      | 127 ++++++-------------
 .../org/apache/geode/session/tests/Client.java  |  12 +-
 .../geode/session/tests/ContainerInstall.java   |  37 ++++--
 .../geode/session/tests/ContainerManager.java   |   8 ++
 .../tests/GenericAppServerClientServerTest.java |   8 +-
 .../tests/GenericAppServerContainer.java        |  10 +-
 .../session/tests/GenericAppServerInstall.java  |   4 +-
 .../geode/session/tests/ServerContainer.java    |  13 +-
 .../session/tests/TomcatClientServerTest.java   |  23 ++--
 .../geode/session/tests/TomcatContainer.java    |   4 +-
 .../geode/session/tests/TomcatInstall.java      |   6 +-
 12 files changed, 148 insertions(+), 150 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession.java
----------------------------------------------------------------------
diff --git a/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession.java b/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession.java
index 27e5bce..4aa894a 100644
--- a/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession.java
+++ b/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession.java
@@ -14,28 +14,6 @@
  */
 package org.apache.geode.modules.session.catalina;
 
-import org.apache.geode.DataSerializable;
-import org.apache.geode.DataSerializer;
-import org.apache.geode.Delta;
-import org.apache.geode.InvalidDeltaException;
-import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.lru.Sizeable;
-import org.apache.geode.internal.util.BlobHelper;
-import org.apache.geode.modules.gatewaydelta.GatewayDelta;
-import org.apache.geode.modules.gatewaydelta.GatewayDeltaEvent;
-import org.apache.geode.modules.session.catalina.internal.DeltaSessionAttributeEvent;
-import org.apache.geode.modules.session.catalina.internal.DeltaSessionAttributeEventBatch;
-import org.apache.geode.modules.session.catalina.internal.DeltaSessionDestroyAttributeEvent;
-import org.apache.geode.modules.session.catalina.internal.DeltaSessionUpdateAttributeEvent;
-import org.apache.catalina.Manager;
-import org.apache.catalina.ha.session.SerializablePrincipal;
-import org.apache.catalina.realm.GenericPrincipal;
-import org.apache.catalina.security.SecurityUtil;
-import org.apache.catalina.session.StandardSession;
-import org.apache.juli.logging.Log;
-import org.apache.juli.logging.LogFactory;
-
-import javax.servlet.http.HttpSession;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
@@ -52,6 +30,30 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
+import javax.servlet.http.HttpSession;
+
+import org.apache.catalina.Manager;
+import org.apache.catalina.ha.session.SerializablePrincipal;
+import org.apache.catalina.realm.GenericPrincipal;
+import org.apache.catalina.security.SecurityUtil;
+import org.apache.catalina.session.StandardSession;
+import org.apache.juli.logging.Log;
+import org.apache.juli.logging.LogFactory;
+
+import org.apache.geode.DataSerializable;
+import org.apache.geode.DataSerializer;
+import org.apache.geode.Delta;
+import org.apache.geode.InvalidDeltaException;
+import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.lru.Sizeable;
+import org.apache.geode.internal.util.BlobHelper;
+import org.apache.geode.modules.gatewaydelta.GatewayDelta;
+import org.apache.geode.modules.gatewaydelta.GatewayDeltaEvent;
+import org.apache.geode.modules.session.catalina.internal.DeltaSessionAttributeEvent;
+import org.apache.geode.modules.session.catalina.internal.DeltaSessionAttributeEventBatch;
+import org.apache.geode.modules.session.catalina.internal.DeltaSessionDestroyAttributeEvent;
+import org.apache.geode.modules.session.catalina.internal.DeltaSessionUpdateAttributeEvent;
+
 @SuppressWarnings("serial")
 public class DeltaSession extends StandardSession
     implements DataSerializable, Delta, GatewayDelta, Sizeable, DeltaSessionInterface {

http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/geode-assembly/src/test/java/org/apache/geode/session/tests/CargoTestBase.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/session/tests/CargoTestBase.java b/geode-assembly/src/test/java/org/apache/geode/session/tests/CargoTestBase.java
index f54141b..92f3490 100644
--- a/geode-assembly/src/test/java/org/apache/geode/session/tests/CargoTestBase.java
+++ b/geode-assembly/src/test/java/org/apache/geode/session/tests/CargoTestBase.java
@@ -71,6 +71,28 @@ public abstract class CargoTestBase extends JUnit4CacheTestCase {
   }
 
   /**
+   * Gets the specified key from all the containers within the container manager and check that each
+   * container has the associated expected value
+   */
+  public void getKeyValueDataOnAllClients(String key, String expectedValue, String expectedCookie)
+      throws IOException, URISyntaxException {
+    for (int i = 0; i < manager.numContainers(); i++) {
+      // Set the port for this server
+      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
+      // Get the response to a get on the specified key from this server
+      Client.Response resp = client.get(key);
+
+      // Null would mean we don't expect the same cookie as before
+      if (expectedCookie != null)
+        assertEquals("Sessions are not replicating properly", expectedCookie,
+            resp.getSessionCookie());
+
+      // Check that the response from this server is correct
+      assertEquals("Session data is not replicating properly", expectedValue, resp.getResponse());
+    }
+  }
+
+  /**
    * Test that when multiple containers are using session replication, all of the containers will
    * use the same session cookie for the same client.
    */
@@ -80,14 +102,8 @@ public abstract class CargoTestBase extends JUnit4CacheTestCase {
 
     client.setPort(Integer.parseInt(manager.getContainerPort(0)));
     Client.Response resp = client.get(null);
-    String cookie = resp.getSessionCookie();
 
-    for (int i = 1; i < manager.numContainers(); i++) {
-      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
-      resp = client.get(null);
-
-      assertEquals("Sessions are not replicating properly", cookie, resp.getSessionCookie());
-    }
+    getKeyValueDataOnAllClients(null, "", resp.getSessionCookie());
   }
 
   /**
@@ -103,15 +119,8 @@ public abstract class CargoTestBase extends JUnit4CacheTestCase {
 
     client.setPort(Integer.parseInt(manager.getContainerPort(0)));
     Client.Response resp = client.set(key, value);
-    String cookie = resp.getSessionCookie();
-
-    for (int i = 0; i < manager.numContainers(); i++) {
-      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
-      resp = client.get(key);
 
-      assertEquals("Sessions are not replicating properly", cookie, resp.getSessionCookie());
-      assertEquals("Session data is not replicating properly", value, resp.getResponse());
-    }
+    getKeyValueDataOnAllClients(key, value, resp.getSessionCookie());
   }
 
   /**
@@ -128,17 +137,11 @@ public abstract class CargoTestBase extends JUnit4CacheTestCase {
 
     client.setPort(Integer.parseInt(manager.getContainerPort(0)));
     Client.Response resp = client.set(key, value);
-    String cookie = resp.getSessionCookie();
 
     manager.stopContainer(0);
+    manager.removeContainer(0);
 
-    for (int i = 1; i < manager.numContainers(); i++) {
-      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
-      resp = client.get(key);
-
-      assertEquals("Sessions are not replicating properly", cookie, resp.getSessionCookie());
-      assertEquals("Container failure caused inaccessible data.", value, resp.getResponse());
-    }
+    getKeyValueDataOnAllClients(key, value, resp.getSessionCookie());
   }
 
   /**
@@ -153,17 +156,11 @@ public abstract class CargoTestBase extends JUnit4CacheTestCase {
     String value = "Foo";
 
     client.setPort(Integer.parseInt(manager.getContainerPort(0)));
-    Client.Response resp = client.set(key, value);
-    String cookie = resp.getSessionCookie();
+    client.set(key, value);
 
     client.invalidate();
 
-    for (int i = 0; i < manager.numContainers(); i++) {
-      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
-      resp = client.get(key);
-
-      assertEquals("Data removal is not being replicated properly.", "", resp.getResponse());
-    }
+    getKeyValueDataOnAllClients(key, "", null);
   }
 
   /**
@@ -180,27 +177,13 @@ public abstract class CargoTestBase extends JUnit4CacheTestCase {
 
     client.setPort(Integer.parseInt(manager.getContainerPort(0)));
     Client.Response resp = client.set(key, value);
-    String cookie = resp.getSessionCookie();
 
-    for (int i = 0; i < manager.numContainers(); i++) {
-      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
-      resp = client.get(key);
-
-      assertEquals("Sessions are not replicating properly", cookie, resp.getSessionCookie());
-      assertEquals(value, resp.getResponse());
-    }
+    getKeyValueDataOnAllClients(key, value, resp.getSessionCookie());
 
     client.setMaxInactive(1);
-
     Thread.sleep(5000);
 
-    for (int i = 0; i < manager.numContainers(); i++) {
-      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
-      resp = client.get(key);
-
-      assertEquals("Session replication is not doing session expiration correctly.", "",
-          resp.getResponse());
-    }
+    getKeyValueDataOnAllClients(key, "", null);
   }
 
 
@@ -219,7 +202,6 @@ public abstract class CargoTestBase extends JUnit4CacheTestCase {
 
     client.setPort(Integer.parseInt(manager.getContainerPort(0)));
     Client.Response resp = client.set(key, value);
-    String cookie = resp.getSessionCookie();
 
     client.setMaxInactive(timeToExp);
 
@@ -232,14 +214,7 @@ public abstract class CargoTestBase extends JUnit4CacheTestCase {
       curTime = System.currentTimeMillis();
     }
 
-    for (int i = 0; i < manager.numContainers(); i++) {
-      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
-      resp = client.get(key);
-
-      assertEquals("Sessions are not replicating properly", cookie, resp.getSessionCookie());
-      assertEquals("Containers are not replicating session expiration reset", value,
-          resp.getResponse());
-    }
+    getKeyValueDataOnAllClients(key, value, resp.getSessionCookie());
   }
 
   /**
@@ -254,28 +229,13 @@ public abstract class CargoTestBase extends JUnit4CacheTestCase {
 
     client.setPort(Integer.parseInt(manager.getContainerPort(0)));
     Client.Response resp = client.set(key, value);
-    String cookie = resp.getSessionCookie();
 
-    for (int i = 0; i < manager.numContainers(); i++) {
-      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
-      resp = client.get(key);
-
-      assertEquals("Sessions are not replicating properly", cookie, resp.getSessionCookie());
-      assertEquals(value, resp.getResponse());
-    }
+    getKeyValueDataOnAllClients(key, value, resp.getSessionCookie());
 
     client.setPort(Integer.parseInt(manager.getContainerPort(0)));
     client.remove(key);
 
-    for (int i = 0; i < manager.numContainers(); i++) {
-      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
-      resp = client.get(key);
-
-      assertEquals("Sessions are not replicating properly", cookie, resp.getSessionCookie());
-      assertEquals(
-          "Was expecting an empty response after removal. Double check to make sure that the enableLocalCache cacheProperty is set to false. This test is unreliable on servers which use a local cache.",
-          "", resp.getResponse());
-    }
+    getKeyValueDataOnAllClients(key, "", resp.getSessionCookie());
   }
 
   /**
@@ -291,29 +251,16 @@ public abstract class CargoTestBase extends JUnit4CacheTestCase {
 
     client.setPort(Integer.parseInt(manager.getContainerPort(0)));
     Client.Response resp = client.set(key, value);
-    String cookie = resp.getSessionCookie();
 
-    for (int i = 0; i < manager.numContainers(); i++) {
-      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
-      resp = client.get(key);
+    getKeyValueDataOnAllClients(key, value, resp.getSessionCookie());
 
-      assertEquals("Sessions are not replicating properly", cookie, resp.getSessionCookie());
-      assertEquals(value, resp.getResponse());
-    }
     int numContainers = manager.numContainers();
-
+    // Add and start new container
     manager.addContainer(getInstall());
     manager.startAllInactiveContainers();
-
+    // Check that a container was added
     assertEquals(numContainers + 1, manager.numContainers());
 
-    for (int i = 0; i < manager.numContainers(); i++) {
-      client.setPort(Integer.parseInt(manager.getContainerPort(i)));
-      resp = client.get(key);
-
-      assertEquals("Sessions are not replicating properly", cookie, resp.getSessionCookie());
-      assertEquals("Containers are not properly sharing data with new arrival", value,
-          resp.getResponse());
-    }
+    getKeyValueDataOnAllClients(key, value, resp.getSessionCookie());
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/geode-assembly/src/test/java/org/apache/geode/session/tests/Client.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/session/tests/Client.java b/geode-assembly/src/test/java/org/apache/geode/session/tests/Client.java
index 9b458d0..52968d4 100644
--- a/geode-assembly/src/test/java/org/apache/geode/session/tests/Client.java
+++ b/geode-assembly/src/test/java/org/apache/geode/session/tests/Client.java
@@ -14,8 +14,9 @@
  */
 package org.apache.geode.session.tests;
 
-import org.apache.geode.modules.session.CommandServlet;
-import org.apache.geode.modules.session.QueryCommand;
+import java.io.IOException;
+import java.net.URISyntaxException;
+
 import org.apache.http.Header;
 import org.apache.http.StatusLine;
 import org.apache.http.client.methods.CloseableHttpResponse;
@@ -30,8 +31,8 @@ import org.apache.http.protocol.BasicHttpContext;
 import org.apache.http.protocol.HttpContext;
 import org.apache.http.util.EntityUtils;
 
-import java.io.IOException;
-import java.net.URISyntaxException;
+import org.apache.geode.modules.session.CommandServlet;
+import org.apache.geode.modules.session.QueryCommand;
 
 /**
  * A simple http client that talks to a server running the session-testing-war.
@@ -207,7 +208,8 @@ public class Client {
 
     StatusLine status = resp.getStatusLine();
     if (status.getStatusCode() != 200) {
-      throw new IOException("Http request failed. " + status);
+      throw new IOException("Http request to " + req.getURI().getHost() + "["
+          + req.getURI().getPort() + "] failed. " + status);
     }
 
     Response response = new Response(reqCookie, EntityUtils.toString(resp.getEntity()), isNew);

http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/geode-assembly/src/test/java/org/apache/geode/session/tests/ContainerInstall.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/session/tests/ContainerInstall.java b/geode-assembly/src/test/java/org/apache/geode/session/tests/ContainerInstall.java
index f9bce0a..ede100e 100644
--- a/geode-assembly/src/test/java/org/apache/geode/session/tests/ContainerInstall.java
+++ b/geode-assembly/src/test/java/org/apache/geode/session/tests/ContainerInstall.java
@@ -385,17 +385,25 @@ public abstract class ContainerInstall {
 
   protected static void editXMLFile(String XMLPath, String tagId, String tagName,
       String parentTagName, HashMap<String, String> attributes) {
-    editXMLFile(XMLPath, tagId, tagName, parentTagName, attributes, false);
+    editXMLFile(XMLPath, tagId, tagName, tagName, parentTagName, attributes, false);
   }
 
   protected static void editXMLFile(String XMLPath, String tagName, String parentTagName,
       HashMap<String, String> attributes) {
-    editXMLFile(XMLPath, null, tagName, parentTagName, attributes, false);
+    editXMLFile(XMLPath, tagName, parentTagName, attributes, false);
   }
 
   protected static void editXMLFile(String XMLPath, String tagName, String parentTagName,
       HashMap<String, String> attributes, boolean writeOnSimilarAttributeNames) {
-    editXMLFile(XMLPath, null, tagName, parentTagName, attributes, writeOnSimilarAttributeNames);
+    editXMLFile(XMLPath, null, tagName, tagName, parentTagName, attributes,
+        writeOnSimilarAttributeNames);
+  }
+
+  protected static void editXMLFile(String XMLPath, String tagName, String replacementTagName,
+      String parentTagName, HashMap<String, String> attributes,
+      boolean writeOnSimilarAttributeNames) {
+    editXMLFile(XMLPath, null, tagName, replacementTagName, parentTagName, attributes,
+        writeOnSimilarAttributeNames);
   }
 
   /**
@@ -410,6 +418,7 @@ public abstract class ContainerInstall {
    * @param tagId The id of tag to edit. If null, then this method will add a new xml element,
    *        unless writeOnSimilarAttributeNames is set to true.
    * @param tagName The name of the xml element to edit
+   * @param replacementTagName The new name of the XML element that is being edited
    * @param parentTagName The parent element of the element we should edit
    * @param attributes the xml attributes for the element to edit
    * @param writeOnSimilarAttributeNames If true, find an existing element with the same set of
@@ -418,7 +427,7 @@ public abstract class ContainerInstall {
    *        not null).
    */
   protected static void editXMLFile(String XMLPath, String tagId, String tagName,
-      String parentTagName, HashMap<String, String> attributes,
+      String replacementTagName, String parentTagName, HashMap<String, String> attributes,
       boolean writeOnSimilarAttributeNames) {
 
     try {
@@ -431,23 +440,33 @@ public abstract class ContainerInstall {
       // Get node with specified tagId
       if (tagId != null) {
         node = findNodeWithAttribute(doc, tagName, "id", tagId);
-      } else if (writeOnSimilarAttributeNames) {
+      }
+      // If writing on similar attributes then search by tag name
+      else if (writeOnSimilarAttributeNames) {
+        // Get all the nodes with the given tag name
         NodeList nodes = doc.getElementsByTagName(tagName);
         for (int i = 0; i < nodes.getLength(); i++) {
           Node n = nodes.item(i);
+          // If the node being iterated across has the exact attributes then it is the one that
+          // should be edited
           if (nodeHasExactAttributes(n, attributes, false)) {
             node = n;
             break;
           }
         }
       }
-      // If no node is found
+      // If a node is found
       if (node != null) {
+        doc.renameNode(node, null, replacementTagName);
+        // Rewrite the node attributes
         rewriteNodeAttributes(node, attributes);
+        // Write the tagId so that it can be found easier next time
         if (tagId != null)
           ((Element) node).setAttribute("id", tagId);
-      } else {
-        Element e = doc.createElement(tagName);
+      }
+      // If no node is found, create a new element under the given parent tag
+      else {
+        Element e = doc.createElement(replacementTagName);
         // Set id attribute
         if (tagId != null) {
           e.setAttribute("id", tagId);
@@ -485,11 +504,13 @@ public abstract class ContainerInstall {
    */
   private static Node findNodeWithAttribute(Document doc, String nodeName, String name,
       String value) {
+    // Get all nodes with given name
     NodeList nodes = doc.getElementsByTagName(nodeName);
     if (nodes == null) {
       return null;
     }
 
+    // Find and return the first node that has the given attribute
     for (int i = 0; i < nodes.getLength(); i++) {
       Node node = nodes.item(i);
       Node nodeAttr = node.getAttributes().getNamedItem(name);

http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/geode-assembly/src/test/java/org/apache/geode/session/tests/ContainerManager.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/session/tests/ContainerManager.java b/geode-assembly/src/test/java/org/apache/geode/session/tests/ContainerManager.java
index 500cfa9..036c6a5 100644
--- a/geode-assembly/src/test/java/org/apache/geode/session/tests/ContainerManager.java
+++ b/geode-assembly/src/test/java/org/apache/geode/session/tests/ContainerManager.java
@@ -140,6 +140,7 @@ public class ContainerManager {
   public ArrayList<Integer> getContainerIndexesWithState(String state) {
     ArrayList<Integer> indexes = new ArrayList<>();
     for (int i = 0; i < numContainers(); i++) {
+      // Checks that the state passed in is one of the 5 supported by Cargo
       if (state.equals(State.STARTED.toString()) || state.equals(State.STOPPED.toString())
           || state.equals(State.STARTED.toString()) || state.equals(State.STOPPING.toString())
           || state.equals(State.UNKNOWN.toString())) {
@@ -198,6 +199,13 @@ public class ContainerManager {
   }
 
   /**
+   * Remove the container in the given index from the list
+   */
+  public ServerContainer removeContainer(int index) {
+    return containers.remove(index);
+  }
+
+  /**
    * Get the indexes of all active containers
    */
   private ArrayList<Integer> getActiveContainerIndexes() {

http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerClientServerTest.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerClientServerTest.java b/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerClientServerTest.java
index 08f9978..ea4022b 100644
--- a/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerClientServerTest.java
+++ b/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerClientServerTest.java
@@ -14,11 +14,12 @@
  */
 package org.apache.geode.session.tests;
 
+import org.junit.Before;
+
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.server.CacheServer;
 import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.VM;
-import org.junit.Before;
 
 /**
  * Extends the {@link CargoTestBase} class to support client server tests of generic app servers
@@ -31,14 +32,17 @@ public abstract class GenericAppServerClientServerTest extends CargoTestBase {
    */
   @Before
   public void startServers() throws InterruptedException {
+    // Setup host
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
+    // Start server in VM
     vm0.invoke(() -> {
       Cache cache = getCache();
+      // Add cache server
       CacheServer server = cache.addCacheServer();
       server.setPort(0);
+      // Start the server in this VM
       server.start();
     });
-    Thread.sleep(5000);
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerContainer.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerContainer.java b/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerContainer.java
index 7a2cfaf..11c76fc 100644
--- a/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerContainer.java
+++ b/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerContainer.java
@@ -95,18 +95,25 @@ public class GenericAppServerContainer extends ServerContainer {
   private List<String> buildCommand() throws IOException {
     ContainerInstall install = getInstall();
 
+    // Start command list
     List<String> command = new ArrayList<>();
+    // Path to the modify war script to run
     command.add(modifyWarScript.getAbsolutePath());
+    // Path to the WAR file to modify
     command.add("-w");
     command.add(install.getWarFilePath());
+    // Get connection type for the WAR (peer-to-peer or client-server)
     command.add("-t");
     command.add(install.getConnectionType().getName());
+    // Path to the modified version of the origin WAR file
     command.add("-o");
     command.add(getWarFile().getAbsolutePath());
+    // Add all the cache properties setup to the WAR file
     for (String property : cacheProperties.keySet()) {
       command.add("-p");
       command.add("gemfire.cache." + property + "=" + getCacheProperty(property));
     }
+    // Add all the system properties to the WAR file
     for (String property : systemProperties.keySet()) {
       command.add("-p");
       command.add("gemfire.property." + property + "=" + getSystemProperty(property));
@@ -148,8 +155,7 @@ public class GenericAppServerContainer extends ServerContainer {
   }
 
   /**
-   * Update the container's settings by calling by modifying the war file through the
-   * {@link #modifyWarFile()} function
+   * Update the container's settings by calling {@link #modifyWarFile()} method
    */
   @Override
   public void writeSettings() throws Exception {

http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerInstall.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerInstall.java b/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerInstall.java
index aead718..4e56887 100644
--- a/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerInstall.java
+++ b/geode-assembly/src/test/java/org/apache/geode/session/tests/GenericAppServerInstall.java
@@ -14,7 +14,6 @@
  */
 package org.apache.geode.session.tests;
 
-import java.awt.Container;
 import java.io.File;
 import java.io.IOException;
 
@@ -35,7 +34,8 @@ import java.io.IOException;
 public class GenericAppServerInstall extends ContainerInstall {
 
   /**
-   * Get the download URL and container name of a generic app server using hardcoded keywords
+   * Get the version number, download URL, and container name of a generic app server using
+   * hardcoded keywords
    *
    * Currently the only supported keyword instance is JETTY9.
    */

http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/geode-assembly/src/test/java/org/apache/geode/session/tests/ServerContainer.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/session/tests/ServerContainer.java b/geode-assembly/src/test/java/org/apache/geode/session/tests/ServerContainer.java
index dbd438a..39ec42c 100644
--- a/geode-assembly/src/test/java/org/apache/geode/session/tests/ServerContainer.java
+++ b/geode-assembly/src/test/java/org/apache/geode/session/tests/ServerContainer.java
@@ -90,8 +90,10 @@ public abstract class ServerContainer {
     this.install = install;
     // Get a container description for logging and output
     description = generateUniqueContainerDescription(containerDescriptors);
+    // Setup logging
     loggingLevel = DEFAULT_LOGGING_LEVEL;
     logDir = new File(DEFAULT_LOG_DIR + description);
+    logDir.mkdirs();
 
     logger.info("Creating new container " + description);
 
@@ -105,8 +107,6 @@ public abstract class ServerContainer {
     systemProperties = new HashMap<>();
     // Set WAR file to session testing war
     warFile = new File(install.getWarFilePath());
-    // Setup logging folders
-    logDir.mkdirs();
 
     // Create the Cargo Container instance wrapping our physical container
     LocalConfiguration configuration = (LocalConfiguration) new DefaultConfigurationFactory()
@@ -122,18 +122,20 @@ public abstract class ServerContainer {
     gemfireLogFile = new File(logDir.getAbsolutePath() + "/gemfire.log");
     gemfireLogFile.getParentFile().mkdirs();
     setSystemProperty("log-file", gemfireLogFile.getAbsolutePath());
-    logger.info("Gemfire logs in " + gemfireLogFile.getAbsolutePath());
+
+    logger.info("Gemfire logs can be found in " + gemfireLogFile.getAbsolutePath());
 
     // Create the container
     container = (InstalledLocalContainer) (new DefaultContainerFactory())
         .createContainer(install.getInstallId(), ContainerType.INSTALLED, configuration);
     // Set container's home dir to where it was installed
     container.setHome(install.getHome());
-    // Set container output log
+    // Set container output log to directory setup for it
     container.setOutput(logDir.getAbsolutePath() + "/container.log");
 
     // Set cacheXML file
     File installXMLFile = install.getCacheXMLFile();
+    // Sets the cacheXMLFile variable and adds the cache XML file to the server system property map
     setCacheXMLFile(new File(logDir.getAbsolutePath() + "/" + installXMLFile.getName()));
     // Copy the cacheXML file to a new, unique location for this container
     FileUtils.copyFile(installXMLFile, cacheXMLFile);
@@ -182,7 +184,10 @@ public abstract class ServerContainer {
 
     try {
       logger.info("Starting container " + description);
+
+      // Writes settings to the expected form (either XML or WAR file)
       writeSettings();
+      // Start the container through cargo
       container.start();
     } catch (Exception e) {
       throw new RuntimeException(

http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatClientServerTest.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatClientServerTest.java b/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatClientServerTest.java
index 817428b..a5c6fa4 100644
--- a/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatClientServerTest.java
+++ b/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatClientServerTest.java
@@ -33,6 +33,7 @@ import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;
  * Sets up the server needed for the client container to connect to
  */
 public abstract class TomcatClientServerTest extends CargoTestBase {
+  private String serverName;
 
   @Rule
   public transient TemporaryFolder temporaryFolder = new TemporaryFolder();
@@ -43,10 +44,6 @@ public abstract class TomcatClientServerTest extends CargoTestBase {
   @Rule
   public transient LocatorServerStartupRule locatorStartup = new LocatorServerStartupRule();
 
-
-  private String serverName;
-  private File workingDirectory;
-
   /**
    * Starts a server for the client Tomcat container to connect to using the GFSH command line
    * before each test
@@ -54,31 +51,33 @@ public abstract class TomcatClientServerTest extends CargoTestBase {
   @Before
   public void startServer() throws Exception {
     TomcatInstall install = (TomcatInstall) getInstall();
+    // List of all the jars for tomcat to put on the server classpath
     String libDirJars = install.getHome() + "/lib/*";
     String binDirJars = install.getHome() + "/bin/*";
 
-    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
+    // Set server name based on the test about to be run
     serverName = getClass().getSimpleName().concat("_").concat(getTestMethodName());
-    workingDirectory = temporaryFolder.newFolder(serverName);
-
 
+    // Create command string for starting server
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
     command.addOption(CliStrings.START_SERVER__NAME, serverName);
     command.addOption(CliStrings.START_SERVER__SERVER_PORT, "0");
+    // Add Tomcat jars to server classpath
     command.addOption(CliStrings.START_SERVER__CLASSPATH,
         binDirJars + File.pathSeparator + libDirJars);
     command.addOption(CliStrings.START_SERVER__LOCATORS, DUnitEnv.get().getLocatorString());
-    command.addOption(CliStrings.START_SERVER__DIR, workingDirectory.getCanonicalPath());
 
+    // Start server
     gfsh.executeAndVerifyCommand(command.toString());
   }
 
+  /**
+   * Stops the server for the client Tomcat container it has been connecting to
+   */
   @After
   public void stopServer() throws Exception {
     CommandStringBuilder command = new CommandStringBuilder(CliStrings.STOP_SERVER);
-
-    // command.addOption(CliStrings.START_SERVER__NAME, serverName);
-    command.addOption(CliStrings.STOP_SERVER__DIR, workingDirectory.getCanonicalPath());
-
+    command.addOption(CliStrings.STOP_SERVER__DIR, serverName);
     gfsh.executeAndVerifyCommand(command.toString());
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatContainer.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatContainer.java b/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatContainer.java
index d1bf714..a75ba90 100644
--- a/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatContainer.java
+++ b/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatContainer.java
@@ -53,13 +53,13 @@ public class TomcatContainer extends ServerContainer {
       String containerDescriptors) throws IOException {
     super(install, containerConfigHome, containerDescriptors);
 
-    // Modify the cargo configuration to load off of the installation's context.xml
-    setConfigFile(DEFAULT_CONF_DIR + "context.xml", "conf", "context.xml");
     // Setup container specific XML files
     contextXMLFile = new File(logDir.getAbsolutePath() + "/context.xml");
     serverXMLFile = new File(DEFAULT_CONF_DIR + "server.xml");
 
+    // Copy the default container context XML file from the install to the specified path
     FileUtils.copyFile(new File(DEFAULT_CONF_DIR + "context.xml"), contextXMLFile);
+    // Set the container context XML file to the new location copied to above
     setConfigFile(contextXMLFile.getAbsolutePath(), DEFAULT_TOMCAT_XML_REPLACEMENT_DIR,
         DEFAULT_TOMCAT_CONTEXT_XML_REPLACEMENT_NAME);
 

http://git-wip-us.apache.org/repos/asf/geode/blob/a229933c/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatInstall.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatInstall.java b/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatInstall.java
index 57dc519..28a4e8c 100644
--- a/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatInstall.java
+++ b/geode-assembly/src/test/java/org/apache/geode/session/tests/TomcatInstall.java
@@ -163,10 +163,12 @@ public class TomcatInstall extends ContainerInstall {
   public void setupDefaultSettings() {
     HashMap<String, String> attributes = new HashMap<>();
 
+    // Set the session manager class within the context XML file
     attributes.put("className", getContextSessionManagerClass());
     editXMLFile(getDefaultContextXMLFile().getAbsolutePath(), "Tomcat", "Manager", "Context",
         attributes);
 
+    // Set the server lifecycle listener within the server XML file
     attributes.put("className", getServerLifeCycleListenerClass());
     editXMLFile(getDefaultServerXMLFile().getAbsolutePath(), "Tomcat", "Listener", "Server",
         attributes);
@@ -276,7 +278,7 @@ public class TomcatInstall extends ContainerInstall {
     // Don't need to copy any jars already in the tomcat install
     File tomcatLib = new File(tomcatLibPath);
 
-    // Find all the required jars in the tomcatModulePath
+    // Find all jars in the tomcatModulePath and add them as required jars
     try {
       for (File file : (new File(moduleJarDir)).listFiles()) {
         if (file.isFile() && file.getName().endsWith(".jar")) {
@@ -318,9 +320,11 @@ public class TomcatInstall extends ContainerInstall {
    */
   private void updateProperties() throws Exception {
     String jarsToSkip = "";
+    // Adds all the required jars as jars to skip when starting Tomcat
     for (String jarName : tomcatRequiredJars)
       jarsToSkip += "," + jarName + "*.jar";
 
+    // Add the jars to skip to the catalina property file
     editPropertyFile(getHome() + "/conf/catalina.properties", version.jarSkipPropertyName(),
         jarsToSkip, true);
   }


[05/25] geode git commit: GEODE-3406: Exclude new exception class from analyze serializable test

Posted by ud...@apache.org.
GEODE-3406: Exclude new exception class from analyze serializable test

Signed-off-by: Hitesh Khamesra <hk...@pivotal.io>


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/a4fc1ddf
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/a4fc1ddf
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/a4fc1ddf

Branch: refs/heads/feature/GEODE-3503
Commit: a4fc1ddf628766cd5f3e7fb9e9469f523d20dbf2
Parents: 0eb320f
Author: Alexander Murmann <am...@pivotal.io>
Authored: Fri Aug 18 11:01:26 2017 -0700
Committer: Hitesh Khamesra <hk...@pivotal.io>
Committed: Tue Aug 22 10:55:27 2017 -0700

----------------------------------------------------------------------
 .../resources/org/apache/geode/codeAnalysis/excludedClasses.txt     | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/a4fc1ddf/geode-core/src/test/resources/org/apache/geode/codeAnalysis/excludedClasses.txt
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/org/apache/geode/codeAnalysis/excludedClasses.txt b/geode-core/src/test/resources/org/apache/geode/codeAnalysis/excludedClasses.txt
index 43220fa..fbd582a 100644
--- a/geode-core/src/test/resources/org/apache/geode/codeAnalysis/excludedClasses.txt
+++ b/geode-core/src/test/resources/org/apache/geode/codeAnalysis/excludedClasses.txt
@@ -10,6 +10,7 @@ org/apache/geode/distributed/internal/DistributionManager
 org/apache/geode/internal/ExitCode
 org/apache/geode/internal/JarDeployer
 org/apache/geode/internal/cache/BackupLock
+org/apache/geode/internal/cache/tier/sockets/InvalidExecutionContextException
 org/apache/geode/internal/logging/GemFireLevel
 org/apache/geode/internal/logging/LoggingThreadGroup
 org/apache/geode/internal/logging/LogWriterImpl


[13/25] geode git commit: GEODE-3164: fix flakiness with await

Posted by ud...@apache.org.
GEODE-3164: fix flakiness with await


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/afded2a8
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/afded2a8
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/afded2a8

Branch: refs/heads/feature/GEODE-3503
Commit: afded2a8d960359f4e319e76bf5e39720151ae0d
Parents: a0ad568
Author: Kirk Lund <kl...@apache.org>
Authored: Tue Aug 22 15:10:54 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue Aug 22 15:37:35 2017 -0700

----------------------------------------------------------------------
 .../management/ClientHealthStatsDUnitTest.java  | 32 ++++++++++++--------
 1 file changed, 19 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/afded2a8/geode-core/src/test/java/org/apache/geode/management/ClientHealthStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/ClientHealthStatsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/ClientHealthStatsDUnitTest.java
index 760a47a..b0e49c6 100644
--- a/geode-core/src/test/java/org/apache/geode/management/ClientHealthStatsDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/ClientHealthStatsDUnitTest.java
@@ -14,13 +14,15 @@
  */
 package org.apache.geode.management;
 
-import static java.util.concurrent.TimeUnit.*;
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-import static org.apache.geode.test.dunit.Host.*;
-import static org.apache.geode.test.dunit.IgnoredException.*;
-import static org.apache.geode.test.dunit.Invoke.*;
-import static org.apache.geode.test.dunit.NetworkUtils.*;
-import static org.assertj.core.api.Assertions.*;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.apache.geode.distributed.ConfigurationProperties.DURABLE_CLIENT_ID;
+import static org.apache.geode.distributed.ConfigurationProperties.DURABLE_CLIENT_TIMEOUT;
+import static org.apache.geode.distributed.ConfigurationProperties.STATISTIC_SAMPLING_ENABLED;
+import static org.apache.geode.test.dunit.Host.getHost;
+import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
+import static org.apache.geode.test.dunit.Invoke.invokeInEveryVM;
+import static org.apache.geode.test.dunit.NetworkUtils.getServerHostName;
+import static org.assertj.core.api.Assertions.assertThat;
 
 import java.io.IOException;
 import java.io.Serializable;
@@ -56,7 +58,12 @@ import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.junit.categories.DistributedTest;
 
 /**
- * Client health stats check
+ * Distributed tests for client stats exposed via {@link CacheServerMXBean}:
+ * <ul>
+ * <li>{@link CacheServerMXBean#showClientStats}
+ * <li>{@link CacheServerMXBean#showAllClientStats}
+ * <li>{@link CacheServerMXBean#showClientQueueDetails}
+ * </ul>
  */
 @Category(DistributedTest.class)
 @SuppressWarnings({"serial", "unused"})
@@ -118,7 +125,6 @@ public class ClientHealthStatsDUnitTest implements Serializable {
 
     DistributedMember serverMember = this.managementTestRule.getDistributedMember(this.serverVM);
     this.managerVM.invoke(() -> verifyClientStats(serverMember, port, 2));
-
     this.managementTestRule.stopManager(this.managerVM);
   }
 
@@ -311,7 +317,7 @@ public class ClientHealthStatsDUnitTest implements Serializable {
     CacheServerMXBean cacheServerMXBean = awaitCacheServerMXBean(serverMember, serverPort);
 
     String[] clientIds = cacheServerMXBean.getClientIds();
-    assertThat(clientIds).hasSize(2);
+    await().until(() -> assertThat(clientIds).hasSize(2));
 
     ClientHealthStatus[] clientStatuses = cacheServerMXBean.showAllClientStats();
 
@@ -332,14 +338,14 @@ public class ClientHealthStatsDUnitTest implements Serializable {
    */
   private void verifyStats(final int serverPort) throws Exception {
     ManagementService service = this.managementTestRule.getManagementService();
-    CacheServerMXBean serverBean = service.getLocalCacheServerMXBean(serverPort);
+    CacheServerMXBean cacheServerMXBean = service.getLocalCacheServerMXBean(serverPort);
 
     CacheClientNotifier clientNotifier = CacheClientNotifier.getInstance();
     CacheClientProxy clientProxy = clientNotifier.getClientProxies().iterator().next();
     assertThat(clientProxy.getQueueSizeStat()).isEqualTo(clientProxy.getQueueSize());
 
-    ClientQueueDetail queueDetails = serverBean.showClientQueueDetails()[0];
-    assertThat(clientProxy.getQueueSizeStat()).isEqualTo((int) queueDetails.getQueueSize());
+    ClientQueueDetail queueDetails = cacheServerMXBean.showClientQueueDetails()[0];
+    assertThat((int) queueDetails.getQueueSize()).isEqualTo(clientProxy.getQueueSizeStat());
   }
 
   private CacheServerMXBean awaitCacheServerMXBean(final DistributedMember serverMember,


[20/25] geode git commit: GEODE-3395 Variable-ize product version and name in user guide - Managing

Posted by ud...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/heap_use/lock_memory.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/heap_use/lock_memory.html.md.erb b/geode-docs/managing/heap_use/lock_memory.html.md.erb
index f97e7e3..81106e2 100644
--- a/geode-docs/managing/heap_use/lock_memory.html.md.erb
+++ b/geode-docs/managing/heap_use/lock_memory.html.md.erb
@@ -26,7 +26,7 @@ On Linux systems, you can lock memory to prevent the operating system from pagin
 
 To use this feature:
 
-1.  Configure the operating system limits for locked memory. Increase the operating system's `ulimit -l` value (the maximum size that may be locked in memory) from the default (typically 32 KB or 64 KB) to at least the total amount of memory used by Geode for on-heap or off-heap storage. To view the current setting, enter `ulimit -a` at a shell prompt and find the value for `max locked                         memory`:
+1.  Configure the operating system limits for locked memory. Increase the operating system's `ulimit -l` value (the maximum size that may be locked in memory) from the default (typically 32 KB or 64 KB) to at least the total amount of memory used by <%=vars.product_name%> for on-heap or off-heap storage. To view the current setting, enter `ulimit -a` at a shell prompt and find the value for `max locked                         memory`:
 
     ``` pre
     # ulimit -a
@@ -41,12 +41,12 @@ To use this feature:
     # ulimit -l 64000000
     ```
 
-2.  Using locked memory in this manner increases the time required to start Geode. The additional time required to start Geode depends on the total amount of memory used, and can range from several seconds to 10 minutes or more. To improve startup time and reduce the potential of member timeouts, instruct the kernel to free operating system page caches just before starting a Geode member by issuing the following command:
+2.  Using locked memory in this manner increases the time required to start <%=vars.product_name%>. The additional time required to start <%=vars.product_name%> depends on the total amount of memory used, and can range from several seconds to 10 minutes or more. To improve startup time and reduce the potential of member timeouts, instruct the kernel to free operating system page caches just before starting a <%=vars.product_name%> member by issuing the following command:
 
     ``` pre
     $ echo 1 > /proc/sys/vm/drop_caches
     ```
 
-3.  Start each Geode data store with the gfsh `-lock-memory=true` option. If you deploy more than one server per host, begin by starting each server sequentially. Starting servers sequentially avoids a race condition in the operating system that can cause failures (even machine crashes) if you accidentally over-allocate the available RAM. After you verify that the system configuration is stable, you can then start servers concurrently.
+3.  Start each <%=vars.product_name%> data store with the gfsh `-lock-memory=true` option. If you deploy more than one server per host, begin by starting each server sequentially. Starting servers sequentially avoids a race condition in the operating system that can cause failures (even machine crashes) if you accidentally over-allocate the available RAM. After you verify that the system configuration is stable, you can then start servers concurrently.
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/heap_use/off_heap_management.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/heap_use/off_heap_management.html.md.erb b/geode-docs/managing/heap_use/off_heap_management.html.md.erb
index 3e1515d..8c203f3 100644
--- a/geode-docs/managing/heap_use/off_heap_management.html.md.erb
+++ b/geode-docs/managing/heap_use/off_heap_management.html.md.erb
@@ -21,11 +21,11 @@ limitations under the License.
 <a id="managing-off-heap-memory"></a>
 
 
-Geode can be configured to store region values in off-heap memory, which is memory within the JVM that is not subject to Java garbage collection.
+<%=vars.product_name%> can be configured to store region values in off-heap memory, which is memory within the JVM that is not subject to Java garbage collection.
 
 Garbage collection (GC) within a JVM can prove to be a performance impediment. A server cannot exert control over when garbage collection within the JVM heap memory takes place, and the server has little control over the triggers for invocation. Off-heap memory offloads values to a storage area that is not subject to Java GC. By taking advantage of off-heap storage, an application can reduce the amount of heap storage that is subject to GC overhead.
 
-Off-heap memory works in conjunction with the heap, it does not replace it. The keys are stored in heap memory space. Geode's own memory manager handles the off-heap memory with better performance than the Java garbage collector would for certain sets of region data.
+Off-heap memory works in conjunction with the heap, it does not replace it. The keys are stored in heap memory space. <%=vars.product_name%>'s own memory manager handles the off-heap memory with better performance than the Java garbage collector would for certain sets of region data.
 
 The resource manager monitors the contents of off-heap memory and invokes memory management operations in accordance with two thresholds similar to those used for monitoring the JVM heap: `eviction-off-heap-percentage` and `critical-off-heap-percentage`.
 
@@ -59,7 +59,7 @@ Off-heap storage is best suited to data patterns where:
 -   The values do not need to be frequently deserialized
 -   Many of the values are long-lived reference data
 
-Be aware that Geode has to perform extra work to access the data stored in off-heap memory since it is stored in serialized form. This extra work may cause some use cases to run slower in an off-heap configuration, even though they use less memory and avoid garbage collection overhead. However, even with the extra deserialization, off-heap storage may give you the best performance. Features that may increase overhead include
+Be aware that <%=vars.product_name%> has to perform extra work to access the data stored in off-heap memory since it is stored in serialized form. This extra work may cause some use cases to run slower in an off-heap configuration, even though they use less memory and avoid garbage collection overhead. However, even with the extra deserialization, off-heap storage may give you the best performance. Features that may increase overhead include
 
 -   frequent updates
 -   stored values of widely varying sizes
@@ -74,7 +74,7 @@ Region values that are less than or equal to eight bytes in size will not reside
 
 ## Controlling Off-heap Use with the Resource Manager
 
-The Geode resource manager controls off-heap memory by means of two thresholds, in much the same way as it does JVM heap memory. See [Using the Geode Resource Manager](heap_management.html#how_the_resource_manager_works). The resource manager prevents the cache from consuming too much off-heap memory by evicting old data. If the off-heap memory manager is unable to keep up, the resource manager refuses additions to the cache until the off-heap memory manager has freed an adequate amount of memory.
+The <%=vars.product_name%> resource manager controls off-heap memory by means of two thresholds, in much the same way as it does JVM heap memory. See [Using the <%=vars.product_name%> Resource Manager](heap_management.html#how_the_resource_manager_works). The resource manager prevents the cache from consuming too much off-heap memory by evicting old data. If the off-heap memory manager is unable to keep up, the resource manager refuses additions to the cache until the off-heap memory manager has freed an adequate amount of memory.
 
 The resource manager has two threshold settings, each expressed as a percentage of the total off-heap memory. Both are disabled by default.
 
@@ -152,7 +152,7 @@ off-heap-memory-size=4096m
 off-heap-memory-size=120g
 ```
 
-See [gemfire.properties and gfsecurity.properties (Geode Properties)](../../reference/topics/gemfire_properties.html) for details.
+See [gemfire.properties and gfsecurity.properties (<%=vars.product_name%> Properties)](../../reference/topics/gemfire_properties.html) for details.
 
 The cache.xml file supports one region attribute:
 
@@ -189,7 +189,7 @@ For example:
 
 ## <a id="managing-off-heap-memory__section_o4s_tg5_gv" class="no-quick-link"></a>Tuning Off-heap Memory Usage
 
-Geode collects statistics on off-heap memory usage which you can view with the gfsh `show metrics` command. See [Off-Heap (OffHeapMemoryStats)](../../reference/statistics_list.html#topic_ohc_tjk_w5) for a description of available off-heap statistics.
+<%=vars.product_name%> collects statistics on off-heap memory usage which you can view with the gfsh `show metrics` command. See [Off-Heap (OffHeapMemoryStats)](../../reference/statistics_list.html#topic_ohc_tjk_w5) for a description of available off-heap statistics.
 
 Off-heap memory is optimized, by default, for storing values of 128 KB in size. This figure is known as the "maximum optimized stored value size," which we will denote here by *maxOptStoredValSize*. If your data typically runs larger, you can enhance performance by increasing the OFF\_HEAP\_FREE\_LIST\_COUNT system parameter to a number larger than `maxOptStoredValSize/8`, where *maxOptStoredValSize* is expressed in KB (1024 bytes). So, the default values correspond to:
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/logging/configuring_log4j2.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/logging/configuring_log4j2.html.md.erb b/geode-docs/managing/logging/configuring_log4j2.html.md.erb
index f4d5ea3..cd9796b 100644
--- a/geode-docs/managing/logging/configuring_log4j2.html.md.erb
+++ b/geode-docs/managing/logging/configuring_log4j2.html.md.erb
@@ -1,6 +1,4 @@
----
-title:  Advanced Users—Configuring Log4j 2 for Geode
----
+<% set_title("Advanced Users—Configuring Log4j 2 for", product_name) %>
 
 <!--
 Licensed to the Apache Software Foundation (ASF) under one or more
@@ -19,17 +17,17 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Basic Geode logging configuration is configured via the gemfire.properties file. This topic is intended for advanced users who need increased control over logging due to integration with third-party libraries.
+Basic <%=vars.product_name%> logging configuration is configured via the gemfire.properties file. This topic is intended for advanced users who need increased control over logging due to integration with third-party libraries.
 
-The default `log4j2.xml` that Geode uses is stored in geode.jar as `log4j2-default.xml`. The contents of the configuration can be viewed in the product distribution in the following location: `$GEMFIRE/defaultConfigs/log4j2.xml`.
+The default `log4j2.xml` that <%=vars.product_name%> uses is stored in geode.jar as `log4j2-default.xml`. The contents of the configuration can be viewed in the product distribution in the following location: `$GEMFIRE/defaultConfigs/log4j2.xml`.
 
-To specify your own `log4j2.xml` configuration file (or anything else supported by Log4j 2 such as .json or .yaml), use the following flag when starting up your JVM or Geode member:
+To specify your own `log4j2.xml` configuration file (or anything else supported by Log4j 2 such as .json or .yaml), use the following flag when starting up your JVM or <%=vars.product_name%> member:
 
 ``` pre
 -Dlog4j.configurationFile=<location-of-your-file>
 ```
 
-If the Java system property `log4j.configurationFile` is specified, then Geode will not use the `log4j2-default.xml` included in geode.jar. However, Geode will still create and register a AlertAppender and LogWriterAppender if the `alert-level` and `log-file` Geode properties are configured. You can then use the Geode LogWriter to log to Geode's log or to generate an Alert and receive log statements from customer's application and all third party libraries. Alternatively, you can use any front-end logging API that is configured to log to Log4j 2.
+If the Java system property `log4j.configurationFile` is specified, then <%=vars.product_name%> will not use the `log4j2-default.xml` included in geode.jar. However, <%=vars.product_name%> will still create and register an AlertAppender and LogWriterAppender if the `alert-level` and `log-file` <%=vars.product_name%> properties are configured. You can then use the <%=vars.product_name%> LogWriter to log to <%=vars.product_name%>'s log or to generate an Alert and receive log statements from customer's application and all third party libraries. Alternatively, you can use any front-end logging API that is configured to log to Log4j 2.
 
 ## Using Different Front-End Logging APIs to Log to Log4j2
 
@@ -43,26 +41,26 @@ For example, if you are using:
 
 See [http://logging.apache.org/log4j/2.x/faq.html](http://logging.apache.org/log4j/2.x/faq.html) for more examples.
 
-All three of the above JAR files are in the full distribution of Log4J 2.1 which can be downloaded at [http://logging.apache.org/log4j/2.x/download.html](http://logging.apache.org/log4j/2.x/download.html). Download the appropriate bridge, adapter, or binding JARs to ensure that Geode logging is integrated with every logging API used in various third-party libraries or in your own applications.
+All three of the above JAR files are in the full distribution of Log4J 2.1 which can be downloaded at [http://logging.apache.org/log4j/2.x/download.html](http://logging.apache.org/log4j/2.x/download.html). Download the appropriate bridge, adapter, or binding JARs to ensure that <%=vars.product_name%> logging is integrated with every logging API used in various third-party libraries or in your own applications.
 
 **Note:**
-Apache Geode has been tested with Log4j 2.1. As newer versions of Log4j 2 come out, you can find 2.1 under Previous Releases on that page.
+<%=vars.product_name_long%> has been tested with Log4j 2.1. As newer versions of Log4j 2 come out, you can find 2.1 under Previous Releases on that page.
 
 ## Customizing Your Own log4j2.xml File
 
 Advanced users may want to move away entirely from setting `log-*` gemfire properties and instead specify their own `log4j2.xml` using `-Dlog4j.configurationFile`.
 
-Custom Log4j 2 configuration in Geode comes with some caveats and notes:
+Custom Log4j 2 configuration in <%=vars.product_name%> comes with some caveats and notes:
 
 -   Do not use `"monitorInterval="` in your log4j2.xml file because doing so can have significant performance impact. This setting instructs Log4j 2 to monitor the log4j2.xml config file at runtime and automatically reload and reconfigure if the file changes.
--   Geode's default `log4j2.xml` specifies status="FATAL" because Log4j 2's StatusLogger generates warnings to standard out at ERROR level anytime Geode stops its AlertAppender or LogWriterAppender. Geode uses a lot of concurrent threads that are executing code with log statements; these threads may be logging while the Geode appenders are being stopped.
--   Geode's default log4j2.xml specifies `shutdownHook="disable"` because Geode has a shutdown hook which disconnects the DistributedSystem and closes the Cache, which is executing the code that performs logging. If the Log4J2 shutdown hook stops logging before Geode completes its shutdown, Log4j 2 will attempt to start back up. This restart in turn attempts to register another Log4j 2 shutdown hook which fails resulting in a FATAL level message logged by Log4j 2.
--   The GEMFIRE\_VERBOSE marker (Log4J2 Marker are discussed on [http://logging.apache.org/log4j/2.x/manual/markers.html](http://logging.apache.org/log4j/2.x/manual/markers.html)) can be used to enable additional verbose log statements at TRACE level. Many log statements are enabled simply by enabling DEBUG or TRACE. However, even more log statements can be further enabled by using MarkerFilter to accept GEMFIRE\_VERBOSE. The default Geode `log4j2.xml` disables GEMFIRE\_VERBOSE with this line:
+-   <%=vars.product_name%>'s default `log4j2.xml` specifies status="FATAL" because Log4j 2's StatusLogger generates warnings to standard out at ERROR level anytime <%=vars.product_name%> stops its AlertAppender or LogWriterAppender. <%=vars.product_name%> uses a lot of concurrent threads that are executing code with log statements; these threads may be logging while the <%=vars.product_name%> appenders are being stopped.
+-   <%=vars.product_name%>'s default log4j2.xml specifies `shutdownHook="disable"` because <%=vars.product_name%> has a shutdown hook which disconnects the DistributedSystem and closes the Cache, which is executing the code that performs logging. If the Log4J2 shutdown hook stops logging before <%=vars.product_name%> completes its shutdown, Log4j 2 will attempt to start back up. This restart in turn attempts to register another Log4j 2 shutdown hook which fails resulting in a FATAL level message logged by Log4j 2.
+-   The GEMFIRE\_VERBOSE marker (Log4J2 Marker are discussed on [http://logging.apache.org/log4j/2.x/manual/markers.html](http://logging.apache.org/log4j/2.x/manual/markers.html)) can be used to enable additional verbose log statements at TRACE level. Many log statements are enabled simply by enabling DEBUG or TRACE. However, even more log statements can be further enabled by using MarkerFilter to accept GEMFIRE\_VERBOSE. The default <%=vars.product_name%> `log4j2.xml` disables GEMFIRE\_VERBOSE with this line:
 
     ``` pre
     <MarkerFilter marker="GEMFIRE_VERBOSE" onMatch="DENY" onMismatch="NEUTRAL"/> 
     ```
 
-    You can enable the GEMFIRE\_VERBOSE log statements by changing `onMatch="DENY"` to `onMatch="ACCEPT"`. Typically, it's more useful to simply enable DEBUG or TRACE on certain classes or packages instead of for the entire Geode product. However, this setting can be used for internal debugging purposes if all other debugging methods fail.
+    You can enable the GEMFIRE\_VERBOSE log statements by changing `onMatch="DENY"` to `onMatch="ACCEPT"`. Typically, it's more useful to simply enable DEBUG or TRACE on certain classes or packages instead of for the entire <%=vars.product_name%> product. However, this setting can be used for internal debugging purposes if all other debugging methods fail.
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/logging/how_logging_works.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/logging/how_logging_works.html.md.erb b/geode-docs/managing/logging/how_logging_works.html.md.erb
index 22634c6..0c26d8e 100644
--- a/geode-docs/managing/logging/how_logging_works.html.md.erb
+++ b/geode-docs/managing/logging/how_logging_works.html.md.erb
@@ -1,6 +1,4 @@
----
-title:  How Geode Logging Works
----
+<% set_title("How", product_name, "Logging Works") %>
 
 <!--
 Licensed to the Apache Software Foundation (ASF) under one or more
@@ -19,21 +17,21 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Apache Geode uses Apache Log4j 2 as the basis for its logging system.
+<%=vars.product_name_long%> uses Apache Log4j 2 as the basis for its logging system.
 
-Geode uses [Apache Log4j 2](http://logging.apache.org/log4j/2.x/) API and Core libraries as the basis for its logging system. Log4j 2 API is a popular and powerful front-end logging API used by all the Geode classes to generate log statements. Log4j 2 Core is a backend implementation for logging; you can route any of the front-end logging API libraries to log to this backend. Geode uses the Core backend to run two custom Log4j 2 Appenders: **AlertAppender** and **LogWriterAppender**.
+<%=vars.product_name%> uses [Apache Log4j 2](http://logging.apache.org/log4j/2.x/) API and Core libraries as the basis for its logging system. Log4j 2 API is a popular and powerful front-end logging API used by all the <%=vars.product_name%> classes to generate log statements. Log4j 2 Core is a backend implementation for logging; you can route any of the front-end logging API libraries to log to this backend. <%=vars.product_name%> uses the Core backend to run two custom Log4j 2 Appenders: **AlertAppender** and **LogWriterAppender**.
 
-Geode has been tested with Log4j 2.1.
+<%=vars.product_name%> has been tested with Log4j 2.1.
 
 **Note:**
-For this reason, Geode now always requires the following JARs to be in the classpath: `log4j-api-2.1.jar`, `log4j-core-2.1.jar`. Both of these JARs are distributed in the `$GEMFIRE/lib` directory and included in the appropriate `*-dependencies.jar` convenience libraries.
+For this reason, <%=vars.product_name%> now always requires the following JARs to be in the classpath: `log4j-api-2.1.jar`, `log4j-core-2.1.jar`. Both of these JARs are distributed in the `$GEMFIRE/lib` directory and included in the appropriate `*-dependencies.jar` convenience libraries.
 
-**AlertAppender** is the component that generates Geode alerts that are then managed by the JMX Management and Monitoring system. See [Notification Federation](../management/notification_federation_and_alerts.html#topic_212EE5A2ABAB4E8E8EF71807C9ECEF1A) for more details.
+**AlertAppender** is the component that generates <%=vars.product_name%> alerts that are then managed by the JMX Management and Monitoring system. See [Notification Federation](../management/notification_federation_and_alerts.html#topic_212EE5A2ABAB4E8E8EF71807C9ECEF1A) for more details.
 
-**LogWriterAppender** is the component that is configured by all the `log-*` Geode properties such as `log-file`, `log-file-size-limit` and `log-disk-space-limit`.
+**LogWriterAppender** is the component that is configured by all the `log-*` <%=vars.product_name%> properties such as `log-file`, `log-file-size-limit` and `log-disk-space-limit`.
 
-Both of these appenders are created and controlled programmatically. You configure their behavior with the `log-*` Geode properties and the alert level that is configured within the JMX Management & Monitoring system. These appenders do not currently support configuration within a `log4j2.xml` config file.
+Both of these appenders are created and controlled programmatically. You configure their behavior with the `log-*` <%=vars.product_name%> properties and the alert level that is configured within the JMX Management & Monitoring system. These appenders do not currently support configuration within a `log4j2.xml` config file.
 
-Advanced users may wish to define their own `log4j2.xml`. See [Advanced Users—Configuring Log4j 2 for Geode](configuring_log4j2.html) for more details.
+Advanced users may wish to define their own `log4j2.xml`. See [Advanced Users—Configuring Log4j 2 for <%=vars.product_name%>](configuring_log4j2.html) for more details.
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/logging/logging.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/logging/logging.html.md.erb b/geode-docs/managing/logging/logging.html.md.erb
index c4fbaf6..ab080a6 100644
--- a/geode-docs/managing/logging/logging.html.md.erb
+++ b/geode-docs/managing/logging/logging.html.md.erb
@@ -21,24 +21,24 @@ limitations under the License.
 
 Comprehensive logging messages help you confirm system configuration and debug problems in configuration and code.
 
--   **[How Geode Logging Works](../../managing/logging/how_logging_works.html)**
+-   **[How <%=vars.product_name%> Logging Works](how_logging_works.html)**
 
-    Apache Geode uses Apache Log4j 2 as the basis for its logging system.
+    <%=vars.product_name_long%> uses Apache Log4j 2 as the basis for its logging system.
 
--   **[Understanding Log Messages and Their Categories](../../managing/logging/logging_categories.html)**
+-   **[Understanding Log Messages and Their Categories](logging_categories.html)**
 
     System logging messages typically pertain to startup; logging management; connection and system membership; distribution; or cache, region, and entry management.
 
--   **[Naming, Searching, and Creating Log Files](../../managing/logging/logging_whats_next.html)**
+-   **[Naming, Searching, and Creating Log Files](logging_whats_next.html)**
 
     The best way to manage and understand the logs is to have each member log to its own files.
 
--   **[Set Up Logging](../../managing/logging/setting_up_logging.html)**
+-   **[Set Up Logging](setting_up_logging.html)**
 
     You configure logging in a member's `gemfire.properties` or at startup with `gfsh`.
 
--   **[Advanced Users—Configuring Log4j 2 for Geode](../../managing/logging/configuring_log4j2.html)**
+-   **[Advanced Users—Configuring Log4j 2 for <%=vars.product_name%>](configuring_log4j2.html)**
 
-    Basic Geode logging configuration is configured via the gemfire.properties file. This topic is intended for advanced users who need increased control over logging due to integration with third-party libraries.
+    Basic <%=vars.product_name%> logging configuration is configured via the gemfire.properties file. This topic is intended for advanced users who need increased control over logging due to integration with third-party libraries.
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/logging/logging_categories.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/logging/logging_categories.html.md.erb b/geode-docs/managing/logging/logging_categories.html.md.erb
index 6dc94fa..72a595f 100644
--- a/geode-docs/managing/logging/logging_categories.html.md.erb
+++ b/geode-docs/managing/logging/logging_categories.html.md.erb
@@ -21,7 +21,7 @@ limitations under the License.
 
 System logging messages typically pertain to startup; logging management; connection and system membership; distribution; or cache, region, and entry management.
 
--   **Startup information**. Describe the Java version, the Geode native version, the host system, current working directory, and environment settings. These messages contain all information about the system and configuration the process is running with.
+-   **Startup information**. Describe the Java version, the <%=vars.product_name%> native version, the host system, current working directory, and environment settings. These messages contain all information about the system and configuration the process is running with.
 -   **Logging management**. Pertain to the maintenance of the log files themselves. This information is always in the main log file (see the discussion at Log File Name).
 -   **Connections and system membership**. Report on the arrival and departure of distributed system members (including the current member) and any information related to connection activities or failures. This includes information on communication between tiers in a hierarchical cache.
 -   **Distribution**. Report on the distribution of data between system members. These messages include information about region configuration, entry creation and modification, and region and entry invalidation and destruction.
@@ -44,11 +44,11 @@ Cache initialized using "file:/Samples/quickstart/xml/PushConsumer.xml".
 
 ## <a id="how_logging_works__section_43A099C67FF04A1EB0A07B617D653A38" class="no-quick-link"></a>Log File Name
 
-Specify your Geode system member's main log in the gemfire property `log-file` setting.
+Specify your <%=vars.product_name%> system member's main log in the gemfire property `log-file` setting.
 
-Geode uses this name for the most recent log file, actively in use if the member is running, or used for the last run. Geode creates the main log file when the application starts.
+<%=vars.product_name%> uses this name for the most recent log file, actively in use if the member is running, or used for the last run. <%=vars.product_name%> creates the main log file when the application starts.
 
-By default, the main log contains the entire log for the member session. If you specify a `log-file-size-limit`, Geode splits the logging into these files:
+By default, the main log contains the entire log for the member session. If you specify a `log-file-size-limit`, <%=vars.product_name%> splits the logging into these files:
 
 -   **The main, current log**. Holding current logging entries. Named with the string you specified in `log-file`.
 -   **Child logs**. Holding older logging entries. These are created by renaming the main, current log when it reaches the size limit.
@@ -132,7 +132,7 @@ These are the levels, in descending order, with sample output:
 
 -   **error**. This level indicates that something is wrong in your system. You should be able to continue running, but the operation noted in the error message failed.
 
-    This error was produced by throwing a `Throwable` from a `CacheListener`. While dispatching events to a customer-implemented cache listener, Geode catches any `Throwable` thrown by the listener and logs it as an error. The text shown here is followed by the output from the `Throwable` itself.
+    This error was produced by throwing a `Throwable` from a `CacheListener`. While dispatching events to a customer-implemented cache listener, <%=vars.product_name%> catches any `Throwable` thrown by the listener and logs it as an error. The text shown here is followed by the output from the `Throwable` itself.
 
     ``` pre
     [error 2007/09/05 11:45:30.542 PDT gemfire1_newton_18222
@@ -244,4 +244,4 @@ These are the levels, in descending order, with sample output:
     Do not use these settings unless asked to do so by technical support.
 
 **Note:**
-Geode no longer supports setting system properties for VERBOSE logging. To enable VERBOSE logging, see [Advanced Users—Configuring Log4j 2 for Geode](configuring_log4j2.html)
+<%=vars.product_name%> no longer supports setting system properties for VERBOSE logging. To enable VERBOSE logging, see [Advanced Users—Configuring Log4j 2 for <%=vars.product_name%>](configuring_log4j2.html)

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/logging/setting_up_logging.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/logging/setting_up_logging.html.md.erb b/geode-docs/managing/logging/setting_up_logging.html.md.erb
index f2ab52e..04f345b 100644
--- a/geode-docs/managing/logging/setting_up_logging.html.md.erb
+++ b/geode-docs/managing/logging/setting_up_logging.html.md.erb
@@ -24,7 +24,7 @@ You configure logging in a member's `gemfire.properties` or at startup with `gfs
 <a id="setting_up_logging__section_35F8A9028A91441785BCACD6CD40A498"></a>
 Before you begin, make sure you understand [Basic Configuration and Programming](../../basic_config/book_intro.html).
 
-1.  Run a time synchronization service such as NTP on all Geode host machines. This is the only way to produce logs that are useful for troubleshooting. Synchronized time stamps ensure that log messages from different hosts can be merged to accurately reproduce a chronological history of a distributed run.
+1.  Run a time synchronization service such as NTP on all <%=vars.product_name%> host machines. This is the only way to produce logs that are useful for troubleshooting. Synchronized time stamps ensure that log messages from different hosts can be merged to accurately reproduce a chronological history of a distributed run.
 2.  Use a sniffer to monitor your logs. Look for new or unexpected warnings, errors, or severe messages. The logs output by your system have their own characteristics, indicative of your system configuration and of the particular behavior of your applications, so you must become familiar with your applications' logs to use them effectively.
 3.  Configure member logging in each member's `gemfire.properties` as needed:
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/configuring_rmi_connector.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/configuring_rmi_connector.html.md.erb b/geode-docs/managing/management/configuring_rmi_connector.html.md.erb
index 5b29149..922b6d0 100644
--- a/geode-docs/managing/management/configuring_rmi_connector.html.md.erb
+++ b/geode-docs/managing/management/configuring_rmi_connector.html.md.erb
@@ -19,16 +19,16 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Geode programmatically emulates out-of-the-box JMX provided by Java and creates a JMXServiceURL with RMI Registry and RMI Connector ports on all manageable members.
+<%=vars.product_name%> programmatically emulates out-of-the-box JMX provided by Java and creates a JMXServiceURL with RMI Registry and RMI Connector ports on all manageable members.
 
 ## <a id="concept_BC793A7ACF9A4BD9A29C2DCC6894767D__section_143531EBBCF94033B8058FCF5F8A5A0D" class="no-quick-link"></a>Configuring JMX Manager Port and Bind Addresses
 
-You can configure a specific connection port and address when launching a process that will host the Geode JMX Manager. To do this, specify values for the `jmx-manager-bind-address`, which specifies the JMX manager's IP address and `jmx-manager-port`, which defines the RMI connection port.
+You can configure a specific connection port and address when launching a process that will host the <%=vars.product_name%> JMX Manager. To do this, specify values for the `jmx-manager-bind-address`, which specifies the JMX manager's IP address and `jmx-manager-port`, which defines the RMI connection port.
 
-The default Geode JMX Manager RMI port is 1099. You may need to modify this default if 1099 is reserved for other uses.
+The default <%=vars.product_name%> JMX Manager RMI port is 1099. You may need to modify this default if 1099 is reserved for other uses.
 
 ## <a id="concept_BC793A7ACF9A4BD9A29C2DCC6894767D__section_BF6352A05CE94F35A8355232D22AC2BC" class="no-quick-link"></a>Using Out-of-the-Box RMI Connectors
 
-If for some reason you need to use standard JMX RMI in your deployment for other monitoring purposes, set the Geode property `jmx-manager-port` to 0 on any members where you want to use standard JMX RMI.
+If for some reason you need to use standard JMX RMI in your deployment for other monitoring purposes, set the <%=vars.product_name%> property `jmx-manager-port` to 0 on any members where you want to use standard JMX RMI.
 
-If you use out-of-the-box JMX RMI instead of starting an embedded Geode JMX Manager, you should consider setting `-Dsun.rmi.dgc.server.gcInterval=Long.MAX_VALUE-1` when starting the JVM for customer applications and client processes. Every Geode process internally sets this setting before creating and starting the JMX RMI connector in order to prevent full garbage collection from pausing processes.
+If you use out-of-the-box JMX RMI instead of starting an embedded <%=vars.product_name%> JMX Manager, you should consider setting `-Dsun.rmi.dgc.server.gcInterval=Long.MAX_VALUE-1` when starting the JVM for customer applications and client processes. Every <%=vars.product_name%> process internally sets this setting before creating and starting the JMX RMI connector in order to prevent full garbage collection from pausing processes.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/gfsh_and_management_api.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/gfsh_and_management_api.html.md.erb b/geode-docs/managing/management/gfsh_and_management_api.html.md.erb
index e7ee5c2..6c642d6 100644
--- a/geode-docs/managing/management/gfsh_and_management_api.html.md.erb
+++ b/geode-docs/managing/management/gfsh_and_management_api.html.md.erb
@@ -22,7 +22,7 @@ limitations under the License.
 You can also use management APIs to execute gfsh commands programmatically.
 
 **Note:**
-If you start the JMX Manager programmatically and wish to enable command processing, you must also add the absolute path of `gfsh-dependencies.jar` (located in `$GEMFIRE/lib` of your Geode installation) to the CLASSPATH of your application. Do not copy this library to your CLASSPATH because this library refers to other dependencies in `$GEMFIRE/lib` by a relative path. The following code samples demonstrate how to process and execute `gfsh` commands using the Java API.
+If you start the JMX Manager programmatically and wish to enable command processing, you must also add the absolute path of `gfsh-dependencies.jar` (located in `$GEMFIRE/lib` of your <%=vars.product_name%> installation) to the CLASSPATH of your application. Do not copy this library to your CLASSPATH because this library refers to other dependencies in `$GEMFIRE/lib` by a relative path. The following code samples demonstrate how to process and execute `gfsh` commands using the Java API.
 
 First, retrieve a CommandService instance.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/jmx_manager_node.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/jmx_manager_node.html.md.erb b/geode-docs/managing/management/jmx_manager_node.html.md.erb
index d002734..b143436 100644
--- a/geode-docs/managing/management/jmx_manager_node.html.md.erb
+++ b/geode-docs/managing/management/jmx_manager_node.html.md.erb
@@ -23,10 +23,10 @@ limitations under the License.
 
 Any member can host an embedded JMX Manager, which provides a federated view of all MBeans for the distributed system. The member can be configured to be a manager at startup or anytime during its life by invoking the appropriate API calls on the ManagementService.
 
-You need to have a JMX Manager started in your distributed system in order to use Geode management and monitoring tools such as [gfsh](../../tools_modules/gfsh/chapter_overview.html) and [Geode Pulse](../../tools_modules/pulse/pulse-overview.html).
+You need to have a JMX Manager started in your distributed system in order to use <%=vars.product_name%> management and monitoring tools such as [gfsh](../../tools_modules/gfsh/chapter_overview.html) and [<%=vars.product_name%> Pulse](../../tools_modules/pulse/pulse-overview.html).
 
 **Note:**
-Each node that acts as the JMX Manager has additional memory requirements depending on the number of resources that it is managing and monitoring. Being a JMX Manager can increase the memory footprint of any process, including locator processes. See [Memory Requirements for Cached Data](../../reference/topics/memory_requirements_for_cache_data.html#calculating_memory_requirements) for more information on calculating memory overhead on your Geode processes.
+Each node that acts as the JMX Manager has additional memory requirements depending on the number of resources that it is managing and monitoring. Being a JMX Manager can increase the memory footprint of any process, including locator processes. See [Memory Requirements for Cached Data](../../reference/topics/memory_requirements_for_cache_data.html#calculating_memory_requirements) for more information on calculating memory overhead on your <%=vars.product_name%> processes.
 
 -   **[Starting a JMX Manager](jmx_manager_operations.html)**
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/jmx_manager_operations.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/jmx_manager_operations.html.md.erb b/geode-docs/managing/management/jmx_manager_operations.html.md.erb
index e48209c..6b85129 100644
--- a/geode-docs/managing/management/jmx_manager_operations.html.md.erb
+++ b/geode-docs/managing/management/jmx_manager_operations.html.md.erb
@@ -22,14 +22,14 @@ limitations under the License.
 <a id="topic_686158E9AFBD47518BE1B4BEB232C190"></a>
 
 
-JMX Manager nodes are members that manage other Geode members (as well as themselves). A JMX Manager node can manage all other members in the distributed system. Typically a locator will function as the JMX Manager, but you can also turn any other distributed system member such as a server into a JMX Manager node as well.
+JMX Manager nodes are members that manage other <%=vars.product_name%> members (as well as themselves). A JMX Manager node can manage all other members in the distributed system. Typically a locator will function as the JMX Manager, but you can also turn any other distributed system member such as a server into a JMX Manager node as well.
 
-To allow a server to become a JMX Manager you configure Geode property `jmx-manager=true`, in the server's`gemfire.properties` file. This property configures the node to become a JMX Manager node passively; if gfsh cannot locate a JMX Manager when connecting to the distributed system, the server node will be started as a JMX Manager node.
+To allow a server to become a JMX Manager, you configure the <%=vars.product_name%> property `jmx-manager=true` in the server's `gemfire.properties` file. This property configures the node to become a JMX Manager node passively; if gfsh cannot locate a JMX Manager when connecting to the distributed system, the server node will be started as a JMX Manager node.
 
 **Note:**
 The default property setting for all locators is `gemfire.jmx-manager=true`. For other members, the default property setting is `gemfire.jmx-manager=false`.
 
-To force a server to become a JMX Manager node whenever it is started, set the Geode properties `jmx-manager-start=true` and `jmx-manager=true` in the server's gemfire.properties file. Note that both of these properties must be set to true for the node.
+To force a server to become a JMX Manager node whenever it is started, set the <%=vars.product_name%> properties `jmx-manager-start=true` and `jmx-manager=true` in the server's gemfire.properties file. Note that both of these properties must be set to true for the node.
 
 To start the member as a JMX Manager node on the command line, provide `--J=-Dgemfire.jmx-manager-start=true` and `--J=-Dgemfire.jmx-manager=true` as arguments to either the `start server` or `start locator` command.
 
@@ -50,13 +50,13 @@ The following is an example of starting a new locator that also starts an embedd
 
 ``` pre
 gfsh>start locator --name=locator1
-Starting a Geode Locator in /Users/username/apache-geode/locator1...
+Starting a <%=vars.product_name%> Locator in /Users/username/apache-geode/locator1...
 ....
 Locator in /Users/username/apache-geode/locator1 on 192.0.2.0[10334] as locator1
 is currently online.
 Process ID: 27144
 Uptime: 5 seconds
-Geode Version: 1.2.0
+<%=vars.product_name%> Version: <%=vars.product_version%>
 Java Version: 1.8.0_111
 Log File: /Users/username/apache-geode/locator1/locator1.log
 JVM Arguments: -Dgemfire.enable-cluster-configuration=true 
@@ -102,12 +102,12 @@ In the `gemfire.properties` file, you configure a JMX manager as follows.
 <tbody>
 <tr class="odd">
 <td>http-service-port</td>
-<td>If non-zero, then Geode starts an embedded HTTP service that listens on this port. The HTTP service is used to host the Geode Pulse Web application. If you are hosting the Pulse web app on your own Web server, then disable this embedded HTTP service by setting this property to zero. Ignored if <code class="ph codeph">jmx-manager</code> is false.</td>
+<td>If non-zero, then <%=vars.product_name%> starts an embedded HTTP service that listens on this port. The HTTP service is used to host the <%=vars.product_name%> Pulse Web application. If you are hosting the Pulse web app on your own Web server, then disable this embedded HTTP service by setting this property to zero. Ignored if <code class="ph codeph">jmx-manager</code> is false.</td>
 <td>7070</td>
 </tr>
 <tr class="even">
 <td>http-service-bind-address</td>
-<td>If set, then the Geode member binds the embedded HTTP service to the specified address. If this property is not set but the HTTP service is enabled using <code class="ph codeph">http-service-port</code>, then Geode binds the HTTP service to the member's local address.</td>
+<td>If set, then the <%=vars.product_name%> member binds the embedded HTTP service to the specified address. If this property is not set but the HTTP service is enabled using <code class="ph codeph">http-service-port</code>, then <%=vars.product_name%> binds the HTTP service to the member's local address.</td>
 <td><em>not set</em></td>
 </tr>
 <tr class="odd">
@@ -123,7 +123,7 @@ In the `gemfire.properties` file, you configure a JMX manager as follows.
 </tr>
 <tr class="odd">
 <td>jmx-manager-bind-address</td>
-<td>By default, the JMX Manager when configured with a port listens on all the local host's addresses. You can use this property to configure which particular IP address or host name the JMX Manager will listen on. This property is ignored if <code class="ph codeph">jmx-manager</code> is false or <code class="ph codeph">jmx-manager-port</code> is zero. This address also applies to the Geode Pulse server if you are hosting a Pulse web application.</td>
+<td>By default, the JMX Manager when configured with a port listens on all the local host's addresses. You can use this property to configure which particular IP address or host name the JMX Manager will listen on. This property is ignored if <code class="ph codeph">jmx-manager</code> is false or <code class="ph codeph">jmx-manager-port</code> is zero. This address also applies to the <%=vars.product_name%> Pulse server if you are hosting a Pulse web application.</td>
 <td><em>not set</em></td>
 </tr>
 <tr class="even">
@@ -138,12 +138,12 @@ In the `gemfire.properties` file, you configure a JMX manager as follows.
 </tr>
 <tr class="even">
 <td>jmx-manager-port</td>
-<td>Port on which this JMX Manager listens for client connections. If this property is set to zero, Geode does not allow remote client connections. Alternatively, use the standard system properties supported by the JVM for configuring access from remote JMX clients. Ignored if jmx-manager is false. The Default RMI port is 1099.</td>
+<td>Port on which this JMX Manager listens for client connections. If this property is set to zero, <%=vars.product_name%> does not allow remote client connections. Alternatively, use the standard system properties supported by the JVM for configuring access from remote JMX clients. Ignored if jmx-manager is false. The Default RMI port is 1099.</td>
 <td>1099</td>
 </tr>
 <tr class="odd">
 <td>jmx-manager-ssl</td>
-<td>If true and <code class="ph codeph">jmx-manager-port</code> is not zero, the JMX Manager accepts only SSL connections. The ssl-enabled property does not apply to the JMX Manager, but the other SSL properties do. This allows SSL to be configured for just the JMX Manager without needing to configure it for the other Geode connections. Ignored if <code class="ph codeph">jmx-manager</code> is false.</td>
+<td>If true and <code class="ph codeph">jmx-manager-port</code> is not zero, the JMX Manager accepts only SSL connections. The ssl-enabled property does not apply to the JMX Manager, but the other SSL properties do. This allows SSL to be configured for just the JMX Manager without needing to configure it for the other <%=vars.product_name%> connections. Ignored if <code class="ph codeph">jmx-manager</code> is false.</td>
 <td>false</td>
 </tr>
 <tr class="even">
@@ -153,7 +153,7 @@ In the `gemfire.properties` file, you configure a JMX manager as follows.
 </tr>
 <tr class="odd">
 <td>jmx-manager-update-rate</td>
-<td>The rate, in milliseconds, at which this member pushes updates to any JMX Managers. Currently this value should be greater than or equal to the <code class="ph codeph">statistic-sample-rate</code>. Setting this value too high causes <code class="ph codeph">gfsh</code> and Geode Pulse to see stale values.</td>
+<td>The rate, in milliseconds, at which this member pushes updates to any JMX Managers. Currently this value should be greater than or equal to the <code class="ph codeph">statistic-sample-rate</code>. Setting this value too high causes <code class="ph codeph">gfsh</code> and <%=vars.product_name%> Pulse to see stale values.</td>
 <td>2000</td>
 </tr>
 </tbody>

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/list_of_mbean_notifications.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/list_of_mbean_notifications.html.md.erb b/geode-docs/managing/management/list_of_mbean_notifications.html.md.erb
index 0d9229d..5f7bcf3 100644
--- a/geode-docs/managing/management/list_of_mbean_notifications.html.md.erb
+++ b/geode-docs/managing/management/list_of_mbean_notifications.html.md.erb
@@ -21,7 +21,7 @@ limitations under the License.
 <a id="mbean_notifications_list"></a>
 
 
-This topic lists all available JMX notifications emitted by Geode MBeans.
+This topic lists all available JMX notifications emitted by <%=vars.product_name%> MBeans.
 
 Notifications are emitted by the following MBeans:
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/list_of_mbeans.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/list_of_mbeans.html.md.erb b/geode-docs/managing/management/list_of_mbeans.html.md.erb
index 57a4a24..7261028 100644
--- a/geode-docs/managing/management/list_of_mbeans.html.md.erb
+++ b/geode-docs/managing/management/list_of_mbeans.html.md.erb
@@ -1,6 +1,4 @@
----
-title: List of Geode JMX MBeans
----
+<% set_title("List of", product_name, "JMX MBeans") %>
 
 <!--
 Licensed to the Apache Software Foundation (ASF) under one or more
@@ -21,9 +19,9 @@ limitations under the License.
 <a id="topic_4BCF867697C3456D96066BAD7F39FC8B"></a>
 
 
-This topic provides descriptions for the various management and monitoring MBeans that are available in Geode.
+This topic provides descriptions for the various management and monitoring MBeans that are available in <%=vars.product_name%>.
 
-The following diagram illustrates the relationship between the different JMX MBeans that have been developed to manage and monitor Apache Geode.
+The following diagram illustrates the relationship between the different JMX MBeans that have been developed to manage and monitor <%=vars.product_name_long%>.
 
 <img src="../../images_svg/MBeans.svg" id="topic_4BCF867697C3456D96066BAD7F39FC8B__image_66525625D6804EDE9675D6CE509360A3" class="image" />
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/list_of_mbeans_full.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/list_of_mbeans_full.html.md.erb b/geode-docs/managing/management/list_of_mbeans_full.html.md.erb
index 169d4f6..ddc1f98 100644
--- a/geode-docs/managing/management/list_of_mbeans_full.html.md.erb
+++ b/geode-docs/managing/management/list_of_mbeans_full.html.md.erb
@@ -32,7 +32,7 @@ The JMX Manager node includes all local beans listed under [Managed Node MBeans]
 
 ## <a id="topic_14E3721DD0CF47D7AD8C742DFBE9FB9C__section_7B878B450B994514BDFE96571F0D3827" class="no-quick-link"></a>ManagerMXBean
 
-Represents the Geode Management layer for the hosting member. Controls the scope of management. This MBean provides `start` and `stop` methods to turn a managed node into a JMX Manager node or to stop a node from being a JMX Manager. For potential managers (`jmx-manager=true` and `jmx-manager-start=false`), this MBean is created when a Locator requests it.
+Represents the <%=vars.product_name%> Management layer for the hosting member. Controls the scope of management. This MBean provides `start` and `stop` methods to turn a managed node into a JMX Manager node or to stop a node from being a JMX Manager. For potential managers (`jmx-manager=true` and `jmx-manager-start=false`), this MBean is created when a Locator requests it.
 
 **Note:**
 You must configure the node to allow it to become a JMX Manager. See [Configuring a JMX Manager](jmx_manager_operations.html#topic_263072624B8D4CDBAD18B82E07AA44B6) for configuration information.
@@ -54,9 +54,9 @@ System-wide aggregate MBean that provides a high-level view of the entire distri
 
 The DistributedSystemMXBean provides APIs for performing distributed system-wide operations such as backing up all members, shutting down all members or showing various distributed system metrics.
 
-You can attach a standard JMX NotificationListener to this MBean to listen for notifications throughout the distributed system. See [Geode JMX MBean Notifications](mbean_notifications.html) for more information.
+You can attach a standard JMX NotificationListener to this MBean to listen for notifications throughout the distributed system. See [<%=vars.product_name%> JMX MBean Notifications](mbean_notifications.html) for more information.
 
-This MBean also provides some MBean model navigation APIS. These APIs should be used to navigate through all the MBeans exposed by a Geode System.
+This MBean also provides some MBean model navigation APIS. These APIs should be used to navigate through all the MBeans exposed by a <%=vars.product_name%> System.
 
 **MBean Details**
 
@@ -119,7 +119,7 @@ JMX Manager nodes will have managed node MBeans for themselves since they are al
 
 ## <a id="topic_48194A5BDF3F40F68E95A114DD702413__section_796A989549304BF7A536A33A913322A4" class="no-quick-link"></a>MemberMXBean
 
-Member's local view of its connection and cache. It is the primary gateway to manage a particular member. It exposes member level attributes and statistics. Some operations like `createCacheServer()` and `createManager()` will help to create some Geode resources. Any JMX client can connect to the MBean server and start managing a Geode Member by using this MBean.
+Member's local view of its connection and cache. It is the primary gateway to manage a particular member. It exposes member level attributes and statistics. Some operations like `createCacheServer()` and `createManager()` will help to create some <%=vars.product_name%> resources. Any JMX client can connect to the MBean server and start managing a <%=vars.product_name%> Member by using this MBean.
 
 See [MemberMXBean Notifications](list_of_mbean_notifications.html#reference_czt_hq2_vj) for a list of notifications emitted by this MBean.
 
@@ -136,7 +136,7 @@ See the `org.apache.geode.management.MemberMXBean` JavaDocs for information on a
 
 ## <a id="topic_48194A5BDF3F40F68E95A114DD702413__section_7287A7560650426E9B8249E2D87CE55F" class="no-quick-link"></a>CacheServerMXBean
 
-Represents the Geode CacheServer. Provides data and notifications about server, subscriptions, durable queues and indices.
+Represents the <%=vars.product_name%> CacheServer. Provides data and notifications about server, subscriptions, durable queues and indices.
 
 See [CacheServerMXBean Notifications](list_of_mbean_notifications.html#cacheservermxbean_notifications) for a list of notifications emitted by this MBean.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/management_and_monitoring.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/management_and_monitoring.html.md.erb b/geode-docs/managing/management/management_and_monitoring.html.md.erb
index 98b18e5..35d2c4b 100644
--- a/geode-docs/managing/management/management_and_monitoring.html.md.erb
+++ b/geode-docs/managing/management/management_and_monitoring.html.md.erb
@@ -1,6 +1,4 @@
----
-title:  Apache Geode Management and Monitoring
----
+<% set_title(product_name_long, "Management and Monitoring") %>
 
 <!--
 Licensed to the Apache Software Foundation (ASF) under one or more
@@ -19,33 +17,33 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Apache Geode provides APIs and tools for managing your distributed system and monitoring the health of your distributed system members.
+<%=vars.product_name_long%> provides APIs and tools for managing your distributed system and monitoring the health of your distributed system members.
 
--   **[Management and Monitoring Features](../../managing/management/management_and_monitoring_features.html)**
+-   **[Management and Monitoring Features](management_and_monitoring_features.html)**
 
-    Apache Geode uses a federated Open MBean strategy to manage and monitor all members of the distributed system. This strategy gives you a consolidated, single-agent view of the distributed system.
+    <%=vars.product_name_long%> uses a federated Open MBean strategy to manage and monitor all members of the distributed system. This strategy gives you a consolidated, single-agent view of the distributed system.
 
--   **[Overview of Geode Management and Monitoring Tools](../../managing/management/mm_overview.html)**
+-   **[Overview of <%=vars.product_name%> Management and Monitoring Tools](mm_overview.html)**
 
-    Geode provides a variety of management tools you can use to manage a Geode distributed system.
+    <%=vars.product_name%> provides a variety of management tools you can use to manage a <%=vars.product_name%> distributed system.
 
--   **[Architecture and Components](../../managing/management/management_system_overview.html)**
+-   **[Architecture and Components](management_system_overview.html)**
 
-    Geode's management and monitoring system consists of one JMX Manager node (there should only be one) and one or more managed nodes within a distributed system. All members in the distributed system are manageable through MBeans and Geode Management Service APIs.
+    <%=vars.product_name%>'s management and monitoring system consists of one JMX Manager node (there should only be one) and one or more managed nodes within a distributed system. All members in the distributed system are manageable through MBeans and <%=vars.product_name%> Management Service APIs.
 
--   **[JMX Manager Operations](../../managing/management/jmx_manager_node.html#topic_36C918B4202D45F3AC225FFD23B11D7C)**
+-   **[JMX Manager Operations](jmx_manager_node.html#topic_36C918B4202D45F3AC225FFD23B11D7C)**
 
     Any member can host an embedded JMX Manager, which provides a federated view of all MBeans for the distributed system. The member can be configured to be a manager at startup or anytime during its life by invoking the appropriate API calls on the ManagementService.
 
--   **[Federated MBean Architecture](../../managing/management/mbean_architecture.html)**
+-   **[Federated MBean Architecture](mbean_architecture.html)**
 
-    Geode uses MBeans to manage and monitor different parts of Geode. Geode's federated MBean architecture is scalable and allows you to have a single-agent view of a Geode distributed system.
+    <%=vars.product_name%> uses MBeans to manage and monitor different parts of <%=vars.product_name%>. <%=vars.product_name%>'s federated MBean architecture is scalable and allows you to have a single-agent view of a <%=vars.product_name%> distributed system.
 
--   **[Configuring RMI Registry Ports and RMI Connectors](../../managing/management/configuring_rmi_connector.html)**
+-   **[Configuring RMI Registry Ports and RMI Connectors](configuring_rmi_connector.html)**
 
-    Geode programmatically emulates out-of-the-box JMX provided by Java and creates a JMXServiceURL with RMI Registry and RMI Connector ports on all manageable members.
+    <%=vars.product_name%> programmatically emulates out-of-the-box JMX provided by Java and creates a JMXServiceURL with RMI Registry and RMI Connector ports on all manageable members.
 
--   **[Executing gfsh Commands through the Management API](../../managing/management/gfsh_and_management_api.html)**
+-   **[Executing gfsh Commands through the Management API](gfsh_and_management_api.html)**
 
     You can also use management APIs to execute gfsh commands programmatically.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/management_and_monitoring_features.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/management_and_monitoring_features.html.md.erb b/geode-docs/managing/management/management_and_monitoring_features.html.md.erb
index 1eb8c6c..425f7a1 100644
--- a/geode-docs/managing/management/management_and_monitoring_features.html.md.erb
+++ b/geode-docs/managing/management/management_and_monitoring_features.html.md.erb
@@ -19,18 +19,18 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Apache Geode uses a federated Open MBean strategy to manage and monitor all members of the distributed system. This strategy gives you a consolidated, single-agent view of the distributed system.
+<%=vars.product_name_long%> uses a federated Open MBean strategy to manage and monitor all members of the distributed system. This strategy gives you a consolidated, single-agent view of the distributed system.
 
 <a id="concept_F7B9EE348DA744D3BBDFD68E7F48A604__section_37CECE9B26644505A79784EA0CD1FDAE"></a>
 Application and manager development is much easier because you do not have to find the right MBeanServer to make a request on an MBean. Instead, you interact with a single MBeanServer that aggregates MBeans from all other local and remote MBeanServers.
 
-Some other key advantages and features of Geode administration architecture:
+Some other key advantages and features of <%=vars.product_name%> administration architecture:
 
--   Geode monitoring is tightly integrated into Geode's processes instead of running in a separately installed and configured monitoring agent. You can use the same framework to actually manage Geode and perform administrative operations, not just monitor it.
--   All Geode MBeans are *MXBeans*. They represent useful and relevant information on the state of the distributed system and all its members. Because MXBeans use the Open MBean model with a predefined set of types, clients and remote management programs no longer require access to model-specific classes representing your MBean types. Using MXBeans adds flexibility to your selection of clients and makes the Geode management and monitoring much easier to use.
+-   <%=vars.product_name%> monitoring is tightly integrated into <%=vars.product_name%>'s processes instead of running in a separately installed and configured monitoring agent. You can use the same framework to actually manage <%=vars.product_name%> and perform administrative operations, not just monitor it.
+-   All <%=vars.product_name%> MBeans are *MXBeans*. They represent useful and relevant information on the state of the distributed system and all its members. Because MXBeans use the Open MBean model with a predefined set of types, clients and remote management programs no longer require access to model-specific classes representing your MBean types. Using MXBeans adds flexibility to your selection of clients and makes the <%=vars.product_name%> management and monitoring much easier to use.
 -   Each member in the distributed system is manageable through MXBeans, and each member hosts its own MXBeans in a Platform MBeanServer.
--   Any Geode member can be configured to provide a federated view of all the MXBeans for all members in a Geode cluster.
--   Geode has also modified its use of JMX to be industry-standard and friendly to generic JMX clients. You can now easily monitor or manage the distributed system by using any third-party tool that is compliant with JMX. For example, JConsole.
+-   Any <%=vars.product_name%> member can be configured to provide a federated view of all the MXBeans for all members in a <%=vars.product_name%> cluster.
+-   <%=vars.product_name%> has also modified its use of JMX to be industry-standard and friendly to generic JMX clients. You can now easily monitor or manage the distributed system by using any third-party tool that is compliant with JMX. For example, JConsole.
 
 ## <a id="concept_F7B9EE348DA744D3BBDFD68E7F48A604__section_A3166A9657044E088DA0FE2C2B8325BE" class="no-quick-link"></a>References
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/management_system_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/management_system_overview.html.md.erb b/geode-docs/managing/management/management_system_overview.html.md.erb
index 13a3de0..965fe7b 100644
--- a/geode-docs/managing/management/management_system_overview.html.md.erb
+++ b/geode-docs/managing/management/management_system_overview.html.md.erb
@@ -19,7 +19,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Geode's management and monitoring system consists of one JMX Manager node (there should only be one) and one or more managed nodes within a distributed system. All members in the distributed system are manageable through MBeans and Geode Management Service APIs.
+<%=vars.product_name%>'s management and monitoring system consists of one JMX Manager node (there should only be one) and one or more managed nodes within a distributed system. All members in the distributed system are manageable through MBeans and <%=vars.product_name%> Management Service APIs.
 
 ## <a id="concept_1BAE2CE1146B4347ABD61F50B9F9781F__section_ABE7007BE3C244FBA0418C4B5BE7E1F2" class="no-quick-link"></a>Architecture
 
@@ -27,20 +27,20 @@ The following diagram depicts the architecture of the management and monitoring
 
 <img src="../../images_svg/JMX_Architecture.svg" id="concept_1BAE2CE1146B4347ABD61F50B9F9781F__image_1E9E8575E13D4087BC47B6A288097B7A" class="image" />
 
-In this architecture every Geode member is manageable. All Geode MBeans for the local Geode processes are automatically registered in the Platform MBeanServer (the default MBeanServer of each JVM that hosts platform MXBeans.)
+In this architecture every <%=vars.product_name%> member is manageable. All <%=vars.product_name%> MBeans for the local <%=vars.product_name%> processes are automatically registered in the Platform MBeanServer (the default MBeanServer of each JVM that hosts platform MXBeans.)
 
 ## <a id="concept_1BAE2CE1146B4347ABD61F50B9F9781F__section_1CF2B237C16F4095A609E62F0C7146C1" class="no-quick-link"></a>Managed Node
 
 Each member of a distributed system is a managed node. Any node that is not currently also acting as a JMX Manager node is referred to simply as a managed node. A managed node has the following resources so that it can answer JMX queries both locally and remotely:
 
--   Local MXBeans that represent the locally monitored components on the node. See [List of Geode JMX MBeans](list_of_mbeans.html#topic_4BCF867697C3456D96066BAD7F39FC8B) for a list of possible MXBeans existing for the managed node.
+-   Local MXBeans that represent the locally monitored components on the node. See [List of <%=vars.product_name%> JMX MBeans](list_of_mbeans.html#topic_4BCF867697C3456D96066BAD7F39FC8B) for a list of possible MXBeans existing for the managed node.
 -   Built-in platform MBeans.
 
 ## <a id="concept_1BAE2CE1146B4347ABD61F50B9F9781F__section_8604838507194C8B86F1420FBA46894C" class="no-quick-link"></a>JMX Manager Node
 
-A JMX Manager node is a member that can manage other Geode members --that is, other managed nodes -- as well as itself. A JMX Manager node can manage all other members in the distributed system.
+A JMX Manager node is a member that can manage other <%=vars.product_name%> members --that is, other managed nodes -- as well as itself. A JMX Manager node can manage all other members in the distributed system.
 
-To convert a managed node to a JMX Manager node, you configure the Geode property `jmx-manager=true`, in the `gemfire.properties` file, and start the member as a JMX Manager node.
+To convert a managed node to a JMX Manager node, you configure the <%=vars.product_name%> property `jmx-manager=true`, in the `gemfire.properties` file, and start the member as a JMX Manager node.
 
 You start the member as a JMX Manager node when you provide`                     --J=-Dgemfire.jmx-manager=true` as an argument to either the`                     start server` or `start locator` command. See [Starting a JMX Manager](jmx_manager_operations.html#topic_686158E9AFBD47518BE1B4BEB232C190) for more information.
 
@@ -58,15 +58,15 @@ The JMX Manager node has the following extra resources allocated so that it can
 
 ## <a id="concept_1BAE2CE1146B4347ABD61F50B9F9781F__section_32D9F98189B14AA09BAC5E843EC18EDA" class="no-quick-link"></a>JMX Integration
 
-Management and monitoring tools such as gfsh command-line interface and Pulse use JMX/RMI as the communication layer to connect to Geode nodes. All Geode processes by default allow JMX connections to the Platform MBeanServer from localhost. By default, both managed nodes and JMX manager nodes have RMI connectors enabled to allow JMX client connections.
+Management and monitoring tools such as gfsh command-line interface and Pulse use JMX/RMI as the communication layer to connect to <%=vars.product_name%> nodes. All <%=vars.product_name%> processes by default allow JMX connections to the Platform MBeanServer from localhost. By default, both managed nodes and JMX manager nodes have RMI connectors enabled to allow JMX client connections.
 
 JConsole (and other similar JMX clients that support Sun's Attach API) can connect to any local JVM without requiring an RMI connector by using the Attach API. This allows connections from the same machine.
 
 JConsole (and other JMX clients) can connect to any JVM if that JVM is configured to start an RMI connector. This allows remote connections from other machines.
 
-JConsole can connect to any Geode member, but if it connects to a non-JMX-Manager member, JConsole only detects the local MBeans for the node, and not MBeans for the cluster.
+JConsole can connect to any <%=vars.product_name%> member, but if it connects to a non-JMX-Manager member, JConsole only detects the local MBeans for the node, and not MBeans for the cluster.
 
-When a Geode locator or server becomes a JMX Manager for the cluster, it enables the RMI connector. JConsole can then connect only to that one JVM to view the MBeans for the entire cluster. It does not need to connect to all the other JVMs. Geode manages the inter-JVM communication required to provide a federated view of all MBeans in the distributed system.
+When a <%=vars.product_name%> locator or server becomes a JMX Manager for the cluster, it enables the RMI connector. JConsole can then connect only to that one JVM to view the MBeans for the entire cluster. It does not need to connect to all the other JVMs. <%=vars.product_name%> manages the inter-JVM communication required to provide a federated view of all MBeans in the distributed system.
 
 `gfsh` can only connect to a JMX Manager or to a locator. If connected to a locator, the locator provides the necessary connection information for the existing JMX Manager. If the locator detects a JMX Manager is not already running in the cluster, the locator makes itself a JMX Manager. gfsh cannot connect to other non-Manager or non-locator members.
 
@@ -74,9 +74,9 @@ For information on how to configure the RMI registry and RMI connector, see [Con
 
 ## <a id="concept_1BAE2CE1146B4347ABD61F50B9F9781F__section_A3F9E1594982480DA019CBA3E93CA895" class="no-quick-link"></a>Management APIs
 
-Geode management APIs represent the Geode cluster to a JMX user. However, they do not provide functionality that is otherwise present in JMX. They only provide a gateway into various services exclusively offered by Geode monitoring and management.
+<%=vars.product_name%> management APIs represent the <%=vars.product_name%> cluster to a JMX user. However, they do not provide functionality that is otherwise present in JMX. They only provide a gateway into various services exclusively offered by <%=vars.product_name%> monitoring and management.
 
-The entry point to Geode management is through the ManagementService interface. For example, to create an instance of the Management Service:
+The entry point to <%=vars.product_name%> management is through the ManagementService interface. For example, to create an instance of the Management Service:
 
 ``` pre
 ManagementService service = ManagementService.getManagementService(cache);
@@ -84,12 +84,12 @@ ManagementService service = ManagementService.getManagementService(cache);
 
 The resulting ManagementService instance is specific to the provided cache and its distributed system. The implementation of getManagementService is a singleton for now but may eventually support multiple cache instances.
 
-You can use the Geode management APIs to accomplish the following tasks:
+You can use the <%=vars.product_name%> management APIs to accomplish the following tasks:
 
 -   Monitor the health status of clients.
 -   Obtain the status and results of individual disk backups.
 -   View metrics related to disk usage and performance for a particular member.
--   Browse Geode properties set for a particular member.
+-   Browse <%=vars.product_name%> properties set for a particular member.
 -   View JVM metrics such as memory, heap, and thread usage.
 -   View network metrics, such as bytes received and sent.
 -   View partition region attributes such as total number of buckets, redundant copy, and maximum memory information.
@@ -100,13 +100,13 @@ See the JavaDocs for the `org.apache.geode.management` package for more details.
 
 You can also execute gfsh commands using the ManagementService API. See [Executing gfsh Commands through the Management API](gfsh_and_management_api.html#concept_451F0978285245E69C3E8DE795BD8635) and the JavaDocs for the `org.apache.geode.management.cli` package.
 
-## <a id="concept_1BAE2CE1146B4347ABD61F50B9F9781F__section_E69A93A6309E4747B52850D81FE1674E" class="no-quick-link"></a>Geode Management and Monitoring Tools
+## <a id="concept_1BAE2CE1146B4347ABD61F50B9F9781F__section_E69A93A6309E4747B52850D81FE1674E" class="no-quick-link"></a><%=vars.product_name%> Management and Monitoring Tools
 
-This section lists the currently available tools for managing and monitoring Geode:
+This section lists the currently available tools for managing and monitoring <%=vars.product_name%>:
 
--   **gfsh**. Apache Geode command-line interface that provides a simple & powerful command shell that supports the administration, debugging and deployment of Geode applications. It features context sensitive help, scripting and the ability to invoke any commands from within the application using a simple API. See [gfsh](../../tools_modules/gfsh/chapter_overview.html).
--   **Geode Pulse**. Easy-to-use, browser-based dashboard for monitoring Geode deployments. Geode Pulse provides an integrated view of all Geode members within a distributed system. See [Geode Pulse](../../tools_modules/pulse/pulse-overview.html).
--   **Pulse Data Browser**. This Geode Pulse utility provides a graphical interface for performing OQL ad-hoc queries in a Geode distributed system. See [Data Browser](../../tools_modules/pulse/pulse-views.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser).
+-   **gfsh**. <%=vars.product_name_long%> command-line interface that provides a simple & powerful command shell that supports the administration, debugging and deployment of <%=vars.product_name%> applications. It features context sensitive help, scripting and the ability to invoke any commands from within the application using a simple API. See [gfsh](../../tools_modules/gfsh/chapter_overview.html).
+-   **<%=vars.product_name%> Pulse**. Easy-to-use, browser-based dashboard for monitoring <%=vars.product_name%> deployments. <%=vars.product_name%> Pulse provides an integrated view of all <%=vars.product_name%> members within a distributed system. See [<%=vars.product_name%> Pulse](../../tools_modules/pulse/pulse-overview.html).
+-   **Pulse Data Browser**. This <%=vars.product_name%> Pulse utility provides a graphical interface for performing OQL ad-hoc queries in a <%=vars.product_name%> distributed system. See [Data Browser](../../tools_modules/pulse/pulse-views.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser).
 -   **Other Java Monitoring Tools such as JConsole and jvisualvm.** JConsole is a JMX-based management and monitoring tool provided in the Java 2 Platform that provides information on the performance and consumption of resources by Java applications. See [http://docs.oracle.com/javase/6/docs/technotes/guides/management/jconsole.html](http://docs.oracle.com/javase/6/docs/technotes/guides/management/jconsole.html). **Java VisualVM (jvisualvm)** is a profiling tool for analyzing your Java Virtual Machine. Java VisualVM is useful to Java application developers to troubleshoot applications and to monitor and improve the applications' performance. Java VisualVM can allow developers to generate and analyse heap dumps, track down memory leaks, perform and monitor garbage collection, and perform lightweight memory and CPU profiling. For more details on using jvisualvm, see [http://docs.oracle.com/javase/6/docs/technotes/tools/share/jvisualvm.html](http://docs.oracle.com/javase/6/docs/technot
 es/tools/share/jvisualvm.html).
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/mbean_architecture.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/mbean_architecture.html.md.erb b/geode-docs/managing/management/mbean_architecture.html.md.erb
index a982701..b82348e 100644
--- a/geode-docs/managing/management/mbean_architecture.html.md.erb
+++ b/geode-docs/managing/management/mbean_architecture.html.md.erb
@@ -19,13 +19,13 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-Geode uses MBeans to manage and monitor different parts of Geode. Geode's federated MBean architecture is scalable and allows you to have a single-agent view of a Geode distributed system.
+<%=vars.product_name%> uses MBeans to manage and monitor different parts of <%=vars.product_name%>. <%=vars.product_name%>'s federated MBean architecture is scalable and allows you to have a single-agent view of a <%=vars.product_name%> distributed system.
 
-## <a id="concept_40A475F186E249C597681069C835CF65__section_19948055E4184110910B11CD979A923A" class="no-quick-link"></a>Federation of Geode MBeans and MBeanServers
+## <a id="concept_40A475F186E249C597681069C835CF65__section_19948055E4184110910B11CD979A923A" class="no-quick-link"></a>Federation of <%=vars.product_name%> MBeans and MBeanServers
 
 Federation of the MBeanServers means that one member, the JMX Manager Node, can provide a proxied view of all the MBeans that the MBeanServer hosts. Federation also means that operations and notifications are spread across the distributed system.
 
-Geode federation takes care of the following functionality:
+<%=vars.product_name%> federation takes care of the following functionality:
 
 -   MBean proxy creation
 -   MBean state propagation
@@ -34,7 +34,7 @@ Geode federation takes care of the following functionality:
 
 ## <a id="concept_40A475F186E249C597681069C835CF65__section_AD13594ADA814194897488CF96BCC479" class="no-quick-link"></a>MBean Proxy Naming Conventions
 
-Each Geode MBean follows a particular naming convention for easier grouping. For example:
+Each <%=vars.product_name%> MBean follows a particular naming convention for easier grouping. For example:
 
 ``` pre
 GemFire:type=Member,service=LockService,name=<dlsName>,memberName=<memberName>
@@ -52,25 +52,25 @@ GemFire:type=Member,member=<Node1>
 
 ## <a id="concept_40A475F186E249C597681069C835CF65__section_8F9D375A185E476FB50E7D6E30BE2FC7" class="no-quick-link"></a>Use of MXBeans
 
-In its Management API, Geode provides MXBeans to ensure that any MBeans that are created are usable by any client, including remote clients, without requiring the client to access specific classes in order to access contents of the MBean.
+In its Management API, <%=vars.product_name%> provides MXBeans to ensure that any MBeans that are created are usable by any client, including remote clients, without requiring the client to access specific classes in order to access contents of the MBean.
 
 ## <a id="concept_40A475F186E249C597681069C835CF65__section_DCC1B2AB80B04E8CBED041C1F3BDAB5F" class="no-quick-link"></a>MBean Proxy Creation
 
-Geode proxies are inherently local MBeans. Every Geode JMX manager member hosts proxies pointing to the local MBeans of every managed node. Proxy MBeans will also emit any notification emitted by local MBeans in managed nodes when an event occurs in that managed node.
+<%=vars.product_name%> proxies are inherently local MBeans. Every <%=vars.product_name%> JMX manager member hosts proxies pointing to the local MBeans of every managed node. Proxy MBeans will also emit any notification emitted by local MBeans in managed nodes when an event occurs in that managed node.
 
 **Note:**
 Aggregate MBeans on the JMX Manager node are not proxied.
 
--   **[List of Geode JMX MBeans](../../managing/management/list_of_mbeans.html)**
+-   **[List of <%=vars.product_name%> JMX MBeans](../../managing/management/list_of_mbeans.html)**
 
-    This topic provides descriptions for the various management and monitoring MBeans that are available in Geode.
+    This topic provides descriptions for the various management and monitoring MBeans that are available in <%=vars.product_name%>.
 
--   **[Browsing Geode MBeans through JConsole](../../managing/management/mbeans_jconsole.html)**
+-   **[Browsing <%=vars.product_name%> MBeans through JConsole](../../managing/management/mbeans_jconsole.html)**
 
-    You can browse all the Geode MBeans in your distributed system by using JConsole.
+    You can browse all the <%=vars.product_name%> MBeans in your distributed system by using JConsole.
 
--   **[Geode JMX MBean Notifications](../../managing/management/mbean_notifications.html)**
+-   **[<%=vars.product_name%> JMX MBean Notifications](../../managing/management/mbean_notifications.html)**
 
-    Apache Geode MBeans emit notifications when specific events occur or if an alert is raised in the Geode system. Using standard JMX APIs, users can add notification handlers to listen for these events.
+    Apache <%=vars.product_name%> MBeans emit notifications when specific events occur or if an alert is raised in the <%=vars.product_name%> system. Using standard JMX APIs, users can add notification handlers to listen for these events.
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/mbean_notifications.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/mbean_notifications.html.md.erb b/geode-docs/managing/management/mbean_notifications.html.md.erb
index bf1a799..c76bd4d 100644
--- a/geode-docs/managing/management/mbean_notifications.html.md.erb
+++ b/geode-docs/managing/management/mbean_notifications.html.md.erb
@@ -1,6 +1,4 @@
----
-title: Geode JMX MBean Notifications
----
+<% set_title(product_name, "JMX MBean Notifications") %>
 
 <!--
 Licensed to the Apache Software Foundation (ASF) under one or more
@@ -21,7 +19,7 @@ limitations under the License.
 <a id="topic_czt_hq2_vk"></a>
 
 
-Apache Geode MBeans emit notifications when specific events occur or if an alert is raised in the Geode system. Using standard JMX APIs, users can add notification handlers to listen for these events.
+<%=vars.product_name_long%> MBeans emit notifications when specific events occur or if an alert is raised in the <%=vars.product_name%> system. Using standard JMX APIs, users can add notification handlers to listen for these events.
 
 -   **[Notification Federation](notification_federation_and_alerts.html)**
 
@@ -29,6 +27,6 @@ Apache Geode MBeans emit notifications when specific events occur or if an alert
 
 -   **[List of JMX MBean Notifications](list_of_mbean_notifications.html)**
 
-    This topic lists all available JMX notifications emitted by Geode MBeans.
+    This topic lists all available JMX notifications emitted by <%=vars.product_name%> MBeans.
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/management/mbeans_jconsole.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/mbeans_jconsole.html.md.erb b/geode-docs/managing/management/mbeans_jconsole.html.md.erb
index 88dde64..2e607fa 100644
--- a/geode-docs/managing/management/mbeans_jconsole.html.md.erb
+++ b/geode-docs/managing/management/mbeans_jconsole.html.md.erb
@@ -1,6 +1,4 @@
----
-title:  Browsing Geode MBeans through JConsole
----
+<% set_title("Browsing", product_name, "MBeans through JConsole") %>
 
 <!--
 Licensed to the Apache Software Foundation (ASF) under one or more
@@ -19,9 +17,9 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-You can browse all the Geode MBeans in your distributed system by using JConsole.
+You can browse all the <%=vars.product_name%> MBeans in your distributed system by using JConsole.
 
-To view Geode MBeans through JConsole, perform the following steps:
+To view <%=vars.product_name%> MBeans through JConsole, perform the following steps:
 
 1.  Start a `gfsh` prompt.
 2.  Connect to a running distributed system by either connecting to a locator with an embedded JMX Manager or connect directly to a JMX Manager. For example:
@@ -46,7 +44,7 @@ To view Geode MBeans through JConsole, perform the following steps:
 
 4.  On the JConsole screen, click on the MBeans tab. Expand **GemFire**. Then expand each MBean to browse individual MBean attributes, operations and notifications.
 
-    The following is an example screenshot of the MBean hierarchy in a Geode distributed system:
+    The following is an example screenshot of the MBean hierarchy in a <%=vars.product_name%> distributed system:
 
     <img src="../../images/jconsole_mbeans.png" id="concept_492532E145834248997BD23BCAC7AD45__image_7A45BE69B67A44A7A8AD40343A2B0AEB" class="image" />
 


[14/25] geode git commit: GEODE-2859: minor cleanup

Posted by ud...@apache.org.
GEODE-2859: minor cleanup


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/a0ad5689
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/a0ad5689
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/a0ad5689

Branch: refs/heads/feature/GEODE-3503
Commit: a0ad56896a8ed183f1e1db0ddc24e6857640de58
Parents: c95b32e
Author: Kirk Lund <kl...@apache.org>
Authored: Tue Aug 22 14:41:32 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue Aug 22 15:37:35 2017 -0700

----------------------------------------------------------------------
 .../cli/commands/ShowDeadlockDUnitTest.java     | 31 +++++++++++---------
 1 file changed, 17 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/a0ad5689/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ShowDeadlockDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ShowDeadlockDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ShowDeadlockDUnitTest.java
index cd68350..ca2a043 100755
--- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ShowDeadlockDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ShowDeadlockDUnitTest.java
@@ -22,7 +22,6 @@ import java.io.File;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
@@ -41,7 +40,9 @@ import org.apache.geode.cache.execute.Function;
 import org.apache.geode.cache.execute.FunctionContext;
 import org.apache.geode.cache.execute.FunctionService;
 import org.apache.geode.cache.execute.ResultCollector;
+import org.apache.geode.cache30.CacheTestCase;
 import org.apache.geode.distributed.internal.deadlock.GemFireDeadlockDetector;
+import org.apache.geode.distributed.internal.deadlock.GemFireDeadlockDetectorDUnitTest;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.management.cli.CommandStatement;
 import org.apache.geode.management.cli.Result;
@@ -52,24 +53,21 @@ import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
 import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.SerializableCallable;
 import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
 
 /**
- * This DUnit tests uses same code as GemFireDeadlockDetectorDUnitTest and uses the command
- * processor for executing the "show deadlock" command
+ * Distributed tests for show deadlock command in {@link MiscellaneousCommands}.
+ *
+ * @see GemFireDeadlockDetectorDUnitTest
  */
 @Category(DistributedTest.class)
-public class ShowDeadlockDUnitTest extends JUnit4CacheTestCase {
+public class ShowDeadlockDUnitTest extends CacheTestCase {
+
   private static final Set<Thread> stuckThreads =
       Collections.synchronizedSet(new HashSet<Thread>());
-  private static final Lock lock = new ReentrantLock();
 
-  private static final Map<String, String> EMPTY_ENV = Collections.emptyMap();
-
-  @Rule
-  public SerializableTemporaryFolder temporaryFolder = new SerializableTemporaryFolder();
+  private static final Lock lock = new ReentrantLock();
 
   private transient VM vm0;
   private transient VM vm1;
@@ -77,6 +75,9 @@ public class ShowDeadlockDUnitTest extends JUnit4CacheTestCase {
   private transient InternalDistributedMember member0;
   private transient InternalDistributedMember member1;
 
+  @Rule
+  public SerializableTemporaryFolder temporaryFolder = new SerializableTemporaryFolder();
+
   @Before
   public void setup() {
     Host host = Host.getHost(0);
@@ -123,7 +124,6 @@ public class ShowDeadlockDUnitTest extends JUnit4CacheTestCase {
     assertTrue(outputFile.exists());
   }
 
-
   @Test
   public void testDistributedDeadlockWithFunction() throws Exception {
     // Have two threads lock locks on different members in different orders.
@@ -155,7 +155,6 @@ public class ShowDeadlockDUnitTest extends JUnit4CacheTestCase {
     });
   }
 
-
   private void createCache(Properties props) {
     getSystem(props);
     getCache();
@@ -174,6 +173,7 @@ public class ShowDeadlockDUnitTest extends JUnit4CacheTestCase {
 
   private InternalDistributedMember createCache(VM vm) {
     return (InternalDistributedMember) vm.invoke(new SerializableCallable<Object>() {
+      @Override
       public Object call() {
         getCache();
         return getSystem().getDistributedMember();
@@ -193,20 +193,23 @@ public class ShowDeadlockDUnitTest extends JUnit4CacheTestCase {
   private static class TestFunction implements Function<Object> {
     private static final int LOCK_WAIT_TIME = 1000;
 
+    @Override
     public boolean hasResult() {
       return true;
     }
 
+    @Override
     public void execute(FunctionContext<Object> context) {
       try {
         stuckThreads.add(Thread.currentThread());
         lock.tryLock(LOCK_WAIT_TIME, TimeUnit.SECONDS);
-      } catch (InterruptedException e) {
-        // ingore
+      } catch (InterruptedException ignored) {
+        // ignored
       }
       context.getResultSender().lastResult(null);
     }
 
+    @Override
     public boolean isHA() {
       return false;
     }


[04/25] geode git commit: GEODE-3406: Address PR feedback

Posted by ud...@apache.org.
GEODE-3406: Address PR feedback

* Rename ExecutionContext -> MessageExecutionContext
* Properly close socket when processing ProtoBuf request in TcpServer
* GetAvailableServersRequestHandler guards against `null` servers
* minor style changes

Signed-off-by: Alexander Murmann <am...@pivotal.io>


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/0eb320fa
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/0eb320fa
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/0eb320fa

Branch: refs/heads/feature/GEODE-3503
Commit: 0eb320fad2071c0ebb8cb1e74576ed4a231a9e3b
Parents: 530f48f
Author: Hitesh Khamesra <hk...@pivotal.io>
Authored: Fri Aug 18 10:44:57 2017 -0700
Committer: Hitesh Khamesra <hk...@pivotal.io>
Committed: Tue Aug 22 10:55:12 2017 -0700

----------------------------------------------------------------------
 .../distributed/internal/InternalLocator.java   |   5 +-
 .../internal/tcpserver/TcpServer.java           | 171 ++++++++++---------
 .../geode/internal/cache/InternalCache.java     |   4 +-
 .../sockets/ClientProtocolMessageHandler.java   |   2 +-
 .../ClientProtocolMessageHandlerLoader.java     |  64 -------
 .../cache/tier/sockets/ExecutionContext.java    |  54 ------
 .../GenericProtocolServerConnection.java        |   2 +-
 .../tier/sockets/MessageExecutionContext.java   |  56 ++++++
 .../tier/sockets/MessageHandlerFactory.java     |  19 +++
 .../tier/sockets/ServerConnectionFactory.java   |  22 +--
 .../cache/tier/sockets/TcpServerFactory.java    |  39 +++++
 .../AutoConnectionSourceImplJUnitTest.java      |  10 +-
 .../tcpserver/TCPServerSSLJUnitTest.java        |   3 +-
 .../internal/tcpserver/TcpServerJUnitTest.java  |   5 +-
 .../tier/sockets/TcpServerFactoryTest.java      |  19 +++
 .../test/dunit/standalone/DUnitLauncher.java    |   2 +
 .../protocol/operations/OperationHandler.java   |   4 +-
 .../protocol/protobuf/ProtobufOpsProcessor.java |   5 +-
 .../protobuf/ProtobufStreamProcessor.java       |   7 +-
 .../GetAllRequestOperationHandler.java          |   4 +-
 .../GetAvailableServersOperationHandler.java    |  14 +-
 .../GetRegionNamesRequestOperationHandler.java  |   4 +-
 .../GetRegionRequestOperationHandler.java       |   4 +-
 .../operations/GetRequestOperationHandler.java  |   4 +-
 .../PutAllRequestOperationHandler.java          |   4 +-
 .../operations/PutRequestOperationHandler.java  |   4 +-
 .../RemoveRequestOperationHandler.java          |   4 +-
 .../RoundTripLocatorConnectionJUnitTest.java    |  19 ++-
 .../protobuf/ProtobufStreamProcessorTest.java   |   4 +-
 .../GetAllRequestOperationHandlerJUnitTest.java |   6 +-
 ...ailableServersOperationHandlerJUnitTest.java |  42 +++--
 ...onNamesRequestOperationHandlerJUnitTest.java |   6 +-
 ...tRegionRequestOperationHandlerJUnitTest.java |   7 +-
 .../GetRequestOperationHandlerJUnitTest.java    |  12 +-
 .../PutAllRequestOperationHandlerJUnitTest.java |   8 +-
 .../PutRequestOperationHandlerJUnitTest.java    |  10 +-
 .../RemoveRequestOperationHandlerJUnitTest.java |  10 +-
 37 files changed, 361 insertions(+), 298 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
index 8d2daf6..06603cc 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
@@ -62,6 +62,7 @@ import org.apache.geode.distributed.internal.tcpserver.TcpServer;
 import org.apache.geode.internal.admin.remote.DistributionLocatorId;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.internal.cache.tier.sockets.TcpServerFactory;
 import org.apache.geode.internal.cache.wan.WANServiceProvider;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.logging.InternalLogWriter;
@@ -498,8 +499,8 @@ public class InternalLocator extends Locator implements ConnectListener {
     ThreadGroup group = LoggingThreadGroup.createThreadGroup("Distribution locators", logger);
     this.stats = new LocatorStats();
 
-    this.server = new TcpServer(port, this.bindAddress, null, this.config, this.handler,
-        new DelayedPoolStatHelper(), group, this.toString(), this);
+    this.server = new TcpServerFactory().makeTcpServer(port, this.bindAddress, null, this.config,
+        this.handler, new DelayedPoolStatHelper(), group, this.toString(), this);
   }
 
   // Reset the file names with the correct port number if startLocatorAndDS was called with port

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
index c3d51c1..d471062 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
@@ -14,6 +14,32 @@
  */
 package org.apache.geode.distributed.internal.tcpserver;
 
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.EOFException;
+import java.io.File;
+import java.io.IOException;
+import java.io.StreamCorruptedException;
+import java.net.InetAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketAddress;
+import java.net.URL;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import javax.net.ssl.SSLException;
+
+import org.apache.logging.log4j.Logger;
+
 import org.apache.geode.CancelException;
 import org.apache.geode.DataSerializer;
 import org.apache.geode.SystemFailure;
@@ -32,39 +58,13 @@ import org.apache.geode.internal.VersionedDataInputStream;
 import org.apache.geode.internal.VersionedDataOutputStream;
 import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.internal.cache.tier.Acceptor;
-import org.apache.geode.internal.cache.tier.sockets.AcceptorImpl;
-import org.apache.geode.internal.cache.tier.sockets.ClientProtocolMessageHandlerLoader;
 import org.apache.geode.internal.cache.tier.sockets.ClientProtocolMessageHandler;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.HandShake;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.internal.net.SocketCreator;
 import org.apache.geode.internal.net.SocketCreatorFactory;
 import org.apache.geode.internal.security.SecurableCommunicationChannel;
-import org.apache.logging.log4j.Logger;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.EOFException;
-import java.io.File;
-import java.io.IOException;
-import java.io.StreamCorruptedException;
-import java.net.InetAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.net.SocketAddress;
-import java.net.URL;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import javax.net.ssl.SSLException;
 
 /**
  * TCP server which listens on a port and delegates requests to a request handler. The server uses
@@ -99,6 +99,11 @@ public class TcpServer {
 
   private static/* GemStoneAddition */ final Map GOSSIP_TO_GEMFIRE_VERSION_MAP = new HashMap();
 
+  /**
+   * For the new client-server protocol, which ignores the usual handshake mechanism.
+   */
+  public static final byte PROTOBUF_CLIENT_SERVER_PROTOCOL = (byte) 110;
+
   // For test purpose only
   public static boolean isTesting = false;
   // Non-final field for testing to avoid any security holes in system.
@@ -126,8 +131,10 @@ public class TcpServer {
   private InetAddress bind_address;
   private volatile boolean shuttingDown = false; // GemStoneAddition
   private final PoolStatHelper poolHelper;
-  private InternalLocator internalLocator;
+  private final InternalLocator internalLocator;
   private final TcpHandler handler;
+  private ClientProtocolMessageHandler messageHandler;
+
 
   private PooledExecutorWithDMStats executor;
   private final ThreadGroup threadGroup;
@@ -150,12 +157,14 @@ public class TcpServer {
 
   public TcpServer(int port, InetAddress bind_address, Properties sslConfig,
       DistributionConfigImpl cfg, TcpHandler handler, PoolStatHelper poolHelper,
-      ThreadGroup threadGroup, String threadName, InternalLocator internalLocator) {
+      ThreadGroup threadGroup, String threadName, InternalLocator internalLocator,
+      ClientProtocolMessageHandler messageHandler) {
     this.port = port;
     this.bind_address = bind_address;
     this.handler = handler;
     this.poolHelper = poolHelper;
     this.internalLocator = internalLocator;
+    this.messageHandler = messageHandler;
     // register DSFID types first; invoked explicitly so that all message type
     // initializations do not happen in first deserialization on a possibly
     // "precious" thread
@@ -365,69 +374,67 @@ public class TcpServer {
 
         short versionOrdinal;
         if (gossipVersion == NON_GOSSIP_REQUEST_VERSION) {
-          if (input.readUnsignedByte() == AcceptorImpl.PROTOBUF_CLIENT_SERVER_PROTOCOL
+          if (input.readUnsignedByte() == PROTOBUF_CLIENT_SERVER_PROTOCOL
               && Boolean.getBoolean("geode.feature-protobuf-protocol")) {
-            ClientProtocolMessageHandler messageHandler = ClientProtocolMessageHandlerLoader.load();
             messageHandler.receiveMessage(input, socket.getOutputStream(),
-                new ExecutionContext(internalLocator));
+                new MessageExecutionContext(internalLocator));
           } else {
             rejectUnknownProtocolConnection(socket, gossipVersion);
-            return;
           }
-        }
-        if (gossipVersion <= getCurrentGossipVersion()
-            && GOSSIP_TO_GEMFIRE_VERSION_MAP.containsKey(gossipVersion)) {
-          // Create a versioned stream to remember sender's GemFire version
-          versionOrdinal = (short) GOSSIP_TO_GEMFIRE_VERSION_MAP.get(gossipVersion);
         } else {
-          // Close the socket. We can not accept requests from a newer version
-          rejectUnknownProtocolConnection(socket, gossipVersion);
-          return;
-        }
-        if (Version.GFE_71.compareTo(versionOrdinal) <= 0) {
-          // Recent versions of TcpClient will send the version ordinal
-          versionOrdinal = input.readShort();
-        }
-
-        if (log.isDebugEnabled() && versionOrdinal != Version.CURRENT_ORDINAL) {
-          log.debug("Locator reading request from " + socket.getInetAddress() + " with version "
-              + Version.fromOrdinal(versionOrdinal, false));
-        }
-        input = new VersionedDataInputStream(input, Version.fromOrdinal(versionOrdinal, false));
-        request = DataSerializer.readObject(input);
-        if (log.isDebugEnabled()) {
-          log.debug("Locator received request " + request + " from " + socket.getInetAddress());
-        }
-        if (request instanceof ShutdownRequest) {
-          shuttingDown = true;
-          // Don't call shutdown from within the worker thread, see java bug #6576792.
-          // Closing the socket will cause our acceptor thread to shutdown the executor
-          this.serverSocketPortAtClose = srv_sock.getLocalPort();
-          srv_sock.close();
-          response = new ShutdownResponse();
-        } else if (request instanceof InfoRequest) {
-          response = handleInfoRequest(request);
-        } else if (request instanceof VersionRequest) {
-          response = handleVersionRequest(request);
-        } else {
-          response = handler.processRequest(request);
-        }
-
-        handler.endRequest(request, startTime);
+          if (gossipVersion <= getCurrentGossipVersion()
+              && GOSSIP_TO_GEMFIRE_VERSION_MAP.containsKey(gossipVersion)) {
+            // Create a versioned stream to remember sender's GemFire version
+            versionOrdinal = (short) GOSSIP_TO_GEMFIRE_VERSION_MAP.get(gossipVersion);
+          } else {
+            // Close the socket. We can not accept requests from a newer version
+            rejectUnknownProtocolConnection(socket, gossipVersion);
+            return;
+          }
+          if (Version.GFE_71.compareTo(versionOrdinal) <= 0) {
+            // Recent versions of TcpClient will send the version ordinal
+            versionOrdinal = input.readShort();
+          }
 
-        startTime = DistributionStats.getStatTime();
-        if (response != null) {
-          DataOutputStream output = new DataOutputStream(socket.getOutputStream());
-          if (versionOrdinal != Version.CURRENT_ORDINAL) {
-            output =
-                new VersionedDataOutputStream(output, Version.fromOrdinal(versionOrdinal, false));
+          if (log.isDebugEnabled() && versionOrdinal != Version.CURRENT_ORDINAL) {
+            log.debug("Locator reading request from " + socket.getInetAddress() + " with version "
+                + Version.fromOrdinal(versionOrdinal, false));
+          }
+          input = new VersionedDataInputStream(input, Version.fromOrdinal(versionOrdinal, false));
+          request = DataSerializer.readObject(input);
+          if (log.isDebugEnabled()) {
+            log.debug("Locator received request " + request + " from " + socket.getInetAddress());
+          }
+          if (request instanceof ShutdownRequest) {
+            shuttingDown = true;
+            // Don't call shutdown from within the worker thread, see java bug #6576792.
+            // Closing the socket will cause our acceptor thread to shutdown the executor
+            this.serverSocketPortAtClose = srv_sock.getLocalPort();
+            srv_sock.close();
+            response = new ShutdownResponse();
+          } else if (request instanceof InfoRequest) {
+            response = handleInfoRequest(request);
+          } else if (request instanceof VersionRequest) {
+            response = handleVersionRequest(request);
+          } else {
+            response = handler.processRequest(request);
           }
-          DataSerializer.writeObject(response, output);
-          output.flush();
-        }
 
-        handler.endResponse(request, startTime);
+          handler.endRequest(request, startTime);
+
+          startTime = DistributionStats.getStatTime();
+          if (response != null) {
+            DataOutputStream output = new DataOutputStream(socket.getOutputStream());
+            if (versionOrdinal != Version.CURRENT_ORDINAL) {
+              output =
+                  new VersionedDataOutputStream(output, Version.fromOrdinal(versionOrdinal, false));
+            }
+            DataSerializer.writeObject(response, output);
+            output.flush();
+          }
 
+          handler.endResponse(request, startTime);
+        }
       } catch (EOFException ignore) {
         // client went away - ignore
       } catch (CancelException ignore) {

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
index 4c7a6ef..84aa66e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalCache.java
@@ -75,9 +75,7 @@ import org.apache.geode.pdx.internal.TypeRegistry;
  */
 public interface InternalCache extends Cache, Extensible<Cache>, CacheTime {
 
-  default InternalDistributedMember getMyId() {
-    return null;
-  }
+  InternalDistributedMember getMyId();
 
   Collection<DiskStore> listDiskStores();
 

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandler.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandler.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandler.java
index 38ab73e..0ced3aa 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandler.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandler.java
@@ -30,5 +30,5 @@ import java.io.OutputStream;
  */
 public interface ClientProtocolMessageHandler {
   void receiveMessage(InputStream inputStream, OutputStream outputStream,
-      ExecutionContext executionContext) throws IOException;
+      MessageExecutionContext executionContext) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandlerLoader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandlerLoader.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandlerLoader.java
deleted file mode 100644
index 1dc6129..0000000
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtocolMessageHandlerLoader.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.internal.cache.tier.sockets;
-
-import java.io.IOException;
-import java.net.Socket;
-import java.util.Iterator;
-import java.util.ServiceLoader;
-
-import org.apache.geode.internal.cache.InternalCache;
-import org.apache.geode.internal.cache.tier.Acceptor;
-import org.apache.geode.internal.cache.tier.CachedRegionHelper;
-import org.apache.geode.internal.security.SecurityService;
-
-/**
- * Creates instances of ServerConnection based on the connection mode provided.
- */
-public class ClientProtocolMessageHandlerLoader {
-  private static ClientProtocolMessageHandler protobufProtocolHandler;
-  private static final Object protocolLoadLock = new Object();
-
-  public static ClientProtocolMessageHandler load() {
-    if (protobufProtocolHandler != null) {
-      return protobufProtocolHandler;
-    }
-
-    synchronized (protocolLoadLock) {
-      if (protobufProtocolHandler != null) {
-        return protobufProtocolHandler;
-      }
-
-      ServiceLoader<ClientProtocolMessageHandler> loader =
-          ServiceLoader.load(ClientProtocolMessageHandler.class);
-      Iterator<ClientProtocolMessageHandler> iterator = loader.iterator();
-
-      if (!iterator.hasNext()) {
-        throw new ServiceLoadingFailureException(
-            "ClientProtocolMessageHandler implementation not found in JVM");
-      }
-
-      ClientProtocolMessageHandler returnValue = iterator.next();
-
-      if (iterator.hasNext()) {
-        throw new ServiceLoadingFailureException(
-            "Multiple service implementations found for ClientProtocolMessageHandler");
-      }
-
-      return returnValue;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ExecutionContext.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ExecutionContext.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ExecutionContext.java
deleted file mode 100644
index 27da205..0000000
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ExecutionContext.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.internal.cache.tier.sockets;
-
-import org.apache.geode.cache.Cache;
-import org.apache.geode.distributed.internal.InternalLocator;
-
-public class ExecutionContext {
-  private Cache cache;
-  private InternalLocator locator;
-
-  public ExecutionContext(Cache cache) {
-    this.cache = cache;
-  }
-
-  public ExecutionContext(InternalLocator locator) {
-    this.locator = locator;
-  }
-
-  // This throws if the cache isn't present because we know that non of the callers can take any
-  // reasonable action if the cache is not present
-  public Cache getCache() throws InvalidExecutionContextException {
-    if (cache != null) {
-      return cache;
-    } else {
-      throw new InvalidExecutionContextException(
-          "Execution context's cache was accessed but isn't present. Did this happen on a locator? Operations on the locator should not try to operate on a cache");
-    }
-  }
-
-  // This throws if the locator isn't present because we know that non of the callers can take any
-  // reasonable action if the locator is not present
-  public InternalLocator getLocator() throws InvalidExecutionContextException {
-    if (locator != null) {
-      return locator;
-    } else {
-      throw new InvalidExecutionContextException(
-          "Execution context's locator was accessed but isn't present. Did this happen on a server? Operations on the locator should not try to operate on a cache");
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/GenericProtocolServerConnection.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/GenericProtocolServerConnection.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/GenericProtocolServerConnection.java
index 8f6720e..cd1647a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/GenericProtocolServerConnection.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/GenericProtocolServerConnection.java
@@ -62,7 +62,7 @@ public class GenericProtocolServerConnection extends ServerConnection {
         authenticator.receiveMessage(inputStream, outputStream, securityManager);
       } else {
         messageHandler.receiveMessage(inputStream, outputStream,
-            new ExecutionContext(this.getCache()));
+            new MessageExecutionContext(this.getCache()));
       }
     } catch (IOException e) {
       logger.warn(e);

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageExecutionContext.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageExecutionContext.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageExecutionContext.java
new file mode 100644
index 0000000..1cb8c9d
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageExecutionContext.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.internal.cache.tier.sockets;
+
+import org.apache.geode.annotations.Experimental;
+import org.apache.geode.cache.Cache;
+import org.apache.geode.distributed.internal.InternalLocator;
+
+@Experimental
+public class MessageExecutionContext {
+  private Cache cache;
+  private InternalLocator locator;
+
+  public MessageExecutionContext(Cache cache) {
+    this.cache = cache;
+  }
+
+  public MessageExecutionContext(InternalLocator locator) {
+    this.locator = locator;
+  }
+
+  // This throws if the cache isn't present because we know that none of the callers can take any
+  // reasonable action if the cache is not present
+  public Cache getCache() throws InvalidExecutionContextException {
+    if (cache != null) {
+      return cache;
+    } else {
+      throw new InvalidExecutionContextException(
+          "Operations on the locator should not to try to operate on a cache");
+    }
+  }
+
+  // This throws if the locator isn't present because we know that none of the callers can take any
+  // reasonable action if the locator is not present
+  public InternalLocator getLocator() throws InvalidExecutionContextException {
+    if (locator != null) {
+      return locator;
+    } else {
+      throw new InvalidExecutionContextException(
+          "Operations on the server should not to try to operate on a locator");
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageHandlerFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageHandlerFactory.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageHandlerFactory.java
new file mode 100644
index 0000000..fd261d7
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageHandlerFactory.java
@@ -0,0 +1,19 @@
+package org.apache.geode.internal.cache.tier.sockets;
+
+import java.util.Iterator;
+import java.util.ServiceLoader;
+
+public class MessageHandlerFactory {
+  public ClientProtocolMessageHandler makeMessageHandler() {
+    ServiceLoader<ClientProtocolMessageHandler> loader =
+        ServiceLoader.load(ClientProtocolMessageHandler.class);
+    Iterator<ClientProtocolMessageHandler> iterator = loader.iterator();
+
+    if (!iterator.hasNext()) {
+      throw new ServiceLoadingFailureException(
+          "There is no ClientProtocolMessageHandler implementation found in JVM");
+    }
+
+    return iterator.next();
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnectionFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnectionFactory.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnectionFactory.java
index 9173f6a..d2d85f6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnectionFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnectionFactory.java
@@ -24,7 +24,6 @@ import org.apache.geode.security.StreamAuthenticator;
 import java.io.IOException;
 import java.net.Socket;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.ServiceLoader;
 
@@ -32,7 +31,7 @@ import java.util.ServiceLoader;
  * Creates instances of ServerConnection based on the connection mode provided.
  */
 public class ServerConnectionFactory {
-  private ClientProtocolMessageHandler protobufProtocolHandler;
+  private ClientProtocolMessageHandler protocolHandler;
   private Map<String, Class<? extends StreamAuthenticator>> authenticators = null;
 
   public ServerConnectionFactory() {}
@@ -49,20 +48,13 @@ public class ServerConnectionFactory {
   }
 
   private synchronized ClientProtocolMessageHandler initializeMessageHandler() {
-    if (protobufProtocolHandler != null) {
-      return protobufProtocolHandler;
+    if (protocolHandler != null) {
+      return protocolHandler;
     }
-    ServiceLoader<ClientProtocolMessageHandler> loader =
-        ServiceLoader.load(ClientProtocolMessageHandler.class);
-    Iterator<ClientProtocolMessageHandler> iterator = loader.iterator();
 
-    if (!iterator.hasNext()) {
-      throw new ServiceLoadingFailureException(
-          "There is no ClientProtocolMessageHandler implementation found in JVM");
-    }
+    protocolHandler = new MessageHandlerFactory().makeMessageHandler();
 
-    protobufProtocolHandler = iterator.next();
-    return protobufProtocolHandler;
+    return protocolHandler;
   }
 
   private StreamAuthenticator findStreamAuthenticator(String implementationID) {
@@ -86,10 +78,10 @@ public class ServerConnectionFactory {
   }
 
   private ClientProtocolMessageHandler getClientProtocolMessageHandler() {
-    if (protobufProtocolHandler == null) {
+    if (protocolHandler == null) {
       initializeMessageHandler();
     }
-    return protobufProtocolHandler;
+    return protocolHandler;
   }
 
   public ServerConnection makeServerConnection(Socket s, InternalCache c, CachedRegionHelper helper,

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java
new file mode 100644
index 0000000..991ed75
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java
@@ -0,0 +1,39 @@
+package org.apache.geode.internal.cache.tier.sockets;
+
+import java.net.InetAddress;
+import java.util.Properties;
+
+import org.apache.geode.distributed.internal.DistributionConfigImpl;
+import org.apache.geode.distributed.internal.InternalLocator;
+import org.apache.geode.distributed.internal.PoolStatHelper;
+import org.apache.geode.distributed.internal.tcpserver.TcpHandler;
+import org.apache.geode.distributed.internal.tcpserver.TcpServer;
+
+public class TcpServerFactory {
+  private ClientProtocolMessageHandler protocolHandler;
+
+  public TcpServerFactory() {
+    initializeMessageHandler();
+  }
+
+  public TcpServer makeTcpServer(int port, InetAddress bind_address, Properties sslConfig,
+      DistributionConfigImpl cfg, TcpHandler handler, PoolStatHelper poolHelper,
+      ThreadGroup threadGroup, String threadName, InternalLocator internalLocator) {
+
+    return new TcpServer(port, bind_address, sslConfig, cfg, handler, poolHelper, threadGroup,
+        threadName, internalLocator, protocolHandler);
+  }
+
+  public synchronized ClientProtocolMessageHandler initializeMessageHandler() {
+    if (!Boolean.getBoolean("geode.feature-protobuf-protocol")) {
+      return null;
+    }
+    if (protocolHandler != null) {
+      return protocolHandler;
+    }
+
+    protocolHandler = new MessageHandlerFactory().makeMessageHandler();
+
+    return protocolHandler;
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
index 802620c..e57ca83 100644
--- a/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/client/internal/AutoConnectionSourceImplJUnitTest.java
@@ -35,6 +35,7 @@ import org.apache.geode.distributed.internal.tcpserver.TcpServer;
 import org.apache.geode.internal.AvailablePortHelper;
 import org.apache.geode.internal.cache.PoolStats;
 import org.apache.geode.internal.cache.tier.InternalClientMembership;
+import org.apache.geode.internal.cache.tier.sockets.TcpServerFactory;
 import org.apache.geode.management.membership.ClientMembershipEvent;
 import org.apache.geode.management.membership.ClientMembershipListener;
 import org.apache.geode.test.junit.categories.ClientServerTest;
@@ -303,8 +304,9 @@ public class AutoConnectionSourceImplJUnitTest {
   public void test_DiscoverLocators_whenOneLocatorWasShutdown() throws Exception {
     startFakeLocator();
     int secondPort = AvailablePortHelper.getRandomAvailableTCPPort();
-    TcpServer server2 = new TcpServer(secondPort, InetAddress.getLocalHost(), null, null, handler,
-        new FakeHelper(), Thread.currentThread().getThreadGroup(), "tcp server", null);
+    TcpServer server2 =
+        new TcpServerFactory().makeTcpServer(secondPort, InetAddress.getLocalHost(), null, null,
+            handler, new FakeHelper(), Thread.currentThread().getThreadGroup(), "tcp server", null);
     server2.start();
 
     try {
@@ -387,8 +389,8 @@ public class AutoConnectionSourceImplJUnitTest {
   }
 
   private void startFakeLocator() throws UnknownHostException, IOException, InterruptedException {
-    server = new TcpServer(port, InetAddress.getLocalHost(), null, null, handler, new FakeHelper(),
-        Thread.currentThread().getThreadGroup(), "Tcp Server", null);
+    server = new TcpServerFactory().makeTcpServer(port, InetAddress.getLocalHost(), null, null,
+        handler, new FakeHelper(), Thread.currentThread().getThreadGroup(), "Tcp Server", null);
     server.start();
     Thread.sleep(500);
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TCPServerSSLJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TCPServerSSLJUnitTest.java b/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TCPServerSSLJUnitTest.java
index 229fbb9..c58eb31 100644
--- a/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TCPServerSSLJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TCPServerSSLJUnitTest.java
@@ -138,7 +138,8 @@ public class TCPServerSSLJUnitTest {
     public DummyTcpServer(int port, InetAddress bind_address, Properties sslConfig,
         DistributionConfigImpl cfg, TcpHandler handler, PoolStatHelper poolHelper,
         ThreadGroup threadGroup, String threadName) {
-      super(port, bind_address, sslConfig, cfg, handler, poolHelper, threadGroup, threadName, null);
+      super(port, bind_address, sslConfig, cfg, handler, poolHelper, threadGroup, threadName, null,
+          null);
       if (cfg == null) {
         cfg = new DistributionConfigImpl(sslConfig);
       }

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TcpServerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TcpServerJUnitTest.java b/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TcpServerJUnitTest.java
index 9d20e8c..d02051f 100644
--- a/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TcpServerJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/distributed/internal/tcpserver/TcpServerJUnitTest.java
@@ -26,6 +26,7 @@ import org.apache.geode.distributed.internal.ClusterConfigurationService;
 import org.apache.geode.distributed.internal.DistributionConfigImpl;
 import org.apache.geode.distributed.internal.PoolStatHelper;
 import org.apache.geode.internal.AvailablePort;
+import org.apache.geode.internal.cache.tier.sockets.TcpServerFactory;
 import org.apache.geode.internal.net.SocketCreatorFactory;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.MembershipTest;
@@ -68,8 +69,8 @@ public class TcpServerJUnitTest {
     port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
 
     stats = new SimpleStats();
-    server = new TcpServer(port, localhost, new Properties(), null, handler, stats,
-        Thread.currentThread().getThreadGroup(), "server thread", null);
+    server = new TcpServerFactory().makeTcpServer(port, localhost, new Properties(), null, handler,
+        stats, Thread.currentThread().getThreadGroup(), "server thread", null);
     server.start();
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactoryTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactoryTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactoryTest.java
new file mode 100644
index 0000000..7d40d01
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactoryTest.java
@@ -0,0 +1,19 @@
+package org.apache.geode.internal.cache.tier.sockets;
+
+import static org.junit.Assert.*;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.geode.distributed.internal.tcpserver.TcpServer;
+import org.apache.geode.test.junit.categories.UnitTest;
+
+@Category(UnitTest.class)
+public class TcpServerFactoryTest {
+  @Test
+  public void createsATcpServer() {
+    TcpServerFactory factory = new TcpServerFactory();
+    TcpServer server = factory.makeTcpServer(80, null, null, null, null, null, null, null, null);
+    assertTrue(server != null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java b/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
index b35270e..fd88abf 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/standalone/DUnitLauncher.java
@@ -297,6 +297,8 @@ public class DUnitLauncher {
         // able to do so successfully anyway
         p.setProperty(DISABLE_AUTO_RECONNECT, "true");
 
+        System.setProperty("geode.feature-protobuf-protocol", "true");
+
         try {
           Locator.startLocatorAndDS(0, locatorLogFile, p);
           InternalLocator internalLocator = (InternalLocator) Locator.getLocator();

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/main/java/org/apache/geode/protocol/operations/OperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/operations/OperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/operations/OperationHandler.java
index 5d9012f..ca3548b 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/operations/OperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/operations/OperationHandler.java
@@ -15,7 +15,7 @@
 package org.apache.geode.protocol.operations;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.ProtobufOpsProcessor;
 import org.apache.geode.protocol.protobuf.Result;
@@ -34,6 +34,6 @@ public interface OperationHandler<Req, Resp> {
    * indicated on the provided cache, and return a response.
    */
   Result<Resp> process(SerializationService serializationService, Req request,
-      ExecutionContext executionContext) throws InvalidExecutionContextException;
+      MessageExecutionContext executionContext) throws InvalidExecutionContextException;
 }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufOpsProcessor.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufOpsProcessor.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufOpsProcessor.java
index 76f81e7..3619e0d 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufOpsProcessor.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufOpsProcessor.java
@@ -15,7 +15,7 @@
 package org.apache.geode.protocol.protobuf;
 
 import org.apache.geode.annotations.Experimental;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.registry.OperationContextRegistry;
 import org.apache.geode.protocol.protobuf.utilities.ProtobufResponseUtilities;
@@ -37,7 +37,8 @@ public class ProtobufOpsProcessor {
     this.operationContextRegistry = operationContextRegistry;
   }
 
-  public ClientProtocol.Response process(ClientProtocol.Request request, ExecutionContext context) {
+  public ClientProtocol.Response process(ClientProtocol.Request request,
+      MessageExecutionContext context) {
     ClientProtocol.Request.RequestAPICase requestType = request.getRequestAPICase();
     OperationContext operationContext = operationContextRegistry.getOperationContext(requestType);
     ClientProtocol.Response.Builder builder;

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessor.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessor.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessor.java
index d04e49e..accb899 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessor.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessor.java
@@ -21,7 +21,7 @@ import java.io.OutputStream;
 
 import org.apache.geode.annotations.Experimental;
 import org.apache.geode.internal.cache.tier.sockets.ClientProtocolMessageHandler;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.protocol.exception.InvalidProtocolMessageException;
 import org.apache.geode.protocol.protobuf.registry.OperationContextRegistry;
 import org.apache.geode.protocol.protobuf.serializer.ProtobufProtocolSerializer;
@@ -46,7 +46,7 @@ public class ProtobufStreamProcessor implements ClientProtocolMessageHandler {
 
   @Override
   public void receiveMessage(InputStream inputStream, OutputStream outputStream,
-      ExecutionContext executionContext) throws IOException {
+      MessageExecutionContext executionContext) throws IOException {
     try {
       processOneMessage(inputStream, outputStream, executionContext);
     } catch (InvalidProtocolMessageException e) {
@@ -55,7 +55,8 @@ public class ProtobufStreamProcessor implements ClientProtocolMessageHandler {
   }
 
   private void processOneMessage(InputStream inputStream, OutputStream outputStream,
-      ExecutionContext executionContext) throws InvalidProtocolMessageException, IOException {
+      MessageExecutionContext executionContext)
+      throws InvalidProtocolMessageException, IOException {
     ClientProtocol.Message message = protobufProtocolSerializer.deserialize(inputStream);
     if (message == null) {
       throw new EOFException("Tried to deserialize protobuf message at EOF");

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandler.java
index 75274c1..77cef67 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandler.java
@@ -20,7 +20,7 @@ import java.util.Set;
 
 import org.apache.geode.annotations.Experimental;
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
@@ -41,7 +41,7 @@ public class GetAllRequestOperationHandler
 
   @Override
   public Result<RegionAPI.GetAllResponse> process(SerializationService serializationService,
-      RegionAPI.GetAllRequest request, ExecutionContext executionContext)
+      RegionAPI.GetAllRequest request, MessageExecutionContext executionContext)
       throws InvalidExecutionContextException {
     String regionName = request.getRegionName();
     Region region = executionContext.getCache().getRegion(regionName);

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandler.java
index e7c18cd..c1c3e99 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandler.java
@@ -21,7 +21,7 @@ import java.util.stream.Collectors;
 import org.apache.geode.annotations.Experimental;
 import org.apache.geode.distributed.internal.InternalLocator;
 import org.apache.geode.distributed.internal.ServerLocation;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
@@ -37,13 +37,17 @@ public class GetAvailableServersOperationHandler implements
   @Override
   public Result<ServerAPI.GetAvailableServersResponse> process(
       SerializationService serializationService, ServerAPI.GetAvailableServersRequest request,
-      ExecutionContext executionContext) throws InvalidExecutionContextException {
+      MessageExecutionContext executionContext) throws InvalidExecutionContextException {
 
     InternalLocator locator = executionContext.getLocator();
-    ArrayList servers2 = locator.getServerLocatorAdvisee().getLoadSnapshot().getServers(null);
+    ArrayList serversFromSnapshot =
+        locator.getServerLocatorAdvisee().getLoadSnapshot().getServers(null);
+    if (serversFromSnapshot == null) {
+      serversFromSnapshot = new ArrayList();
+    }
 
-    Collection<BasicTypes.Server> servers = (Collection<BasicTypes.Server>) servers2.stream()
-        .map(serverLocation -> getServerProtobufMessage((ServerLocation) serverLocation))
+    Collection<BasicTypes.Server> servers = (Collection<BasicTypes.Server>) serversFromSnapshot
+        .stream().map(serverLocation -> getServerProtobufMessage((ServerLocation) serverLocation))
         .collect(Collectors.toList());
     ServerAPI.GetAvailableServersResponse.Builder builder =
         ServerAPI.GetAvailableServersResponse.newBuilder().addAllServers(servers);

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandler.java
index 53898ed..e2edfed 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandler.java
@@ -18,7 +18,7 @@ import java.util.Set;
 
 import org.apache.geode.annotations.Experimental;
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.RegionAPI;
@@ -33,7 +33,7 @@ public class GetRegionNamesRequestOperationHandler
 
   @Override
   public Result<RegionAPI.GetRegionNamesResponse> process(SerializationService serializationService,
-      RegionAPI.GetRegionNamesRequest request, ExecutionContext executionContext)
+      RegionAPI.GetRegionNamesRequest request, MessageExecutionContext executionContext)
       throws InvalidExecutionContextException {
     Set<Region<?, ?>> regions = executionContext.getCache().rootRegions();
     return Success.of(ProtobufResponseUtilities.createGetRegionNamesResponse(regions));

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandler.java
index 007f96b..5a8d4d3 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandler.java
@@ -16,7 +16,7 @@ package org.apache.geode.protocol.protobuf.operations;
 
 import org.apache.geode.annotations.Experimental;
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
@@ -35,7 +35,7 @@ public class GetRegionRequestOperationHandler
 
   @Override
   public Result<RegionAPI.GetRegionResponse> process(SerializationService serializationService,
-      RegionAPI.GetRegionRequest request, ExecutionContext executionContext)
+      RegionAPI.GetRegionRequest request, MessageExecutionContext executionContext)
       throws InvalidExecutionContextException {
     String regionName = request.getRegionName();
 

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandler.java
index 8f0fef7..504189e 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandler.java
@@ -16,7 +16,7 @@ package org.apache.geode.protocol.protobuf.operations;
 
 import org.apache.geode.annotations.Experimental;
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
@@ -37,7 +37,7 @@ public class GetRequestOperationHandler
 
   @Override
   public Result<RegionAPI.GetResponse> process(SerializationService serializationService,
-      RegionAPI.GetRequest request, ExecutionContext executionContext)
+      RegionAPI.GetRequest request, MessageExecutionContext executionContext)
       throws InvalidExecutionContextException {
     String regionName = request.getRegionName();
     Region region = executionContext.getCache().getRegion(regionName);

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandler.java
index e0ebc41..99c7766 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandler.java
@@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.annotations.Experimental;
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
@@ -44,7 +44,7 @@ public class PutAllRequestOperationHandler
 
   @Override
   public Result<RegionAPI.PutAllResponse> process(SerializationService serializationService,
-      RegionAPI.PutAllRequest putAllRequest, ExecutionContext executionContext)
+      RegionAPI.PutAllRequest putAllRequest, MessageExecutionContext executionContext)
       throws InvalidExecutionContextException {
     Region region = executionContext.getCache().getRegion(putAllRequest.getRegionName());
 

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandler.java
index cf5afb4..e94127b 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/PutRequestOperationHandler.java
@@ -16,7 +16,7 @@ package org.apache.geode.protocol.protobuf.operations;
 
 import org.apache.geode.annotations.Experimental;
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.BasicTypes;
@@ -37,7 +37,7 @@ public class PutRequestOperationHandler
 
   @Override
   public Result<RegionAPI.PutResponse> process(SerializationService serializationService,
-      RegionAPI.PutRequest request, ExecutionContext executionContext)
+      RegionAPI.PutRequest request, MessageExecutionContext executionContext)
       throws InvalidExecutionContextException {
     String regionName = request.getRegionName();
     Region region = executionContext.getCache().getRegion(regionName);

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandler.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandler.java b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandler.java
index 052efcf..94e3504 100644
--- a/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandler.java
+++ b/geode-protobuf/src/main/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandler.java
@@ -19,7 +19,7 @@ import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.annotations.Experimental;
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.operations.OperationHandler;
 import org.apache.geode.protocol.protobuf.Failure;
@@ -40,7 +40,7 @@ public class RemoveRequestOperationHandler
 
   @Override
   public Result<RegionAPI.RemoveResponse> process(SerializationService serializationService,
-      RegionAPI.RemoveRequest request, ExecutionContext executionContext)
+      RegionAPI.RemoveRequest request, MessageExecutionContext executionContext)
       throws InvalidExecutionContextException {
 
     String regionName = request.getRegionName();

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/test/java/org/apache/geode/protocol/RoundTripLocatorConnectionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/RoundTripLocatorConnectionJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/RoundTripLocatorConnectionJUnitTest.java
index 799c55c..14d8c44 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/RoundTripLocatorConnectionJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/RoundTripLocatorConnectionJUnitTest.java
@@ -15,12 +15,22 @@
 
 package org.apache.geode.protocol;
 
+import static org.apache.geode.distributed.ConfigurationProperties.DISABLE_AUTO_RECONNECT;
+import static org.apache.geode.distributed.ConfigurationProperties.ENABLE_CLUSTER_CONFIGURATION;
+import static org.apache.geode.distributed.ConfigurationProperties.ENABLE_NETWORK_PARTITION_DETECTION;
+import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
+import static org.apache.geode.distributed.ConfigurationProperties.LOG_LEVEL;
+import static org.apache.geode.distributed.ConfigurationProperties.MAX_WAIT_TIME_RECONNECT;
+import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
+import static org.apache.geode.distributed.ConfigurationProperties.MEMBER_TIMEOUT;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 
 import java.io.DataOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.net.Socket;
+import java.util.Properties;
 
 import org.junit.Before;
 import org.junit.Rule;
@@ -29,7 +39,11 @@ import org.junit.contrib.java.lang.system.RestoreSystemProperties;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.cache.server.CacheServer;
+import org.apache.geode.distributed.Locator;
+import org.apache.geode.distributed.internal.InternalLocator;
+import org.apache.geode.internal.AvailablePort;
 import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.internal.cache.tier.sockets.AcceptorImpl;
 import org.apache.geode.protocol.exception.InvalidProtocolMessageException;
 import org.apache.geode.protocol.protobuf.ClientProtocol;
 import org.apache.geode.protocol.protobuf.ProtocolErrorCode;
@@ -39,6 +53,7 @@ import org.apache.geode.protocol.protobuf.utilities.ProtobufRequestUtilities;
 import org.apache.geode.protocol.protobuf.utilities.ProtobufUtilities;
 import org.apache.geode.test.dunit.DistributedTestUtils;
 import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
@@ -48,6 +63,7 @@ public class RoundTripLocatorConnectionJUnitTest extends JUnit4CacheTestCase {
 
   private Socket socket;
   private DataOutputStream dataOutputStream;
+  private Locator locator;
 
   @Rule
   public final RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties();
@@ -63,7 +79,8 @@ public class RoundTripLocatorConnectionJUnitTest extends JUnit4CacheTestCase {
     socket = new Socket(host.getHostName(), locatorPort);
     dataOutputStream = new DataOutputStream(socket.getOutputStream());
     dataOutputStream.writeInt(0);
-    dataOutputStream.writeByte(110);
+    // Using the constant from AcceptorImpl to ensure that magic byte is the same
+    dataOutputStream.writeByte(AcceptorImpl.PROTOBUF_CLIENT_SERVER_PROTOCOL);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessorTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessorTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessorTest.java
index 2185b15..16eb48b 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessorTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/ProtobufStreamProcessorTest.java
@@ -26,7 +26,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.internal.cache.InternalCache;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.test.junit.categories.UnitTest;
 
 @Category(UnitTest.class)
@@ -39,6 +39,6 @@ public class ProtobufStreamProcessorTest {
     ProtobufStreamProcessor protobufStreamProcessor = new ProtobufStreamProcessor();
     InternalCache mockInternalCache = mock(InternalCache.class);
     protobufStreamProcessor.receiveMessage(inputStream, outputStream,
-        new ExecutionContext(mockInternalCache));
+        new MessageExecutionContext(mockInternalCache));
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
index f4d098c..64ee50b 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
@@ -27,7 +27,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.RegionAPI;
@@ -82,7 +82,7 @@ public class GetAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
       throws CodecAlreadyRegisteredForTypeException, UnsupportedEncodingTypeException,
       CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     Result<RegionAPI.GetAllResponse> result = operationHandler.process(serializationServiceStub,
-        generateTestRequest(true), new ExecutionContext(cacheStub));
+        generateTestRequest(true), new MessageExecutionContext(cacheStub));
 
     Assert.assertTrue(result instanceof Success);
 
@@ -102,7 +102,7 @@ public class GetAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
   public void processReturnsNoEntriesForNoKeysRequested() throws UnsupportedEncodingTypeException,
       CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     Result<RegionAPI.GetAllResponse> result = operationHandler.process(serializationServiceStub,
-        generateTestRequest(false), new ExecutionContext(cacheStub));
+        generateTestRequest(false), new MessageExecutionContext(cacheStub));
 
     Assert.assertTrue(result instanceof Success);
 

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandlerJUnitTest.java
index cff6ddc..406beea 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAvailableServersOperationHandlerJUnitTest.java
@@ -18,7 +18,8 @@ import org.apache.geode.distributed.internal.InternalLocator;
 import org.apache.geode.distributed.internal.LocatorLoadSnapshot;
 import org.apache.geode.distributed.internal.ServerLocation;
 import org.apache.geode.distributed.internal.ServerLocator;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.Result;
 import org.apache.geode.protocol.protobuf.ServerAPI;
@@ -31,6 +32,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -41,13 +43,14 @@ import static org.mockito.Mockito.when;
 @Category(UnitTest.class)
 public class GetAvailableServersOperationHandlerJUnitTest extends OperationHandlerJUnitTest {
 
-  public static final String HOSTNAME_1 = "hostname1";
-  public static final int PORT_1 = 12345;
+  private final String HOSTNAME_1 = "hostname1";
+  private final int PORT_1 = 12345;
 
-  public static final String HOSTNAME_2 = "hostname2";
-  public static final int PORT_2 = 23456;
+  private final String HOSTNAME_2 = "hostname2";
+  private final int PORT_2 = 23456;
 
   private InternalLocator internalLocatorMock;
+  private LocatorLoadSnapshot locatorLoadSnapshot;
 
   @Before
   public void setUp() throws Exception {
@@ -56,27 +59,44 @@ public class GetAvailableServersOperationHandlerJUnitTest extends OperationHandl
     operationHandler = new GetAvailableServersOperationHandler();
     internalLocatorMock = mock(InternalLocator.class);
     ServerLocator serverLocatorAdviseeMock = mock(ServerLocator.class);
-    LocatorLoadSnapshot locatorLoadSnapshot = mock(LocatorLoadSnapshot.class);
-    ArrayList<Object> serverList = new ArrayList<>();
-    serverList.add(new ServerLocation(HOSTNAME_1, PORT_1));
-    serverList.add(new ServerLocation(HOSTNAME_2, PORT_2));
+    locatorLoadSnapshot = mock(LocatorLoadSnapshot.class);
+
 
     when(internalLocatorMock.getServerLocatorAdvisee()).thenReturn(serverLocatorAdviseeMock);
     when(serverLocatorAdviseeMock.getLoadSnapshot()).thenReturn(locatorLoadSnapshot);
-    when(locatorLoadSnapshot.getServers(null)).thenReturn(serverList);
   }
 
   @Test
   public void testServerReturnedFromHandler() throws Exception {
+    ArrayList<Object> serverList = new ArrayList<>();
+    serverList.add(new ServerLocation(HOSTNAME_1, PORT_1));
+    serverList.add(new ServerLocation(HOSTNAME_2, PORT_2));
+    when(locatorLoadSnapshot.getServers(null)).thenReturn(serverList);
+
     ServerAPI.GetAvailableServersRequest getAvailableServersRequest =
         ProtobufRequestUtilities.createGetAvailableServersRequest();
     Result operationHandlerResult = operationHandler.process(serializationServiceStub,
-        getAvailableServersRequest, new ExecutionContext(internalLocatorMock));
+        getAvailableServersRequest, new MessageExecutionContext(internalLocatorMock));
     assertTrue(operationHandlerResult instanceof Success);
     ValidateGetAvailableServersResponse(
         (GetAvailableServersResponse) operationHandlerResult.getMessage());
   }
 
+  @Test
+  public void testWhenServersFromSnapshotAreNullReturnsEmtpy()
+      throws InvalidExecutionContextException {
+    when(locatorLoadSnapshot.getServers(any())).thenReturn(null);
+
+    ServerAPI.GetAvailableServersRequest getAvailableServersRequest =
+        ProtobufRequestUtilities.createGetAvailableServersRequest();
+    Result operationHandlerResult = operationHandler.process(serializationServiceStub,
+        getAvailableServersRequest, new MessageExecutionContext(internalLocatorMock));
+    assertTrue(operationHandlerResult instanceof Success);
+    GetAvailableServersResponse availableServersResponse =
+        (GetAvailableServersResponse) operationHandlerResult.getMessage();
+    assertEquals(0, availableServersResponse.getServersCount());
+  }
+
   private void ValidateGetAvailableServersResponse(
       GetAvailableServersResponse getAvailableServersResponse) {
     assertEquals(2, getAvailableServersResponse.getServersCount());

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandlerJUnitTest.java
index fd84d41..2fcf575 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionNamesRequestOperationHandlerJUnitTest.java
@@ -27,7 +27,7 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.RegionAPI;
 import org.apache.geode.protocol.protobuf.Result;
@@ -67,7 +67,7 @@ public class GetRegionNamesRequestOperationHandlerJUnitTest extends OperationHan
       CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     Result<RegionAPI.GetRegionNamesResponse> result = operationHandler.process(
         serializationServiceStub, ProtobufRequestUtilities.createGetRegionNamesRequest(),
-        new ExecutionContext(cacheStub));
+        new MessageExecutionContext(cacheStub));
     Assert.assertTrue(result instanceof Success);
 
     RegionAPI.GetRegionNamesResponse getRegionsResponse = result.getMessage();
@@ -93,7 +93,7 @@ public class GetRegionNamesRequestOperationHandlerJUnitTest extends OperationHan
         .thenReturn(Collections.unmodifiableSet(new HashSet<Region<String, String>>()));
     Result<RegionAPI.GetRegionNamesResponse> result = operationHandler.process(
         serializationServiceStub, ProtobufRequestUtilities.createGetRegionNamesRequest(),
-        new ExecutionContext(emptyCache));
+        new MessageExecutionContext(emptyCache));
     Assert.assertTrue(result instanceof Success);
 
     RegionAPI.GetRegionNamesResponse getRegionsResponse = result.getMessage();

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandlerJUnitTest.java
index 6762f66..60d4985 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRegionRequestOperationHandlerJUnitTest.java
@@ -19,7 +19,7 @@ import org.apache.geode.cache.DataPolicy;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionAttributes;
 import org.apache.geode.cache.Scope;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.MessageUtil;
 import org.apache.geode.protocol.protobuf.BasicTypes;
@@ -75,7 +75,7 @@ public class GetRegionRequestOperationHandlerJUnitTest extends OperationHandlerJ
 
 
     Result<RegionAPI.GetRegionResponse> result = operationHandler.process(serializationServiceStub,
-        MessageUtil.makeGetRegionRequest(TEST_REGION1), new ExecutionContext(cacheStub));
+        MessageUtil.makeGetRegionRequest(TEST_REGION1), new MessageExecutionContext(cacheStub));
     RegionAPI.GetRegionResponse response = result.getMessage();
     BasicTypes.Region region = response.getRegion();
     Assert.assertEquals(TEST_REGION1, region.getName());
@@ -100,7 +100,8 @@ public class GetRegionRequestOperationHandlerJUnitTest extends OperationHandlerJ
         .thenReturn(Collections.unmodifiableSet(new HashSet<Region<String, String>>()));
     String unknownRegionName = "UNKNOWN_REGION";
     Result<RegionAPI.GetRegionResponse> result = operationHandler.process(serializationServiceStub,
-        MessageUtil.makeGetRegionRequest(unknownRegionName), new ExecutionContext(emptyCache));
+        MessageUtil.makeGetRegionRequest(unknownRegionName),
+        new MessageExecutionContext(emptyCache));
     Assert.assertTrue(result instanceof Failure);
     Assert.assertEquals(ProtocolErrorCode.REGION_NOT_FOUND.codeValue,
         result.getErrorMessage().getError().getErrorCode());

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
index af35f6b..6885666 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
@@ -16,7 +16,7 @@ package org.apache.geode.protocol.protobuf.operations;
 
 import com.google.protobuf.ByteString;
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.Failure;
@@ -75,7 +75,7 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
       CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.GetRequest getRequest = generateTestRequest(false, false, false);
     Result<RegionAPI.GetResponse> result = operationHandler.process(serializationServiceStub,
-        getRequest, new ExecutionContext(cacheStub));
+        getRequest, new MessageExecutionContext(cacheStub));
 
     Assert.assertTrue(result instanceof Success);
     Assert.assertEquals(BasicTypes.EncodedValue.ValueCase.STRINGRESULT,
@@ -90,7 +90,7 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
       CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.GetRequest getRequest = generateTestRequest(true, false, false);
     Result<RegionAPI.GetResponse> response = operationHandler.process(serializationServiceStub,
-        getRequest, new ExecutionContext(cacheStub));
+        getRequest, new MessageExecutionContext(cacheStub));
 
     Assert.assertTrue(response instanceof Failure);
     Assert.assertEquals(ProtocolErrorCode.REGION_NOT_FOUND.codeValue,
@@ -103,7 +103,7 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
       CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.GetRequest getRequest = generateTestRequest(false, true, false);
     Result<RegionAPI.GetResponse> response = operationHandler.process(serializationServiceStub,
-        getRequest, new ExecutionContext(cacheStub));
+        getRequest, new MessageExecutionContext(cacheStub));
 
     Assert.assertTrue(response instanceof Success);
   }
@@ -114,7 +114,7 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
       CodecNotRegisteredForTypeException, InvalidExecutionContextException {
     RegionAPI.GetRequest getRequest = generateTestRequest(false, false, true);
     Result<RegionAPI.GetResponse> response = operationHandler.process(serializationServiceStub,
-        getRequest, new ExecutionContext(cacheStub));
+        getRequest, new MessageExecutionContext(cacheStub));
 
     Assert.assertTrue(response instanceof Success);
   }
@@ -136,7 +136,7 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
     RegionAPI.GetRequest getRequest =
         ProtobufRequestUtilities.createGetRequest(TEST_REGION, encodedKey).getGetRequest();
     Result<RegionAPI.GetResponse> response = operationHandler.process(serializationServiceStub,
-        getRequest, new ExecutionContext(cacheStub));
+        getRequest, new MessageExecutionContext(cacheStub));
 
     Assert.assertTrue(response instanceof Failure);
     Assert.assertEquals(ProtocolErrorCode.VALUE_ENCODING_ERROR.codeValue,

http://git-wip-us.apache.org/repos/asf/geode/blob/0eb320fa/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandlerJUnitTest.java
index d3fff49..955013f 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/PutAllRequestOperationHandlerJUnitTest.java
@@ -15,7 +15,7 @@
 package org.apache.geode.protocol.protobuf.operations;
 
 import org.apache.geode.cache.Region;
-import org.apache.geode.internal.cache.tier.sockets.ExecutionContext;
+import org.apache.geode.internal.cache.tier.sockets.MessageExecutionContext;
 import org.apache.geode.internal.cache.tier.sockets.InvalidExecutionContextException;
 import org.apache.geode.protocol.protobuf.BasicTypes;
 import org.apache.geode.protocol.protobuf.RegionAPI;
@@ -74,7 +74,7 @@ public class PutAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
     PutAllRequestOperationHandler operationHandler = new PutAllRequestOperationHandler();
 
     Result<RegionAPI.PutAllResponse> result = operationHandler.process(serializationServiceStub,
-        generateTestRequest(false, true), new ExecutionContext(cacheStub));
+        generateTestRequest(false, true), new MessageExecutionContext(cacheStub));
 
     Assert.assertTrue(result instanceof Success);
 
@@ -88,7 +88,7 @@ public class PutAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
     PutAllRequestOperationHandler operationHandler = new PutAllRequestOperationHandler();
 
     Result<RegionAPI.PutAllResponse> result = operationHandler.process(serializationServiceStub,
-        generateTestRequest(true, true), new ExecutionContext(cacheStub));
+        generateTestRequest(true, true), new MessageExecutionContext(cacheStub));
 
     assertTrue(result instanceof Success);
     verify(regionMock).put(TEST_KEY1, TEST_VALUE1);
@@ -107,7 +107,7 @@ public class PutAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
     PutAllRequestOperationHandler operationHandler = new PutAllRequestOperationHandler();
 
     Result<RegionAPI.PutAllResponse> result = operationHandler.process(serializationServiceStub,
-        generateTestRequest(false, false), new ExecutionContext(cacheStub));
+        generateTestRequest(false, false), new MessageExecutionContext(cacheStub));
 
     assertTrue(result instanceof Success);
 


[16/25] geode git commit: GEODE-3461: increase test timeouts

Posted by ud...@apache.org.
GEODE-3461: increase test timeouts

Also fixes:
* GEODE-3505

This closes #729


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/6f7667d9
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/6f7667d9
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/6f7667d9

Branch: refs/heads/feature/GEODE-3503
Commit: 6f7667d90e56d4dec6f2670a1c46e40b4e354026
Parents: afded2a
Author: Kirk Lund <kl...@apache.org>
Authored: Mon Aug 21 15:38:45 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue Aug 22 15:37:36 2017 -0700

----------------------------------------------------------------------
 .../process/AbstractProcessStreamReaderIntegrationTest.java  | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/6f7667d9/geode-core/src/test/java/org/apache/geode/internal/process/AbstractProcessStreamReaderIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/process/AbstractProcessStreamReaderIntegrationTest.java b/geode-core/src/test/java/org/apache/geode/internal/process/AbstractProcessStreamReaderIntegrationTest.java
index 18feea7..7f4d60a 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/process/AbstractProcessStreamReaderIntegrationTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/process/AbstractProcessStreamReaderIntegrationTest.java
@@ -15,6 +15,7 @@
 package org.apache.geode.internal.process;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.apache.commons.lang.SystemUtils.LINE_SEPARATOR;
 import static org.apache.geode.internal.process.ProcessUtils.isProcessAlive;
 import static org.assertj.core.api.Assertions.assertThat;
@@ -40,7 +41,7 @@ import org.apache.geode.internal.util.StopWatch;
 public abstract class AbstractProcessStreamReaderIntegrationTest {
 
   /** Timeout to join to a running ProcessStreamReader thread */
-  private static final int READER_JOIN_TIMEOUT_MILLIS = 20 * 1000;
+  private static final int READER_JOIN_TIMEOUT_MILLIS = 2 * 60 * 1000;
 
   /** Sleep timeout for {@link ProcessSleeps} instead of sleeping Long.MAX_VALUE */
   private static final int PROCESS_FAIL_SAFE_TIMEOUT_MILLIS = 10 * 60 * 1000;
@@ -48,9 +49,6 @@ public abstract class AbstractProcessStreamReaderIntegrationTest {
   /** Additional time for launched processes to live before terminating */
   private static final int PROCESS_TIME_TO_LIVE_MILLIS = 3 * 500;
 
-  /** Timeout to wait for a new {@link ProcessStreamReader} to be running */
-  private static final int WAIT_FOR_READER_IS_RUNNING_TIMEOUT_MILLIS = 20 * 1000;
-
   protected Process process;
   protected ProcessStreamReader stderr;
   protected ProcessStreamReader stdout;
@@ -149,7 +147,7 @@ public abstract class AbstractProcessStreamReaderIntegrationTest {
   }
 
   protected ConditionFactory await() {
-    return Awaitility.await().atMost(WAIT_FOR_READER_IS_RUNNING_TIMEOUT_MILLIS, MILLISECONDS);
+    return Awaitility.await().atMost(2, MINUTES);
   }
 
   protected static String[] createCommandLine(final Class<?> clazz) {


[06/25] geode git commit: GEODE-3406: Address more PR feedback

Posted by ud...@apache.org.
GEODE-3406: Address more PR feedback

Signed-off-by: Alexander Murmann <am...@pivotal.io>


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/0cc60434
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/0cc60434
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/0cc60434

Branch: refs/heads/feature/GEODE-3503
Commit: 0cc60434cd75814e755b88be39af53b32d53faeb
Parents: a4fc1dd
Author: Hitesh Khamesra <hk...@pivotal.io>
Authored: Fri Aug 18 15:10:47 2017 -0700
Committer: Hitesh Khamesra <hk...@pivotal.io>
Committed: Tue Aug 22 10:55:48 2017 -0700

----------------------------------------------------------------------
 .../internal/tcpserver/TcpServer.java           | 18 +++---
 .../ClientProtoclMessageHandlerLoader.java      | 64 --------------------
 .../tier/sockets/MessageHandlerFactory.java     | 15 +++++
 .../cache/tier/sockets/TcpServerFactory.java    | 15 +++++
 .../tier/sockets/TcpServerFactoryTest.java      | 15 +++++
 5 files changed, 53 insertions(+), 74 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/0cc60434/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
index d471062..83f87ee 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/tcpserver/TcpServer.java
@@ -381,16 +381,11 @@ public class TcpServer {
           } else {
             rejectUnknownProtocolConnection(socket, gossipVersion);
           }
-        } else {
-          if (gossipVersion <= getCurrentGossipVersion()
-              && GOSSIP_TO_GEMFIRE_VERSION_MAP.containsKey(gossipVersion)) {
-            // Create a versioned stream to remember sender's GemFire version
-            versionOrdinal = (short) GOSSIP_TO_GEMFIRE_VERSION_MAP.get(gossipVersion);
-          } else {
-            // Close the socket. We can not accept requests from a newer version
-            rejectUnknownProtocolConnection(socket, gossipVersion);
-            return;
-          }
+        } else if (gossipVersion <= getCurrentGossipVersion()
+            && GOSSIP_TO_GEMFIRE_VERSION_MAP.containsKey(gossipVersion)) {
+          // Create a versioned stream to remember sender's GemFire version
+          versionOrdinal = (short) GOSSIP_TO_GEMFIRE_VERSION_MAP.get(gossipVersion);
+
           if (Version.GFE_71.compareTo(versionOrdinal) <= 0) {
             // Recent versions of TcpClient will send the version ordinal
             versionOrdinal = input.readShort();
@@ -434,6 +429,9 @@ public class TcpServer {
           }
 
           handler.endResponse(request, startTime);
+        } else {
+          // Close the socket. We can not accept requests from a newer version
+          rejectUnknownProtocolConnection(socket, gossipVersion);
         }
       } catch (EOFException ignore) {
         // client went away - ignore

http://git-wip-us.apache.org/repos/asf/geode/blob/0cc60434/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtoclMessageHandlerLoader.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtoclMessageHandlerLoader.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtoclMessageHandlerLoader.java
deleted file mode 100644
index 6654757..0000000
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ClientProtoclMessageHandlerLoader.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.internal.cache.tier.sockets;
-
-import java.io.IOException;
-import java.net.Socket;
-import java.util.Iterator;
-import java.util.ServiceLoader;
-
-import org.apache.geode.internal.cache.InternalCache;
-import org.apache.geode.internal.cache.tier.Acceptor;
-import org.apache.geode.internal.cache.tier.CachedRegionHelper;
-import org.apache.geode.internal.security.SecurityService;
-
-/**
- * Creates instances of ServerConnection based on the connection mode provided.
- */
-public class ClientProtoclMessageHandlerLoader {
-  private static ClientProtocolMessageHandler protobufProtocolHandler;
-  private static final Object protocolLoadLock = new Object();
-
-  public static ClientProtocolMessageHandler load() {
-    if (protobufProtocolHandler != null) {
-      return protobufProtocolHandler;
-    }
-
-    synchronized (protocolLoadLock) {
-      if (protobufProtocolHandler != null) {
-        return protobufProtocolHandler;
-      }
-
-      ServiceLoader<ClientProtocolMessageHandler> loader =
-          ServiceLoader.load(ClientProtocolMessageHandler.class);
-      Iterator<ClientProtocolMessageHandler> iterator = loader.iterator();
-
-      if (!iterator.hasNext()) {
-        throw new ServiceLoadingFailureException(
-            "ClientProtocolMessageHandler implementation not found in JVM");
-      }
-
-      ClientProtocolMessageHandler returnValue = iterator.next();
-
-      if (iterator.hasNext()) {
-        throw new ServiceLoadingFailureException(
-            "Multiple service implementations found for ClientProtocolMessageHandler");
-      }
-
-      return returnValue;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/0cc60434/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageHandlerFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageHandlerFactory.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageHandlerFactory.java
index fd261d7..2aca8c2 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageHandlerFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/MessageHandlerFactory.java
@@ -1,3 +1,18 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
 package org.apache.geode.internal.cache.tier.sockets;
 
 import java.util.Iterator;

http://git-wip-us.apache.org/repos/asf/geode/blob/0cc60434/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java
index 991ed75..9c6bd8c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactory.java
@@ -1,3 +1,18 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
 package org.apache.geode.internal.cache.tier.sockets;
 
 import java.net.InetAddress;

http://git-wip-us.apache.org/repos/asf/geode/blob/0cc60434/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactoryTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactoryTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactoryTest.java
index 7d40d01..b5c1951 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactoryTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/TcpServerFactoryTest.java
@@ -1,3 +1,18 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
 package org.apache.geode.internal.cache.tier.sockets;
 
 import static org.junit.Assert.*;


[25/25] geode git commit: GEODE-3503: Removal of Codec classes left behind. Added tests to test the remaining JSONCodec.

Posted by ud...@apache.org.
GEODE-3503: Removal of Codec classes left behind.
Added tests to test the remaining JSONCodec.


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/a182a5a9
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/a182a5a9
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/a182a5a9

Branch: refs/heads/feature/GEODE-3503
Commit: a182a5a956d8a2e299fa3fb1307a79aa7c353e9e
Parents: 039edfc
Author: Udo Kohlmeyer <uk...@pivotal.io>
Authored: Wed Aug 23 13:48:11 2017 -0700
Committer: Udo Kohlmeyer <uk...@pivotal.io>
Committed: Wed Aug 23 13:54:42 2017 -0700

----------------------------------------------------------------------
 .../geode/serialization/codec/BinaryCodec.java  |  37 ----
 .../geode/serialization/codec/BooleanCodec.java |  39 ----
 .../geode/serialization/codec/ByteCodec.java    |  39 ----
 .../geode/serialization/codec/DoubleCodec.java  |  39 ----
 .../geode/serialization/codec/FloatCodec.java   |  39 ----
 .../geode/serialization/codec/IntCodec.java     |  39 ----
 .../geode/serialization/codec/LongCodec.java    |  39 ----
 .../geode/serialization/codec/ShortCodec.java   |  39 ----
 .../geode/serialization/codec/StringCodec.java  |  41 ----
 geode-protobuf/src/main/proto/region_API.proto  |   1 +
 ...e.geode.protocol.operations.OperationHandler |   1 -
 ...geode.protocol.serializer.ProtocolSerializer |   1 -
 .../org.apache.geode.serialization.TypeCodec    |   9 -
 .../GetAllRequestOperationHandlerJUnitTest.java |   3 -
 .../GetRequestOperationHandlerJUnitTest.java    |   3 -
 .../RemoveRequestOperationHandlerJUnitTest.java |   3 -
 .../ProtobufProtocolSerializerJUnitTest.java    |  27 +--
 .../codec/BinaryFormatJUnitTest.java            |  65 ------
 .../serialization/codec/JSONCodecJUnitTest.java | 206 +++++++++++++++++++
 19 files changed, 217 insertions(+), 453 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/BinaryCodec.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/BinaryCodec.java b/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/BinaryCodec.java
deleted file mode 100644
index cca88dd..0000000
--- a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/BinaryCodec.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.serialization.codec;
-
-import org.apache.geode.annotations.Experimental;
-import org.apache.geode.serialization.SerializationType;
-import org.apache.geode.serialization.TypeCodec;
-
-@Experimental
-public class BinaryCodec implements TypeCodec<byte[]> {
-  @Override
-  public byte[] decode(byte[] incoming) {
-    return incoming;
-  }
-
-  @Override
-  public byte[] encode(byte[] incoming) {
-    return incoming;
-  }
-
-  @Override
-  public SerializationType getSerializationType() {
-    return SerializationType.BINARY;
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/BooleanCodec.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/BooleanCodec.java b/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/BooleanCodec.java
deleted file mode 100644
index ca0443c..0000000
--- a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/BooleanCodec.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.serialization.codec;
-
-import java.nio.ByteBuffer;
-
-import org.apache.geode.annotations.Experimental;
-import org.apache.geode.serialization.SerializationType;
-import org.apache.geode.serialization.TypeCodec;
-
-@Experimental
-public class BooleanCodec implements TypeCodec<Boolean> {
-  @Override
-  public Boolean decode(byte[] incoming) {
-    return ByteBuffer.wrap(incoming).get() == 1;
-  }
-
-  @Override
-  public byte[] encode(Boolean incoming) {
-    return ByteBuffer.allocate(Byte.BYTES).put(incoming ? (byte) 1 : (byte) 0).array();
-  }
-
-  @Override
-  public SerializationType getSerializationType() {
-    return SerializationType.BOOLEAN;
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/ByteCodec.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/ByteCodec.java b/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/ByteCodec.java
deleted file mode 100644
index 847d210..0000000
--- a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/ByteCodec.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.serialization.codec;
-
-import java.nio.ByteBuffer;
-
-import org.apache.geode.annotations.Experimental;
-import org.apache.geode.serialization.SerializationType;
-import org.apache.geode.serialization.TypeCodec;
-
-@Experimental
-public class ByteCodec implements TypeCodec<Byte> {
-  @Override
-  public Byte decode(byte[] incoming) {
-    return ByteBuffer.wrap(incoming).get();
-  }
-
-  @Override
-  public byte[] encode(Byte incoming) {
-    return ByteBuffer.allocate(Byte.BYTES).put(incoming).array();
-  }
-
-  @Override
-  public SerializationType getSerializationType() {
-    return SerializationType.BYTE;
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/DoubleCodec.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/DoubleCodec.java b/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/DoubleCodec.java
deleted file mode 100644
index 8f01639..0000000
--- a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/DoubleCodec.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.serialization.codec;
-
-import java.nio.ByteBuffer;
-
-import org.apache.geode.annotations.Experimental;
-import org.apache.geode.serialization.SerializationType;
-import org.apache.geode.serialization.TypeCodec;
-
-@Experimental
-public class DoubleCodec implements TypeCodec<Double> {
-  @Override
-  public Double decode(byte[] incoming) {
-    return ByteBuffer.wrap(incoming).getDouble();
-  }
-
-  @Override
-  public byte[] encode(Double incoming) {
-    return ByteBuffer.allocate(Double.BYTES).putDouble(incoming).array();
-  }
-
-  @Override
-  public SerializationType getSerializationType() {
-    return SerializationType.DOUBLE;
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/FloatCodec.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/FloatCodec.java b/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/FloatCodec.java
deleted file mode 100644
index 75c1e0d..0000000
--- a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/FloatCodec.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.serialization.codec;
-
-import java.nio.ByteBuffer;
-
-import org.apache.geode.annotations.Experimental;
-import org.apache.geode.serialization.SerializationType;
-import org.apache.geode.serialization.TypeCodec;
-
-@Experimental
-public class FloatCodec implements TypeCodec<Float> {
-  @Override
-  public Float decode(byte[] incoming) {
-    return ByteBuffer.wrap(incoming).getFloat();
-  }
-
-  @Override
-  public byte[] encode(Float incoming) {
-    return ByteBuffer.allocate(Float.BYTES).putFloat(incoming).array();
-  }
-
-  @Override
-  public SerializationType getSerializationType() {
-    return SerializationType.FLOAT;
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/IntCodec.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/IntCodec.java b/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/IntCodec.java
deleted file mode 100644
index 4366c84..0000000
--- a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/IntCodec.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.serialization.codec;
-
-import java.nio.ByteBuffer;
-
-import org.apache.geode.annotations.Experimental;
-import org.apache.geode.serialization.SerializationType;
-import org.apache.geode.serialization.TypeCodec;
-
-@Experimental
-public class IntCodec implements TypeCodec<Integer> {
-  @Override
-  public Integer decode(byte[] incoming) {
-    return ByteBuffer.wrap(incoming).getInt();
-  }
-
-  @Override
-  public byte[] encode(Integer incoming) {
-    return ByteBuffer.allocate(Integer.BYTES).putInt(incoming).array();
-  }
-
-  @Override
-  public SerializationType getSerializationType() {
-    return SerializationType.INT;
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/LongCodec.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/LongCodec.java b/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/LongCodec.java
deleted file mode 100644
index b6b8053..0000000
--- a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/LongCodec.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.serialization.codec;
-
-import java.nio.ByteBuffer;
-
-import org.apache.geode.annotations.Experimental;
-import org.apache.geode.serialization.SerializationType;
-import org.apache.geode.serialization.TypeCodec;
-
-@Experimental
-public class LongCodec implements TypeCodec<Long> {
-  @Override
-  public Long decode(byte[] incoming) {
-    return ByteBuffer.wrap(incoming).getLong();
-  }
-
-  @Override
-  public byte[] encode(Long incoming) {
-    return ByteBuffer.allocate(Long.BYTES).putLong(incoming).array();
-  }
-
-  @Override
-  public SerializationType getSerializationType() {
-    return SerializationType.LONG;
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/ShortCodec.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/ShortCodec.java b/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/ShortCodec.java
deleted file mode 100644
index df79fb0..0000000
--- a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/ShortCodec.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.serialization.codec;
-
-import java.nio.ByteBuffer;
-
-import org.apache.geode.annotations.Experimental;
-import org.apache.geode.serialization.SerializationType;
-import org.apache.geode.serialization.TypeCodec;
-
-@Experimental
-public class ShortCodec implements TypeCodec<Short> {
-  @Override
-  public Short decode(byte[] incoming) {
-    return ByteBuffer.wrap(incoming).getShort();
-  }
-
-  @Override
-  public byte[] encode(Short incoming) {
-    return ByteBuffer.allocate(Short.BYTES).putShort(incoming).array();
-  }
-
-  @Override
-  public SerializationType getSerializationType() {
-    return SerializationType.SHORT;
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/StringCodec.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/StringCodec.java b/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/StringCodec.java
deleted file mode 100644
index 027f4ca..0000000
--- a/geode-protobuf/src/main/java/org/apache/geode/serialization/codec/StringCodec.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.serialization.codec;
-
-import java.nio.charset.Charset;
-
-import org.apache.geode.annotations.Experimental;
-import org.apache.geode.serialization.SerializationType;
-import org.apache.geode.serialization.TypeCodec;
-
-@Experimental
-public class StringCodec implements TypeCodec<String> {
-  private static final Charset UTF8 = Charset.forName("UTF-8");
-
-  @Override
-  public String decode(byte[] incoming) {
-    return new String(incoming, UTF8);
-  }
-
-  @Override
-  public byte[] encode(String incoming) {
-    return incoming.getBytes(UTF8);
-  }
-
-  @Override
-  public SerializationType getSerializationType() {
-    return SerializationType.STRING;
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/proto/region_API.proto
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/proto/region_API.proto b/geode-protobuf/src/main/proto/region_API.proto
index 40bf882..7900f5e 100644
--- a/geode-protobuf/src/main/proto/region_API.proto
+++ b/geode-protobuf/src/main/proto/region_API.proto
@@ -58,6 +58,7 @@ message GetAllRequest {
 
 message GetAllResponse {
     repeated Entry entries = 1;
+    repeated KeyedErrorResponse failedKeys = 2;
 }
 
 message RemoveRequest {

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.protocol.operations.OperationHandler
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.protocol.operations.OperationHandler b/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.protocol.operations.OperationHandler
deleted file mode 100644
index b6ec564..0000000
--- a/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.protocol.operations.OperationHandler
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.geode.protocol.protobuf.operations.GetRequestOperationHandler
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.protocol.serializer.ProtocolSerializer
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.protocol.serializer.ProtocolSerializer b/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.protocol.serializer.ProtocolSerializer
deleted file mode 100644
index 2a9af4e..0000000
--- a/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.protocol.serializer.ProtocolSerializer
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.geode.protocol.protobuf.serializer.ProtobufProtocolSerializer
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.serialization.TypeCodec
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.serialization.TypeCodec b/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.serialization.TypeCodec
index 1b7b333..fed6cd0 100644
--- a/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.serialization.TypeCodec
+++ b/geode-protobuf/src/main/resources/META-INF/services/org.apache.geode.serialization.TypeCodec
@@ -1,10 +1 @@
-org.apache.geode.serialization.codec.BinaryCodec
-org.apache.geode.serialization.codec.BooleanCodec
-org.apache.geode.serialization.codec.ByteCodec
-org.apache.geode.serialization.codec.DoubleCodec
-org.apache.geode.serialization.codec.FloatCodec
-org.apache.geode.serialization.codec.IntCodec
 org.apache.geode.serialization.codec.JSONCodec
-org.apache.geode.serialization.codec.LongCodec
-org.apache.geode.serialization.codec.ShortCodec
-org.apache.geode.serialization.codec.StringCodec

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
index 64ee50b..cb6592a 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetAllRequestOperationHandlerJUnitTest.java
@@ -35,7 +35,6 @@ import org.apache.geode.protocol.protobuf.Result;
 import org.apache.geode.protocol.protobuf.Success;
 import org.apache.geode.protocol.protobuf.utilities.ProtobufRequestUtilities;
 import org.apache.geode.protocol.protobuf.utilities.ProtobufUtilities;
-import org.apache.geode.serialization.codec.StringCodec;
 import org.apache.geode.serialization.exception.UnsupportedEncodingTypeException;
 import org.apache.geode.serialization.registry.exception.CodecAlreadyRegisteredForTypeException;
 import org.apache.geode.serialization.registry.exception.CodecNotRegisteredForTypeException;
@@ -51,7 +50,6 @@ public class GetAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
   private static final String TEST_KEY3 = "my key3";
   private static final String TEST_VALUE3 = "my value3";
   private static final String TEST_REGION = "test region";
-  private StringCodec stringDecoder;
 
   @Before
   public void setUp() throws Exception {
@@ -74,7 +72,6 @@ public class GetAllRequestOperationHandlerJUnitTest extends OperationHandlerJUni
 
     when(cacheStub.getRegion(TEST_REGION)).thenReturn(regionStub);
     operationHandler = new GetAllRequestOperationHandler();
-    stringDecoder = new StringCodec();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
index 6885666..e2eb8dd 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/GetRequestOperationHandlerJUnitTest.java
@@ -26,7 +26,6 @@ import org.apache.geode.protocol.protobuf.Result;
 import org.apache.geode.protocol.protobuf.Success;
 import org.apache.geode.protocol.protobuf.utilities.ProtobufRequestUtilities;
 import org.apache.geode.protocol.protobuf.utilities.ProtobufUtilities;
-import org.apache.geode.serialization.codec.StringCodec;
 import org.apache.geode.serialization.exception.UnsupportedEncodingTypeException;
 import org.apache.geode.serialization.registry.exception.CodecAlreadyRegisteredForTypeException;
 import org.apache.geode.serialization.registry.exception.CodecNotRegisteredForTypeException;
@@ -50,7 +49,6 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
   private final String MISSING_REGION = "missing region";
   private final String MISSING_KEY = "missing key";
   private final String NULLED_KEY = "nulled key";
-  private StringCodec stringDecoder;
 
   @Before
   public void setUp() throws Exception {
@@ -66,7 +64,6 @@ public class GetRequestOperationHandlerJUnitTest extends OperationHandlerJUnitTe
     when(cacheStub.getRegion(TEST_REGION)).thenReturn(regionStub);
     when(cacheStub.getRegion(MISSING_REGION)).thenReturn(null);
     operationHandler = new GetRequestOperationHandler();
-    stringDecoder = new StringCodec();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
index 4350ece..e0ee9ec 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/protobuf/operations/RemoveRequestOperationHandlerJUnitTest.java
@@ -27,7 +27,6 @@ import org.apache.geode.protocol.protobuf.Result;
 import org.apache.geode.protocol.protobuf.Success;
 import org.apache.geode.protocol.protobuf.utilities.ProtobufRequestUtilities;
 import org.apache.geode.protocol.protobuf.utilities.ProtobufUtilities;
-import org.apache.geode.serialization.codec.StringCodec;
 import org.apache.geode.serialization.exception.UnsupportedEncodingTypeException;
 import org.apache.geode.serialization.registry.exception.CodecAlreadyRegisteredForTypeException;
 import org.apache.geode.serialization.registry.exception.CodecNotRegisteredForTypeException;
@@ -52,7 +51,6 @@ public class RemoveRequestOperationHandlerJUnitTest extends OperationHandlerJUni
   private final String TEST_REGION = "test region";
   private final String MISSING_REGION = "missing region";
   private final String MISSING_KEY = "missing key";
-  private StringCodec stringDecoder;
   private Region regionStub;
 
   @Before
@@ -67,7 +65,6 @@ public class RemoveRequestOperationHandlerJUnitTest extends OperationHandlerJUni
     when(cacheStub.getRegion(TEST_REGION)).thenReturn(regionStub);
     when(cacheStub.getRegion(MISSING_REGION)).thenReturn(null);
     operationHandler = new RemoveRequestOperationHandler();
-    stringDecoder = new StringCodec();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/test/java/org/apache/geode/protocol/serializer/ProtobufProtocolSerializerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/protocol/serializer/ProtobufProtocolSerializerJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/protocol/serializer/ProtobufProtocolSerializerJUnitTest.java
index 0f6435a..c1b6a04 100644
--- a/geode-protobuf/src/test/java/org/apache/geode/protocol/serializer/ProtobufProtocolSerializerJUnitTest.java
+++ b/geode-protobuf/src/test/java/org/apache/geode/protocol/serializer/ProtobufProtocolSerializerJUnitTest.java
@@ -14,22 +14,20 @@
  */
 package org.apache.geode.protocol.serializer;
 
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ServiceLoader;
-
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
 import org.apache.geode.protocol.MessageUtil;
 import org.apache.geode.protocol.exception.InvalidProtocolMessageException;
 import org.apache.geode.protocol.protobuf.ClientProtocol;
 import org.apache.geode.protocol.protobuf.serializer.ProtobufProtocolSerializer;
 import org.apache.geode.test.junit.categories.UnitTest;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
 
 @Category(UnitTest.class)
 public class ProtobufProtocolSerializerJUnitTest {
@@ -37,12 +35,7 @@ public class ProtobufProtocolSerializerJUnitTest {
 
   @Before
   public void startup() {
-    ServiceLoader<ProtocolSerializer> serviceLoader = ServiceLoader.load(ProtocolSerializer.class);
-    for (ProtocolSerializer protocolSerializer : serviceLoader) {
-      if (protocolSerializer instanceof ProtobufProtocolSerializer) {
-        this.protocolSerializer = protocolSerializer;
-      }
-    }
+    this.protocolSerializer = new ProtobufProtocolSerializer();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/test/java/org/apache/geode/serialization/codec/BinaryFormatJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/serialization/codec/BinaryFormatJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/serialization/codec/BinaryFormatJUnitTest.java
deleted file mode 100644
index 4d749ab..0000000
--- a/geode-protobuf/src/test/java/org/apache/geode/serialization/codec/BinaryFormatJUnitTest.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.serialization.codec;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-
-import java.nio.charset.Charset;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.geode.test.junit.categories.UnitTest;
-
-@Category(UnitTest.class)
-public class BinaryFormatJUnitTest {
-  private static final Charset UTF8 = Charset.forName("UTF-8");
-  private static final Charset UTF16 = Charset.forName("UTF-16");
-  private String testString = "Test String";
-
-  private StringCodec stringCodec;
-
-  @Before
-  public void startup() {
-    stringCodec = new StringCodec();
-  }
-
-  @Test
-  public void testStringsUseUTF8Encoding() {
-    assertArrayEquals(testString.getBytes(UTF8), stringCodec.encode(testString));
-  }
-
-  @Test
-  public void testStringDontUseUTF16Encoding() {
-    byte[] expectedEncodedString = stringCodec.encode(testString);
-    byte[] incorrectEncodedString = testString.getBytes(UTF16);
-    assertNotEquals(expectedEncodedString.length, incorrectEncodedString.length);
-  }
-
-  @Test
-  public void testImproperlyEncodedStringDecodingFails() {
-    byte[] encodedString = testString.getBytes(UTF16);
-    assertNotEquals(testString, stringCodec.decode(encodedString));
-  }
-
-  @Test
-  public void testProperlyEncodedStringDecoding() {
-    byte[] encodedString = testString.getBytes(UTF8);
-    assertEquals(testString, stringCodec.decode(encodedString));
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/a182a5a9/geode-protobuf/src/test/java/org/apache/geode/serialization/codec/JSONCodecJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-protobuf/src/test/java/org/apache/geode/serialization/codec/JSONCodecJUnitTest.java b/geode-protobuf/src/test/java/org/apache/geode/serialization/codec/JSONCodecJUnitTest.java
new file mode 100644
index 0000000..6fa6ea0
--- /dev/null
+++ b/geode-protobuf/src/test/java/org/apache/geode/serialization/codec/JSONCodecJUnitTest.java
@@ -0,0 +1,206 @@
+package org.apache.geode.serialization.codec;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import com.sun.tools.javac.util.List;
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.pdx.JSONFormatter;
+import org.apache.geode.pdx.PdxInstance;
+import org.apache.geode.pdx.PdxInstanceFactory;
+import org.apache.geode.pdx.WritablePdxInstance;
+import org.apache.geode.test.junit.categories.UnitTest;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.LinkedList;
+
+@Category(UnitTest.class)
+public class JSONCodecJUnitTest {
+
+  private String complexJSONString = "{\n" + "    \"_id\": \"599c7d885df276ac3e0bf10a\",\n"
+      + "    \"index\": 0,\n" + "    \"guid\": \"395902d8-36ed-4178-ad70-2f720c557c55\",\n"
+      + "    \"isActive\": true,\n" + "    \"balance\": \"$3,152.82\",\n"
+      + "    \"picture\": \"http://placehold.it/32x32\",\n" + "    \"age\": 27,\n"
+      + "    \"eyeColor\": \"blue\",\n" + "    \"name\": \"Kristina Norman\",\n"
+      + "    \"gender\": \"female\",\n" + "    \"company\": \"ORBALIX\",\n"
+      + "    \"email\": \"kristinanorman@orbalix.com\",\n"
+      + "    \"phone\": \"+1 (983) 526-3433\",\n"
+      + "    \"address\": \"400 Vermont Court, Denio, Wyoming, 7142\",\n"
+      + "    \"about\": \"Mollit nostrud irure excepteur veniam aliqua. Non id tempor magna nisi ipsum minim. Culpa velit tempor culpa mollit cillum deserunt nisi culpa irure ut nostrud enim consectetur voluptate. Elit veniam velit enim minim. Sunt nostrud ea duis enim sit cillum.\",\n"
+      + "    \"registered\": \"2015-03-11T02:22:45 +07:00\",\n" + "    \"latitude\": -0.853065,\n"
+      + "    \"longitude\": -29.749358,\n" + "    \"tags\": [\n" + "      \"laboris\",\n"
+      + "      \"velit\",\n" + "      \"non\",\n" + "      \"est\",\n" + "      \"anim\",\n"
+      + "      \"amet\",\n" + "      \"cupidatat\"\n" + "    ],\n" + "    \"friends\": [\n"
+      + "      {\n" + "        \"id\": 0,\n" + "        \"name\": \"Roseann Roy\"\n" + "      },\n"
+      + "      {\n" + "        \"id\": 1,\n" + "        \"name\": \"Adriana Perry\"\n"
+      + "      },\n" + "      {\n" + "        \"id\": 2,\n"
+      + "        \"name\": \"Tyler Mccarthy\"\n" + "      }\n" + "    ],\n"
+      + "    \"greeting\": \"Hello, Kristina Norman! You have 8 unread messages.\",\n"
+      + "    \"favoriteFruit\": \"apple\"\n" + "  }";
+  private Cache cache;
+
+  @Before
+  public void setUp() throws Exception {
+    cache = new CacheFactory().create();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cache != null) {
+      cache.close();
+    }
+  }
+
+  @Test
+  public void testSimpleJSONEncode() throws Exception {
+    InternalCache cache = (InternalCache) new CacheFactory().create();
+    PdxInstanceFactory pdxInstanceFactory =
+        cache.createPdxInstanceFactory(JSONFormatter.JSON_CLASSNAME, false);
+
+    pdxInstanceFactory.writeString("string", "someString");
+    pdxInstanceFactory.writeBoolean("boolean", true);
+    PdxInstance pdxInstance = pdxInstanceFactory.create();
+
+    byte[] encodedJSONByte = new JSONCodec().encode(pdxInstance);
+
+    String expectedJsonString =
+        "{\n" + "  \"string\" : \"someString\",\n" + "  \"boolean\" : true\n" + "}";
+    assertArrayEquals(expectedJsonString.getBytes(), encodedJSONByte);
+  }
+
+  @Test
+  public void testComplexJSONEncode() {
+    PdxInstance pdxInstanceForComplexJSONString = createPDXInstanceForComplexJSONString();
+    PdxInstance decodedJSONPdxInstance = new JSONCodec().decode(complexJSONString.getBytes());
+
+    assertEquals(pdxInstanceForComplexJSONString.getFieldNames(),
+        decodedJSONPdxInstance.getFieldNames());
+
+    List<String> fieldNames = List.of("_id", "index", "guid", "isActive", "balance", "picture",
+        "age", "eyeColor", "name", "gender", "company", "email", "phone", "address", "about",
+        "registered", "latitude", "longitude", "tags", "friends", "greeting", "favoriteFruit");
+    fieldNames.forEach(
+        fieldName -> assertEquals(pdxInstanceForComplexJSONString.getField(fieldName).getClass(),
+            decodedJSONPdxInstance.getField(fieldName).getClass()));
+
+    fieldNames.forEach(
+        fieldName -> assertEquals(pdxFieldValues(pdxInstanceForComplexJSONString, fieldName),
+            pdxFieldValues(decodedJSONPdxInstance, fieldName)));
+  }
+
+  /**
+   * This method is very specific to this test. It will take an pdxInstance object and return you
+   * the value for the fieldName. In most cases it will return the value directly, but in the case
+   * of collections LinkedList<String> it will return an ArrayList<String> or in the case of a
+   * LinkedList<PdxInstance> it will return an ArrayList<ArrayList>.
+   */
+  private Object pdxFieldValues(PdxInstance pdxInstance, String fieldName) {
+    Object fieldValue = pdxInstance.getField(fieldName);
+    // Check if the value is of type PDXInstance. If so, then iterate over its fields and return an
+    // ArrayList of all the values
+    if (fieldValue instanceof PdxInstance) {
+      ArrayList<Object> objects = new ArrayList<>();
+      ((PdxInstance) fieldValue).getFieldNames().forEach(
+          innerFieldName -> objects.add(pdxFieldValues((PdxInstance) fieldValue, innerFieldName)));
+      return objects;
+    }
+    // Else check if the value is of type LinkedList. Then it is a collection of either type String
+    // or type PDXInstance. If of type String, then return an ArrayList<String> otherwise the
+    // collection
+    // contains a collection of PdxInstance.
+    else if (fieldValue instanceof LinkedList) {
+      LinkedList value = (LinkedList) fieldValue;
+      // if the first value of the LinkedList is not a PDXInstance return the LinkedList
+      if (!value.isEmpty() && !(value.getFirst() instanceof PdxInstance)) {
+        return value;
+      } else {
+        // Here the LinkedList contains PDXInstances. Here we will iterate the linkedList and
+        // process
+        // each pdxInstance into and ArrayList of the pdx's values.
+        ArrayList<Object> objects = new ArrayList<>();
+        value.forEach(internalPdxInstance -> {
+          ArrayList innerObject = new ArrayList();
+          ((PdxInstance) internalPdxInstance).getFieldNames()
+              .forEach(internalFieldName -> innerObject
+                  .add(pdxFieldValues((PdxInstance) internalPdxInstance, internalFieldName)));
+          objects.add(innerObject);
+          objects.sort((Comparator) (o1, o2) -> (byte) ((ArrayList) o1).get(0));
+        });
+        return objects;
+      }
+    }
+    // Otherwise if the field is not a PdxInstance or LinkedList, then return the value.
+    else {
+      return fieldValue;
+    }
+  }
+
+  /**
+   * Create a PDXInstance object that is equivalent to @link{complexJSONString}
+   */
+  private PdxInstance createPDXInstanceForComplexJSONString() {
+    PdxInstanceFactory friendPdxFactory =
+        ((InternalCache) cache).createPdxInstanceFactory(JSONFormatter.JSON_CLASSNAME, false);
+
+    friendPdxFactory.writeByte("id", (byte) 0);
+    PdxInstance friendPdx1 = friendPdxFactory.writeString("name", "Roseann Roy").create();
+
+    WritablePdxInstance friendPdx2 = friendPdx1.createWriter();
+    friendPdx2.setField("id", (byte) 1);
+    friendPdx2.setField("name", "Adriana Perry");
+
+    WritablePdxInstance friendPdx3 = friendPdx1.createWriter();
+    friendPdx3.setField("id", (byte) 2);
+    friendPdx3.setField("name", "Tyler Mccarthy");
+
+    PdxInstanceFactory pdxInstanceFactory =
+        cache.createPdxInstanceFactory(JSONFormatter.JSON_CLASSNAME);
+    pdxInstanceFactory.writeString("_id", "599c7d885df276ac3e0bf10a");
+    pdxInstanceFactory.writeByte("index", (byte) 0);
+    pdxInstanceFactory.writeString("guid", "395902d8-36ed-4178-ad70-2f720c557c55");
+    pdxInstanceFactory.writeBoolean("isActive", true);
+    pdxInstanceFactory.writeString("balance", "$3,152.82");
+    pdxInstanceFactory.writeString("picture", "http://placehold.it/32x32");
+    pdxInstanceFactory.writeByte("age", (byte) 27);
+    pdxInstanceFactory.writeString("eyeColor", "blue");
+    pdxInstanceFactory.writeString("name", "Kristina Norman");
+    pdxInstanceFactory.writeString("gender", "female");
+    pdxInstanceFactory.writeString("company", "ORBALIX");
+    pdxInstanceFactory.writeString("email", "kristinanorman@orbalix.com");
+    pdxInstanceFactory.writeString("phone", "+1 (983) 526-3433");
+    pdxInstanceFactory.writeString("address", "400 Vermont Court, Denio, Wyoming, 7142");
+    pdxInstanceFactory.writeString("about",
+        "Mollit nostrud irure excepteur veniam aliqua. Non id tempor magna nisi ipsum minim. Culpa velit tempor culpa mollit cillum deserunt nisi culpa irure ut nostrud enim consectetur voluptate. Elit veniam velit enim minim. Sunt nostrud ea duis enim sit cillum.");
+    pdxInstanceFactory.writeString("registered", "2015-03-11T02:22:45 +07:00");
+    pdxInstanceFactory.writeDouble("latitude", -0.853065);
+    pdxInstanceFactory.writeDouble("longitude", -29.749358);
+    pdxInstanceFactory.writeObject("tags",
+        new LinkedList<>(List.of("laboris", "velit", "non", "est", "anim", "amet", "cupidatat")));
+    pdxInstanceFactory.writeObject("friends",
+        new LinkedList<>(List.of(friendPdx1, friendPdx2, friendPdx3)));
+    pdxInstanceFactory.writeString("greeting",
+        "Hello, Kristina Norman! You have 8 unread messages.");
+    pdxInstanceFactory.writeString("favoriteFruit", "apple");
+    return pdxInstanceFactory.create();
+  }
+
+  @Test
+  public void testJSONDecode() throws Exception {
+    PdxInstance pdxInstance = new JSONCodec().decode(complexJSONString.getBytes());
+
+    assertNotNull(pdxInstance);
+    List<String> fieldNames = List.of("_id", "index", "guid", "isActive", "balance", "picture",
+        "age", "eyeColor", "name", "gender", "company", "email", "phone", "address", "about",
+        "registered", "latitude", "longitude", "tags", "friends", "greeting", "favoriteFruit");
+    assertEquals(fieldNames, pdxInstance.getFieldNames());
+  }
+
+}


[17/25] geode git commit: GEODE-3395 Variable-ize product version and name in user guide - Managing

Posted by ud...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/troubleshooting/diagnosing_system_probs.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/troubleshooting/diagnosing_system_probs.html.md.erb b/geode-docs/managing/troubleshooting/diagnosing_system_probs.html.md.erb
index 9b34a3b..6f8b9f8 100644
--- a/geode-docs/managing/troubleshooting/diagnosing_system_probs.html.md.erb
+++ b/geode-docs/managing/troubleshooting/diagnosing_system_probs.html.md.erb
@@ -102,13 +102,13 @@ Response:
 Response: Check these possible causes.
 
 -   Network problem—the most common cause. First, try to ping the other hosts.
--   Firewall problems. If members of your distributed Geode system are located outside the LAN, check whether the firewall is blocking communication. Geode is a network-centric distributed system, so if you have a firewall running on your machine, it could cause connection problems. For example, your connections may fail if your firewall places restrictions on inbound or outbound permissions for Java-based sockets. You may need to modify your firewall configuration to permit traffic to Java applications running on your machine. The specific configuration depends on the firewall you are using.
+-   Firewall problems. If members of your distributed <%=vars.product_name%> system are located outside the LAN, check whether the firewall is blocking communication. <%=vars.product_name%> is a network-centric distributed system, so if you have a firewall running on your machine, it could cause connection problems. For example, your connections may fail if your firewall places restrictions on inbound or outbound permissions for Java-based sockets. You may need to modify your firewall configuration to permit traffic to Java applications running on your machine. The specific configuration depends on the firewall you are using.
 -   Wrong multicast port when using multicast for membership. Check the `gemfire.properties` file of this application or cache server to see that the mcast-port is configured correctly. If you are running multiple distributed systems at your site, each distributed system must use a unique multicast port.
 -   Can not connect to locator (when using TCP for discovery).
     -   Check that the locators attribute in this process’s `gemfire.properties` has the correct IP address for the locator.
     -   Check that the locator process is running. If not, see instructions for related problem, [Data distribution has stopped, although member processes are running](diagnosing_system_probs.html#diagnosing_system_probs__section_04CEF27475924E5D9860BEE6D64C49E2).
     -   Bind address set incorrectly on a multi-homed host. When you specify the bind address, use the IP address rather than the host name. Sometimes multiple network adapters are configured with the same hostname. See [Topology and Communication General Concepts](../../topologies_and_comm/topology_concepts/chapter_overview.html#concept_7628F498DB534A2D8A99748F5DA5DC94) for more information about using bind addresses.
--   Wrong version of Geode . A version mismatch can cause the process to hang or crash. Check the software version with the gemfire version command.
+-   Wrong version of <%=vars.product_name%> . A version mismatch can cause the process to hang or crash. Check the software version with the gemfire version command.
 
 ## <a id="diagnosing_system_probs__section_D607C96A6CBE42FD880F1463A20A8BEF" class="no-quick-link"></a>Member process seems to hang
 
@@ -124,7 +124,7 @@ Either the process can’t find the configuration file or, if it is an applicati
 Response:
 
 -   Check that the `gemfire.properties` file is in the right directory.
--   Make sure the process is not picking up settings from another `gemfire.properties` file earlier in the search path. Geode looks for a `gemfire.properties` file in the current working directory, the home directory, and the CLASSPATH, in that order.
+-   Make sure the process is not picking up settings from another `gemfire.properties` file earlier in the search path. <%=vars.product_name%> looks for a `gemfire.properties` file in the current working directory, the home directory, and the CLASSPATH, in that order.
 -   For an application, check the documentation to see whether it does programmatic configuration. If so, the properties that are set programmatically cannot be reset in a `gemfire.properties` file. See your application’s customer support group for configuration changes.
 
 ## <a id="diagnosing_system_probs__section_B0698527A4DF4D84877B1AF66291ABFD" class="no-quick-link"></a>Cache creation fails - must match schema definition root
@@ -145,7 +145,7 @@ Error while parsing XML, caused by org.xml.sax.SAXParseException:
 Document root element "cache", must match DOCTYPE root "client-cache".
 ```
 
-Geode declarative cache creation uses one of two root element pairs: `cache` or `client-cache`. The name must be the same in both places.
+<%=vars.product_name%> declarative cache creation uses one of two root element pairs: `cache` or `client-cache`. The name must be the same in both places.
 
 Response:
 
@@ -264,17 +264,17 @@ The process may be hitting its virtual address space limits. The virtual address
 
 ## <a id="diagnosing_system_probs__section_B49BD03F4CA241C7BED4A2C4D5936A7A" class="no-quick-link"></a>PartitionedRegionDistributionException
 
-The org.apache.geode.cache.PartitionedRegionDistributionException appears when Geode fails after many attempts to complete a distributed operation. This exception indicates that no data store member can be found to perform a destroy, invalidate, or get operation.
+The org.apache.geode.cache.PartitionedRegionDistributionException appears when <%=vars.product_name%> fails after many attempts to complete a distributed operation. This exception indicates that no data store member can be found to perform a destroy, invalidate, or get operation.
 
 Response:
 
 -   Check the network for traffic congestion or a broken connection to a member.
--   Look at the overall installation for problems, such as operations at the application level set to a higher priority than the Geode processes.
+-   Look at the overall installation for problems, such as operations at the application level set to a higher priority than the <%=vars.product_name%> processes.
 -   If you keep seeing PartitionedRegionDistributionException, you should evaluate whether you need to start more members.
 
 ## <a id="diagnosing_system_probs__section_7DE15A6C99974821B6CA418BC2AF98F1" class="no-quick-link"></a>PartitionedRegionStorageException
 
-The org.apache.geode.cache.PartitionedRegionStorageException appears when Geode can’t create a new entry. This exception arises from a lack of storage space for put and create operations or for get operations with a loader. PartitionedRegionStorageException often indicates data loss or impending data loss.
+The org.apache.geode.cache.PartitionedRegionStorageException appears when <%=vars.product_name%> can’t create a new entry. This exception arises from a lack of storage space for put and create operations or for get operations with a loader. PartitionedRegionStorageException often indicates data loss or impending data loss.
 
 The text string indicates the cause of the exception, as in these examples:
 
@@ -289,7 +289,7 @@ Ran out of retries attempting to allocate a bucket in the partitioned region....
 Response:
 
 -   Check the network for traffic congestion or a broken connection to a member.
--   Look at the overall installation for problems, such as operations at the application level set to a higher priority than the Geode processes.
+-   Look at the overall installation for problems, such as operations at the application level set to a higher priority than the <%=vars.product_name%> processes.
 -   If you keep seeing PartitionedRegionStorageException, you should evaluate whether you need to start more members.
 
 ## <a id="diagnosing_system_probs__section_AFA1D06BC3AA44A4AB0593FD1EF0B0B7" class="no-quick-link"></a>Application crashes without producing an exception
@@ -320,7 +320,7 @@ Increase the default socket timeout setting for the member. This timeout is set
 
 ## <a id="diagnosing_system_probs__section_8C7CB2EA0A274DAF90083FECE0BF3B1F" class="no-quick-link"></a>Member logs ForcedDisconnectException, Cache and DistributedSystem forcibly closed
 
-A distributed system member’s Cache and DistributedSystem are forcibly closed by the system membership coordinator if it becomes sick or too slow to respond to heartbeat requests. When this happens, listeners receive RegionDestroyed notification with an opcode of FORCED\_DISCONNECT. The Geode log file for the member shows a ForcedDisconnectException with the message
+A distributed system member’s Cache and DistributedSystem are forcibly closed by the system membership coordinator if it becomes sick or too slow to respond to heartbeat requests. When this happens, listeners receive RegionDestroyed notification with an opcode of FORCED\_DISCONNECT. The <%=vars.product_name%> log file for the member shows a ForcedDisconnectException with the message
 
 ``` pre
 This member has been forced out of the distributed system because it did not respond
@@ -397,7 +397,7 @@ If you are experiencing slow performance and are sending large objects (multiple
 
 ## <a id="diagnosing_system_probs__section_F93DD765FF2A43439D3FF7936F8883DE" class="no-quick-link"></a>Can’t get Windows performance data
 
-Attempting to run performance measurements for Geode on Windows can produce this error message:
+Attempting to run performance measurements for <%=vars.product_name%> on Windows can produce this error message:
 
 ``` pre
 Can't get Windows performance data. RegQueryValueEx returned 5
@@ -407,7 +407,7 @@ This error can occur because incorrect information is returned when a Win32 appl
 
 Response:
 
-To successfully acquire Windows performance data, you need to verify that you have the proper registry key access permissions in the system registry. In particular, make sure that Perflib in the following registry path is readable (KEY\_READ access) by the Geode process:
+To successfully acquire Windows performance data, you need to verify that you have the proper registry key access permissions in the system registry. In particular, make sure that Perflib in the following registry path is readable (KEY\_READ access) by the <%=vars.product_name%> process:
 
 ``` pre
 HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib
@@ -415,7 +415,7 @@ HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib
 
 An example of reasonable security on the performance data would be to grant administrators KEY\_ALL\_ACCESS access and interactive users KEY\_READ access. This particular configuration would prevent non-administrator remote users from querying performance data.
 
-See [http://support.microsoft.com/kb/310426](http://support.microsoft.com/kb/310426) and [http://support.microsoft.com/kb/146906](http://support.microsoft.com/kb/146906) for instructions about how to ensure that Geode processes have access to the registry keys associated with performance.
+See [http://support.microsoft.com/kb/310426](http://support.microsoft.com/kb/310426) and [http://support.microsoft.com/kb/146906](http://support.microsoft.com/kb/146906) for instructions about how to ensure that <%=vars.product_name%> processes have access to the registry keys associated with performance.
 
 ## <a id="diagnosing_system_probs__section_E70C332303A242BEAE9D2C0A2EE70E0A" class="no-quick-link"></a>Java applications on 64-bit platforms hang or use 100% CPU
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/troubleshooting/prevent_and_recover_disk_full_errors.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/troubleshooting/prevent_and_recover_disk_full_errors.html.md.erb b/geode-docs/managing/troubleshooting/prevent_and_recover_disk_full_errors.html.md.erb
index ca1e64d..8f8bc18 100644
--- a/geode-docs/managing/troubleshooting/prevent_and_recover_disk_full_errors.html.md.erb
+++ b/geode-docs/managing/troubleshooting/prevent_and_recover_disk_full_errors.html.md.erb
@@ -19,7 +19,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-It is important to monitor the disk usage of Geode members. If a member lacks sufficient disk space for a disk store, the member attempts to shut down the disk store and its associated cache, and logs an error message. A shutdown due to a member running out of disk space can cause loss of data, data file corruption, log file corruption and other error conditions that can negatively impact your applications.
+It is important to monitor the disk usage of <%=vars.product_name%> members. If a member lacks sufficient disk space for a disk store, the member attempts to shut down the disk store and its associated cache, and logs an error message. A shutdown due to a member running out of disk space can cause loss of data, data file corruption, log file corruption and other error conditions that can negatively impact your applications.
 
 After you make sufficient disk space available to the member, you can restart the member.
 
@@ -33,7 +33,7 @@ When a disk write fails due to disk full conditions, the member is shutdown and
 
 ## Recovering from Disk Full Errors
 
-If a member of your Geode distributed system fails due to a disk full error condition, add or make additional disk capacity available and attempt to restart the member normally. If the member does not restart and there is a redundant copy of its regions in a disk store on another member, you can restore the member using the following steps:
+If a member of your <%=vars.product_name%> distributed system fails due to a disk full error condition, add or make additional disk capacity available and attempt to restart the member normally. If the member does not restart and there is a redundant copy of its regions in a disk store on another member, you can restore the member using the following steps:
 
 1.  Delete or move the disk store files from the failed member.
 2.  Use the gfsh `show missing-disk-stores` command to identify any missing data. You may need to manually restore this data.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/troubleshooting/recovering_conflicting_data_exceptions.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/troubleshooting/recovering_conflicting_data_exceptions.html.md.erb b/geode-docs/managing/troubleshooting/recovering_conflicting_data_exceptions.html.md.erb
index 4eade62..0f51dcf 100644
--- a/geode-docs/managing/troubleshooting/recovering_conflicting_data_exceptions.html.md.erb
+++ b/geode-docs/managing/troubleshooting/recovering_conflicting_data_exceptions.html.md.erb
@@ -19,11 +19,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-A `ConflictingPersistentDataException` while starting up persistent members indicates that you have multiple copies of some persistent data, and Geode cannot determine which copy to use.
+A `ConflictingPersistentDataException` while starting up persistent members indicates that you have multiple copies of some persistent data, and <%=vars.product_name%> cannot determine which copy to use.
 
-Normally Geode uses metadata to determine automatically which copy of persistent data to use. Along with the region data, each member persists a list of other members that are hosting the region and whether their data is up to date. A `ConflictingPersistentDataException` happens when two members compare their metadata and find that it is inconsistent. The members either don’t know about each other, or they both think the other member has stale data.
+Normally <%=vars.product_name%> uses metadata to determine automatically which copy of persistent data to use. Along with the region data, each member persists a list of other members that are hosting the region and whether their data is up to date. A `ConflictingPersistentDataException` happens when two members compare their metadata and find that it is inconsistent. The members either don’t know about each other, or they both think the other member has stale data.
 
-The following sections describe scenarios that can cause `ConflictingPersistentDataException`s in Geode and how to resolve the conflict.
+The following sections describe scenarios that can cause `ConflictingPersistentDataException`s in <%=vars.product_name%> and how to resolve the conflict.
 
 ## <a id="topic_ghw_z2m_jq__section_sj3_lpm_jq" class="no-quick-link"></a>Independently Created Copies
 
@@ -34,21 +34,21 @@ There are a few ways to end up with independently created systems.
 -   Create two different distributed systems by having members connect to different locators that are not aware of each other.
 -   Shut down all persistent members and then start up a different set of brand new persistent members.
 
-Geode will not automatically merge independently created data for the same region. Instead, you need to export the data from one of the systems and import it into the other system. See the section [Cache and Region Snapshots](../cache_snapshots/chapter_overview.html#concept_E6AC3E25404D4D7788F2D52D83EE3071) for instructions on how to export data from one system and import it into another.
+<%=vars.product_name%> will not automatically merge independently created data for the same region. Instead, you need to export the data from one of the systems and import it into the other system. See the section [Cache and Region Snapshots](../cache_snapshots/chapter_overview.html#concept_E6AC3E25404D4D7788F2D52D83EE3071) for instructions on how to export data from one system and import it into another.
 
 ## <a id="topic_ghw_z2m_jq__section_op5_hpm_jq" class="no-quick-link"></a>Starting New Members First
 
 Starting a brand new member that has no persistent data before starting older members with persistent data can cause a `ConflictingPersistentDataException`.
 
-One accidental way this can happen is to shut the system down, add a new member to the startup scripts, and start all members in parallel. By chance, the new member may start first. The issue is that the new member will create an empty, independent copy of the data before the older members start up. Geode will be treat this situation like the [Independently Created Copies](#topic_ghw_z2m_jq__section_sj3_lpm_jq) case.
+One accidental way this can happen is to shut the system down, add a new member to the startup scripts, and start all members in parallel. By chance, the new member may start first. The issue is that the new member will create an empty, independent copy of the data before the older members start up. <%=vars.product_name%> will treat this situation like the [Independently Created Copies](#topic_ghw_z2m_jq__section_sj3_lpm_jq) case.
 
 In this case the fix is simply to move aside or delete the persistent files for the new member, shut down the new member and then restart the older members. When the older members have fully recovered, then restart the new member.
 
 ## A Network Failure Occurs and Network Partitioning Detection is Disabled
 
-When `enable-network-partition-detection` is set to the default value of true, Geode will detect a network partition and shut down unreachable members to prevent a network partition ("split brain") from occurring. No conflicts should occur when the system is healed.
+When `enable-network-partition-detection` is set to the default value of true, <%=vars.product_name%> will detect a network partition and shut down unreachable members to prevent a network partition ("split brain") from occurring. No conflicts should occur when the system is healed.
 
-However if `enable-network-partition-detection` is false, Geode will not detect the network partition. Instead, each side of the network partition will end up recording that the other side of the partition has stale data. When the partition is healed and persistent members are restarted, the members will report a conflict because both sides of the partition think the other members are stale.
+However if `enable-network-partition-detection` is false, <%=vars.product_name%> will not detect the network partition. Instead, each side of the network partition will end up recording that the other side of the partition has stale data. When the partition is healed and persistent members are restarted, the members will report a conflict because both sides of the partition think the other members are stale.
 
 In some cases it may be possible to choose between sides of the network partition and just keep the data from one side of the partition. Otherwise you may need to salvage data and import it into a fresh system.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/troubleshooting/recovering_from_app_crashes.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/troubleshooting/recovering_from_app_crashes.html.md.erb b/geode-docs/managing/troubleshooting/recovering_from_app_crashes.html.md.erb
index 01397af..2f168ca 100644
--- a/geode-docs/managing/troubleshooting/recovering_from_app_crashes.html.md.erb
+++ b/geode-docs/managing/troubleshooting/recovering_from_app_crashes.html.md.erb
@@ -21,11 +21,11 @@ limitations under the License.
 
 When the application or cache server crashes, its local cache is lost, and any resources it owned (for example, distributed locks) are released. The member must recreate its local cache upon recovery.
 
--   **[Recovering from Crashes with a Peer-to-Peer Configuration](../../managing/troubleshooting/recovering_from_p2p_crashes.html)**
+-   **[Recovering from Crashes with a Peer-to-Peer Configuration](recovering_from_p2p_crashes.html)**
 
     When a member crashes, the remaining members continue operation as though the missing application or cache server had never existed. The recovery process differs according to region type and scope, as well as data redundancy configuration.
 
--   **[Recovering from Crashes with a Client/Server Configuration](../../managing/troubleshooting/recovering_from_cs_crashes.html)**
+-   **[Recovering from Crashes with a Client/Server Configuration](recovering_from_cs_crashes.html)**
 
     In a client/server configuration, you first make the server available as a member of a distributed system again, and then restart clients as quickly as possible. The client recovers its data from its servers through normal operation.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/troubleshooting/recovering_from_cs_crashes.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/troubleshooting/recovering_from_cs_crashes.html.md.erb b/geode-docs/managing/troubleshooting/recovering_from_cs_crashes.html.md.erb
index 3475556..ad9f483 100644
--- a/geode-docs/managing/troubleshooting/recovering_from_cs_crashes.html.md.erb
+++ b/geode-docs/managing/troubleshooting/recovering_from_cs_crashes.html.md.erb
@@ -51,4 +51,4 @@ When a client crashes, restart it as quickly as possible in the usual way. The c
 -   **Entries sent lazily to the client**—Entries are sent lazily to the client for entries that the client registers interest in that are not initially available in the server cache.
 -   **Events sent immediately to the client**—If the server has been saving events for the client, these are immediately replayed when the client reconnects. Cache modification events for entries in which the client has registered durable interest are saved.
 
-If you have a durable client configured to connect to multiple servers, keep in mind that Geode does not maintain server redundancy while the client is disconnected. If you lose all of its primary and secondary servers, you lose the client’s queued messages. Even if the servers fail one at a time, so that running clients have time to fail over and pick new secondary servers, an off-line durable client cannot do that and thus loses its queued messages.
+If you have a durable client configured to connect to multiple servers, keep in mind that <%=vars.product_name%> does not maintain server redundancy while the client is disconnected. If you lose all of its primary and secondary servers, you lose the client’s queued messages. Even if the servers fail one at a time, so that running clients have time to fail over and pick new secondary servers, an off-line durable client cannot do that and thus loses its queued messages.

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/troubleshooting/recovering_from_machine_crashes.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/troubleshooting/recovering_from_machine_crashes.html.md.erb b/geode-docs/managing/troubleshooting/recovering_from_machine_crashes.html.md.erb
index cacb935..82705ea 100644
--- a/geode-docs/managing/troubleshooting/recovering_from_machine_crashes.html.md.erb
+++ b/geode-docs/managing/troubleshooting/recovering_from_machine_crashes.html.md.erb
@@ -29,7 +29,7 @@ To recover from a machine crash:
 
 1.  Determine which processes run on this machine.
 2.  Reboot the machine.
-3.  If a Geode locator runs here, start it first.
+3.  If a <%=vars.product_name%> locator runs here, start it first.
     **Note:**
     At least one locator must be running before you start any applications or cache servers.
 
@@ -41,9 +41,9 @@ If you have to move a locator process to a different machine, the locator isn’
 
 The partitioned region initializes itself correctly regardless of the order in which the data stores rejoin. The applications and cache servers recreate their data automatically as they return to active work.
 
-If the partitioned region is configured for data redundancy, Geode may be able to handle a machine crash automatically with no data loss, depending on how many redundant copies there are and how many members have to be restarted. See also [Recovery for Partitioned Regions](recovering_from_p2p_crashes.html#rec_app_p2p_crash__section_0E7D482DD8E84250A10070431B29AAC5).
+If the partitioned region is configured for data redundancy, <%=vars.product_name%> may be able to handle a machine crash automatically with no data loss, depending on how many redundant copies there are and how many members have to be restarted. See also [Recovery for Partitioned Regions](recovering_from_p2p_crashes.html#rec_app_p2p_crash__section_0E7D482DD8E84250A10070431B29AAC5).
 
-If the partitioned region does not have redundant copies, the system members recreate the data through normal operation. If the member that crashed was an application, check whether it was designed to write its data to an external data source. If so, decide whether data recovery is possible and preferable to starting with new data generated through the Geode distributed system.
+If the partitioned region does not have redundant copies, the system members recreate the data through normal operation. If the member that crashed was an application, check whether it was designed to write its data to an external data source. If so, decide whether data recovery is possible and preferable to starting with new data generated through the <%=vars.product_name%> distributed system.
 
 ## <a id="rec_system_crash__section_D3E3002D6C864853B1517A310BD05BDF" class="no-quick-link"></a>Data Recovery for Distributed Regions
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/troubleshooting/recovering_from_network_outages.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/troubleshooting/recovering_from_network_outages.html.md.erb b/geode-docs/managing/troubleshooting/recovering_from_network_outages.html.md.erb
index f798b2b..cbeef24 100644
--- a/geode-docs/managing/troubleshooting/recovering_from_network_outages.html.md.erb
+++ b/geode-docs/managing/troubleshooting/recovering_from_network_outages.html.md.erb
@@ -27,7 +27,7 @@ However, if you know the architecture of your system well, and you are sure you
 
 When the network connecting members of a distributed system goes down, system members treat this like a machine crash. Members on each side of the network failure respond by removing the members on the other side from the membership list. If network partitioning detection is enabled (the default), the partition that contains sufficient quorum (&gt; 51% based on member weight) will continue to operate, while the other partition with insufficient quorum will shut down. See [Network Partitioning](../network_partitioning/chapter_overview.html#network_partitioning) for a detailed explanation on how this detection system operates.
 
-In addition, members that have been disconnected either via network partition or due to unresponsiveness will automatically try to reconnect to the distributed system unless configured otherwise. See [Handling Forced Cache Disconnection Using Autoreconnect](../autoreconnect/member-reconnect.html).
+In addition, members that have been disconnected either via network partition or due to unresponsiveness will automatically try to reconnect to the distributed system unless configured otherwise. See [Handling Forced Cache Disconnection Using Autoreconnect](../member-reconnect.html).
 
 ## <a id="rec_network_crash__section_F9A0C31AE25C4E7185DF3B1A8486BDFA" class="no-quick-link"></a>Recovery Procedure
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/managing/troubleshooting/recovering_from_p2p_crashes.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/troubleshooting/recovering_from_p2p_crashes.html.md.erb b/geode-docs/managing/troubleshooting/recovering_from_p2p_crashes.html.md.erb
index 332bdc5..c3e1199 100644
--- a/geode-docs/managing/troubleshooting/recovering_from_p2p_crashes.html.md.erb
+++ b/geode-docs/managing/troubleshooting/recovering_from_p2p_crashes.html.md.erb
@@ -63,7 +63,7 @@ When an application or cache server crashes, any data in local memory is lost, i
 
 If the partitioned region is configured for redundancy and a member crashes, the system continues to operate with the remaining copies of the data. You may need to perform recovery actions depending on how many members you have lost and how you have configured redundancy in your system.
 
-By default, Geode does not make new copies of the data until a new member is brought online to replace the member that crashed. You can control this behavior using the recovery delay attributes. For more information, see [Configure High Availability for a Partitioned Region](../../developing/partitioned_regions/configuring_ha_for_pr.html).
+By default, <%=vars.product_name%> does not make new copies of the data until a new member is brought online to replace the member that crashed. You can control this behavior using the recovery delay attributes. For more information, see [Configure High Availability for a Partitioned Region](../../developing/partitioned_regions/configuring_ha_for_pr.html).
 
 To recover, start a replacement member. The new member regenerates the lost copies and returns them to the configured redundancy level.
 
@@ -78,7 +78,7 @@ You can also lose access to all copies of your data through network failure. See
 
 **Recovery Without Data Redundancy**
 
-If a member crashes and there are no redundant copies, any logic that tries to interact with the bucket data is *blocked* until the primary buckets are restored from disk. (If you do not have persistence enabled, Geode will reallocate the buckets on any available remaining nodes, however you will need to recover any lost data using external mechanisms.)
+If a member crashes and there are no redundant copies, any logic that tries to interact with the bucket data is *blocked* until the primary buckets are restored from disk. (If you do not have persistence enabled, <%=vars.product_name%> will reallocate the buckets on any available remaining nodes, however you will need to recover any lost data using external mechanisms.)
 
 To recover, restart the member. The application returns to active work and automatically begins to create new data.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/reference/topics/gemfire_properties.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/reference/topics/gemfire_properties.html.md.erb b/geode-docs/reference/topics/gemfire_properties.html.md.erb
index a226618..1616759 100644
--- a/geode-docs/reference/topics/gemfire_properties.html.md.erb
+++ b/geode-docs/reference/topics/gemfire_properties.html.md.erb
@@ -149,7 +149,7 @@ See <a href="../../configuring/cluster_config/deploying_application_jars.html">D
 <tr class="even">
 <td>disable-auto-reconnect</td>
 <td>By default, a <%=vars.product_name%> member (both locators and servers) will attempt to reconnect and reinitialize the cache after it has been forced out of the distributed system by a network partition event or has otherwise been shunned by other members. Use this property to turn off the autoreconnect behavior. 
-See <a href="../../managing/autoreconnect/member-reconnect.html">Handling Forced Cache Disconnection Using Autoreconnect</a> for more details.</td>
+See <a href="../../managing/member-reconnect.html">Handling Forced Cache Disconnection Using Autoreconnect</a> for more details.</td>
 <td>S, L</td>
 <td>false</td>
 </tr>

http://git-wip-us.apache.org/repos/asf/geode/blob/1b84ecbe/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
index 5d20794..858a007 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
@@ -942,7 +942,7 @@ See [Region Data Storage and Distribution](../../../developing/region_options/ch
 </tr>
 <tr class="even">
 <td><span class="keyword parmname">\-\-compressor</span></td>
-<td>Java class name that implements compression for the region. You can write a custom compressor that implements <code class="ph codeph">org.apache.geode.compression.Compressor</code> or you can specify the Snappy compressor (<code class="ph codeph">org.apache.geode.compression.SnappyCompressor</code>), which is bundled with <%=vars.product_name%>. See <a href="../../../managing/region_compression/region_compression.html#topic_r43_wgc_gl">Region Compression</a>.</td>
+<td>Java class name that implements compression for the region. You can write a custom compressor that implements <code class="ph codeph">org.apache.geode.compression.Compressor</code> or you can specify the Snappy compressor (<code class="ph codeph">org.apache.geode.compression.SnappyCompressor</code>), which is bundled with <%=vars.product_name%>. See <a href="../../../managing/region_compression.html#topic_r43_wgc_gl">Region Compression</a>.</td>
 <td>no compression</td>
 </tr>
 <tr class="odd">