Posted to commits@geode.apache.org by kl...@apache.org on 2017/05/30 17:41:53 UTC

[01/43] geode git commit: GEODE-2913 Update Lucene index documentation [Forced Update!]

Repository: geode
Updated Branches:
  refs/heads/feature/GEODE-2632-17 6bfa3a0d7 -> 283215f9f (forced update)


GEODE-2913 Update Lucene index documentation


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/096c22d5
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/096c22d5
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/096c22d5

Branch: refs/heads/feature/GEODE-2632-17
Commit: 096c22d5c73dc609651caf2887b4d95f162230ad
Parents: f271667
Author: Karen Miller <km...@pivotal.io>
Authored: Wed May 17 14:10:42 2017 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Wed May 24 17:07:49 2017 -0700

----------------------------------------------------------------------
 .../source/subnavs/geode-subnav.erb             |  22 +-
 .../implementing_authorization.html.md.erb      |   5 +
 .../statistics/statistics_list.html.md.erb      |  24 ++
 .../topics/cache-elements-list.html.md.erb      |   4 +-
 .../reference/topics/cache_xml.html.md.erb      |  63 +++++
 ...mory_requirements_for_cache_data.html.md.erb |   2 +
 .../gfsh/command-pages/create.html.md.erb       |   2 +-
 .../gfsh/command-pages/destroy.html.md.erb      |  13 +-
 .../lucene_integration.html.md.erb              | 283 +++++++++++++++----
 9 files changed, 339 insertions(+), 79 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-book/master_middleman/source/subnavs/geode-subnav.erb
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/subnavs/geode-subnav.erb b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
index c97e5ec..12b2151 100644
--- a/geode-book/master_middleman/source/subnavs/geode-subnav.erb
+++ b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
@@ -2308,16 +2308,7 @@ gfsh</a>
                                 <a href="/docs/guide/12/tools_modules/lucene_integration.html#using-the-apache-lucene-integration">Using the Apache Lucene Integration</a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/tools_modules/lucene_integration.html#java-api-example">Java API Example</a>
-                            </li>
-                            <li>
-                                <a href="/docs/guide/12/tools_modules/lucene_integration.html#search-example">Search Example</a>
-                            </li>
-                            <li>
-                                <a href="/docs/guide/12/tools_modules/lucene_integration.html#gfsh-api">Gfsh API</a>
-                            </li>
-                            <li>
-                                <a href="/docs/guide/12/tools_modules/lucene_integration.html#xml-configuration">XML Configuration</a>
+                                <a href="/docs/guide/12/tools_modules/lucene_integration.html#LuceneRandC">Requirements and Caveats</a>
                             </li>
                         </ul>
                     </li>
@@ -2557,6 +2548,14 @@ gfsh</a>
                                                 <a href="/docs/guide/12/reference/topics/cache_xml.html#index">&lt;index&gt;</a>
                                             </li>
                                             <li class="has_submenu">
+                                                <a href="/docs/guide/12/reference/topics/cache_xml.html#luceneindex">&lt;lucene:index&gt;</a>
+                                                <ul>
+                                                    <li>
+                                                        <a href="/docs/guide/12/reference/topics/cache_xml.html#lucenefield">&lt;lucene:field&gt;</a>
+                                                    </li>
+                                                </ul>
+                                            </li>
+                                            <li class="has_submenu">
                                                 <a href="/docs/guide/12/reference/topics/cache_xml.html#entry">&lt;entry&gt;</a>
                                                 <ul>
                                                     <li class="has_submenu">
@@ -3037,6 +3036,9 @@ gfsh</a>
                                 <a href="/docs/guide/12/reference/statistics/statistics_list.html#section_C48B654F973E4B44AD825D459C23A6CD">Locator (LocatorStatistics)</a>
                             </li>
                             <li>
+                                <a href="/docs/guide/12/reference/statistics/statistics_list.html#LuceneStats">Lucene Indexes (LuceneIndexStats)</a>
+                            </li>
+                            <li>
                                 <a href="/docs/guide/12/reference/statistics/statistics_list.html#topic_ohc_tjk_w5">Off-Heap (OffHeapMemoryStats)</a>
                             </li>
                             <li>

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/managing/security/implementing_authorization.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/implementing_authorization.html.md.erb b/geode-docs/managing/security/implementing_authorization.html.md.erb
index f897e4c..d16280e 100644
--- a/geode-docs/managing/security/implementing_authorization.html.md.erb
+++ b/geode-docs/managing/security/implementing_authorization.html.md.erb
@@ -117,18 +117,21 @@ This table classifies the permissions assigned for `gfsh` operations.
 | create gateway-receiver                | DATA:MANAGE                      |
 | create gateway-sender                  | DATA:MANAGE                      |
 | create index                           | DATA:MANAGE:RegionName           |
+| create lucene index                    | DATA:MANAGE:RegionName           |
 | create region                          | DATA:MANAGE                      |
 | define index                           | DATA:MANAGE:RegionName           |
 | deploy                                 | DATA:MANAGE, DATA:WRITE, CLUSTER:MANAGE, and CLUSTER:WRITE |
 | describe client                        | CLUSTER:READ                     |
 | describe config                        | CLUSTER:READ                     |
 | describe disk-store                    | CLUSTER:READ                     |
+| describe lucene index                  | CLUSTER:READ                     |
 | describe member                        | CLUSTER:READ                     |
 | describe offline-disk-store            | CLUSTER:READ                     |
 | describe region                        | CLUSTER:READ                     |
 | destroy disk-store                     | DATA:MANAGE                      |
 | destroy function                       | DATA:MANAGE                      |
 | destroy index                          | DATA:MANAGE or DATA:MANAGE:RegionName |
+| destroy lucene index                   | DATA:MANAGE:RegionName           |
 | destroy region                         | DATA:MANAGE                      |
 | disconnect                             | DATA:MANAGE                      |
 | echo                                   | DATA:MANAGE                      |
@@ -152,6 +155,7 @@ This table classifies the permissions assigned for `gfsh` operations.
 | list functions                         | CLUSTER:READ                     |
 | list gateways                          | CLUSTER:READ                     |
 | list indexes                           | CLUSTER:READ                     |
+| list lucene indexes                    | CLUSTER:READ                     |
 | list members                           | CLUSTER:READ                     |
 | list regions                           | DATA:READ                        |
 | load-balance gateway-sender            | DATA:MANAGE                      |
@@ -165,6 +169,7 @@ This table classifies the permissions assigned for `gfsh` operations.
 | remove                                 | DATA:WRITE:RegionName or DATA:WRITE:RegionName:Key |
 | resume gateway-sender                  | DATA:MANAGE                      |
 | revoke missing-disk-store              | DATA:MANAGE                      |
+| search lucene                          | DATA:WRITE                       |
 | show dead-locks                        | CLUSTER:READ                     |
 | show log                               | CLUSTER:READ                     |
 | show metrics                           | CLUSTER:READ                     |

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/reference/statistics/statistics_list.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/reference/statistics/statistics_list.html.md.erb b/geode-docs/reference/statistics/statistics_list.html.md.erb
index c38b2f7..49e416e 100644
--- a/geode-docs/reference/statistics/statistics_list.html.md.erb
+++ b/geode-docs/reference/statistics/statistics_list.html.md.erb
@@ -60,6 +60,8 @@ Performance statistics are collected for each Java application or cache server t
 
 -   **[Locator (LocatorStatistics)](#section_C48B654F973E4B44AD825D459C23A6CD)**
 
+-   **[Lucene Indexes (LuceneIndexStats)](#LuceneStats)**
+
 -   **[Off-Heap (OffHeapMemoryStats)](#topic_ohc_tjk_w5)**
 
 -   **[Operating System Statistics - Linux](#section_923B28F01BC3416786D3AFBD87F22A5E)**
@@ -1006,6 +1008,28 @@ These statistics are on the Geode locator. The primary statistics are:
 | `RESPONSES_FROM_LOCATOR` | Number of responses this locator has sent to clients.                         |
 | `SERVER_LOAD_UPDATES`    | Total number of times a server load update has been received.                 |
 
+## <a id="LuceneStats" class="no-quick-link"></a>Lucene Indexes (LuceneIndexStats)
+
+These statistics quantify the use of Lucene indexes. The primary statistics are:
+
+| Statistic             | Description                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                         |
+|-----------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `queryExecutions`         | The number of Lucene queries executed on this member.    |
+| `queryExecutionTime`         | The amount of time in nanoseconds spent executing Lucene queries.    |
+| `queryExecutionsInProgress`  | The number of query executions currently in progress.    |
+| `queryExecutionTotalHits`  | The total number of documents returned by query executions.    |
+| `repositoryQueryExecutions`  | The number of Lucene repository queries executed on this member.    |
+| `repositoryQueryExecutionTime`  | The amount of time in nanoseconds spent executing Lucene repository queries.    |
+| `repositoryQueryExecutionsInProgress`  | The number of repository query executions currently in progress.    |
+| `repositoryQueryExecutionTotalHits`  | The total number of documents returned by repository query executions.    |
+| `updates`  | The number of Lucene index documents added or removed on this member.    |
+| `updateTime`  | The amount of time in nanoseconds spent adding or removing documents from the index.    |
+| `updatesInProgress`  | The number of index updates in progress.    |
+| `commits`  | The number of Lucene index commits on this member.    |
+| `commitTime`  | The amount of time in nanoseconds spent in Lucene index commits.    |
+| `commitsInProgress`  | The number of Lucene index commits in progress.    |
+| `documents`  | The number of documents in the index.    |
+
 ## <a id="topic_ohc_tjk_w5" class="no-quick-link"></a>Off-Heap (OffHeapMemoryStats)
 
 These statistics quantify the use of off-heap memory. The primary statistics are:

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/reference/topics/cache-elements-list.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/reference/topics/cache-elements-list.html.md.erb b/geode-docs/reference/topics/cache-elements-list.html.md.erb
index 71e092b..2b1c035 100644
--- a/geode-docs/reference/topics/cache-elements-list.html.md.erb
+++ b/geode-docs/reference/topics/cache-elements-list.html.md.erb
@@ -151,7 +151,9 @@ For details, see [&lt;cache&gt; Element Reference](cache_xml.html#cache_xml_cach
             <config-property-value>
    <region>
       <region-attributes>
-      <index>>
+      <index>
+      <lucene:index>
+         <field>
       <entry>
          <key>
             <string>

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/reference/topics/cache_xml.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/reference/topics/cache_xml.html.md.erb b/geode-docs/reference/topics/cache_xml.html.md.erb
index a934b62..cf5d2b3 100644
--- a/geode-docs/reference/topics/cache_xml.html.md.erb
+++ b/geode-docs/reference/topics/cache_xml.html.md.erb
@@ -2685,6 +2685,7 @@ Defines a region in the cache. See [&lt;region-attributes&gt;](#region-attribute
 
 See [&lt;region-attributes&gt;](#region-attributes) for a complete listing of region attributes.
 
+
 ## <a id="index" class="no-quick-link"></a>&lt;index&gt;
 
 Describes an index to be created on a region. The index node, if any, should all come immediately after the "region-attributes" node. The "name" attribute is a required field which identifies the name of the index. See [Working with Indexes](../../developing/query_index/query_index.html) for more information on indexes.
@@ -2728,6 +2729,68 @@ Describes an index to be created on a region. The index node, if any, should all
 </region>
 ```
 
+<!-- start of Lucene index description -->
+## <a id="luceneindex" class="no-quick-link"></a>&lt;lucene:index&gt;
+
+Describes a Lucene index to be created on a region. The `lucene` namespace
+and the scoping operator (`:`) must be specified, as the Geode `cache`
+namespace also defines an `index` element (for OQL indexes).
+
+**API:** `org.apache.geode.cache.lucene` package
+
+| Attribute   | Description                                                                                                                                                                                                                                                                           | Default |
+|-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|
+| name        | Required. Name of the Lucene index.   |         |
+
+**Example:**
+
+``` pre
+<cache
+    xmlns="http://geode.apache.org/schema/cache"
+    xmlns:lucene="http://geode.apache.org/schema/lucene"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://geode.apache.org/schema/cache
+        http://geode.apache.org/schema/cache/cache-1.0.xsd
+        http://geode.apache.org/schema/lucene
+        http://geode.apache.org/schema/lucene/lucene-1.0.xsd"
+    version="1.0">
+
+    <region name="regionA" refid="PARTITION">
+        <lucene:index name="myIndex">
+            <lucene:field name="x" />
+            <lucene:field name="y" />
+        </lucene:index>
+    </region>
+</cache>
+```
+<!-- end of Lucene index description -->
+
+<!-- start of Lucene field description -->
+## <a id="lucenefield" class="no-quick-link"></a>&lt;lucene:field&gt;
+
+Describes a field to be included in a Lucene index. Including the
+`lucene` namespace and the scoping operator (`:`) adds clarity,
+but is not required.
+
+**API:** `org.apache.geode.cache.lucene` package
+
+| Attribute   | Description                                                                                                                                                                                                                                                                           | Default |
+|-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|
+| name        | Required. A string that defines the name of the field. If a single field is defined by the value `"__REGION_VALUE_FIELD"`, then the entire value is used as a single field.   |         |
+| analyzer    | A string that provides the path to the analyzer to use for this field. A value of `"null"` uses the default analyzer.  | `"null"` |
+
+**Example:**
+
+``` pre
+<region name="dataregion" refid="PARTITION_REDUNDANT">
+   <lucene:index name="full_value_index">
+     <lucene:field name="__REGION_VALUE_FIELD"/>
+   </lucene:index>
+</region>
+```
+
+<!-- end of Lucene field description -->
+
 ## <a id="entry" class="no-quick-link"></a>&lt;entry&gt;
 
 An "entry" element describes an entry to be added to a region. Note that if an entry with the given key already exists in the region, it will be replaced.

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb b/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
index 1509a40..150814a 100644
--- a/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
+++ b/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
@@ -175,6 +175,8 @@ For indexes used in querying, the overhead varies greatly depending on the type
 
 -   If the index has a single value per region entry for the indexed expression, the index introduces at most 243 bytes per region entry. An example of this type of index is: `fromClause="/portfolios",               indexedExpression="id"`. The maximum of 243 bytes per region entry is reached if each entry has a unique value for the indexed expression. The overhead is reduced if the entries do not have unique index values.
 -   If each region entry has more than one value for the indexed expression, but no two region entries have the same value for it, then the index introduces at most 236 C + 75 bytes per region entry, where C is the average number of values per region entry for the expression.
+-   Lucene indexes add approximately 737 bytes per entry.
+The other index overhead estimates listed here also apply to Lucene indexes.
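+For example, a Lucene index on a region with 1,000,000 entries adds
+roughly 737 MB (1,000,000 entries x 737 bytes) on top of the memory
+used by the region entries themselves.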
 
 ## <a id="topic_i1m_stz_j4" class="no-quick-link"></a>Estimating Management and Monitoring Overhead
 

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
index 4fb4c8c..1398352 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
@@ -683,7 +683,7 @@ create lucene index --name=value --region=value --field=value(,value)* [--analyz
 |----------------------------------------------------|----------------------------------------------------------------------------------------|---------|
 | <span class="keyword parmname">\\-\\-name</span>       | *Required.* Name of the index to create.                                               |         |
 | <span class="keyword parmname">\\-\\-region</span>     | *Required.* Name/Path of the region which corresponds to the "from" clause in a query. |         |
-| <span class="keyword parmname">\\-\\-field</span>      | *Required.* Field of the region values that are referenced by the index.               |         |
+| <span class="keyword parmname">\\-\\-field</span>      | *Required.* Field of the region values that are referenced by the index. To treat the entire value as a single field, specify `__REGION_VALUE_FIELD`.     |         |
 | <span class="keyword parmname">&#8209;&#8209;analyzer</span>   | Analyzer to extract terms from text                                  |         |
 | <span class="keyword parmname">\\-\\-group</span>      | The index will be created on all the members in the specified member groups.                     |         |
 

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/tools_modules/gfsh/command-pages/destroy.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/destroy.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/destroy.html.md.erb
index e6de426..afd78ee 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/destroy.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/destroy.html.md.erb
@@ -152,25 +152,22 @@ See also [create lucene index](create.html#create_lucene_index), [describe lucen
 **Syntax:**
 
 ``` pre
-destroy lucene index [--name=value] [--region=value]
+destroy lucene index --region=value [--name=value]
 ```
 
-**Note:**
-You must specify at least one of the parameter options. If you enter `destroy lucene index` without any parameters, the command will ask you to specify at least one option.
-
 **Parameters, destroy lucene index:**
 
 | Name                                           | Description                                                                  |
 |------------------------------------------------|------------------------------------------------------------------------------|
-| <span class="keyword parmname">\\-\\-name</span>   | Name of the index to be removed.                                            |
-| <span class="keyword parmname">\\-\\-region</span> | Name of the region from which an index or all indexes are to be removed. |
+| <span class="keyword parmname">&#8209;&#8209;region</span> | *Required.* Name of the region from which indexes are to be removed. If no `--name` option is specified, all indexes associated with the region are destroyed.|
+| <span class="keyword parmname">&#8209;&#8209;name</span>   | Name of the index to be removed.                                            |
 
 
 **Example Commands:**
 
 ``` pre
-destroy lucene index --member=server2
-destroy lucene index --name=MyKeyIndex
+destroy lucene index --region=region1
+destroy lucene index --region=region1 --name=MyKeyIndex
 ```
 
 ## <a id="topic_BEDACECF4599407794ACBC0E56B30F65" class="no-quick-link"></a>destroy region

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/tools_modules/lucene_integration.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/lucene_integration.html.md.erb b/geode-docs/tools_modules/lucene_integration.html.md.erb
index e97ce06..b83705b 100644
--- a/geode-docs/tools_modules/lucene_integration.html.md.erb
+++ b/geode-docs/tools_modules/lucene_integration.html.md.erb
@@ -35,85 +35,73 @@ The Apache Lucene integration:
 For more details, see Javadocs for the classes and interfaces that implement Apache Lucene indexes and searches, including
 `LuceneService`, `LuceneQueryFactory`, `LuceneQuery`, and `LuceneResultStruct`.
 
-## <a id="using-the-apache-lucene-integration" class="no-quick-link"></a>Using the Apache Lucene Integration
+# <a id="using-the-apache-lucene-integration" class="no-quick-link"></a>Using the Apache Lucene Integration
 
-You can create Apache Lucene indexes through a Java API, through the `gfsh` command-line utility, or by means of
-the `cache.xml` configuration file.
+You can interact with Apache Lucene indexes through a Java API,
+through the `gfsh` command-line utility,
+or by means of the `cache.xml` configuration file.
 
-To use Apache Lucene Integration, you will need two pieces of information:
+To use Apache Lucene to create and use indexes,
+you will need two pieces of information:
 
-1.  The name of the region to be indexed or searched
+1.  The name of the region to be indexed and searched
 2.  The names of the fields you wish to index
 
+## Key Points
 
-### Key Points ###
-
+- Apache Lucene indexes are supported only on partitioned regions.
+Replicated region types are *not* supported.
+- Lucene indexes reside on servers.
+There is no way to create a Lucene index on a client.
 - Only top level fields of objects stored in the region can be indexed.
-- Apache Lucene indexes are supported only on Partitioned regions.
 - A single index supports a single region. Indexes do not support multiple regions.
-- Heterogeneous objects in single region are supported.
-- Join queries between regions are not supported.
-- Nested objects are not supported.
-- The index needs to be created before the region is created.
+- Heterogeneous objects in a single region are supported.
+
+## <a id="lucene-index-create" class="no-quick-link"></a>Creating an Index
+
+Create the index before creating the region.
 
-## <a id="java-api-example" class="no-quick-link"></a>Java API Example
+When no analyzer is specified, the
+`org.apache.lucene.analysis.standard.StandardAnalyzer` will be used.
+
+### <a id="api-create-example" class="no-quick-link"></a>Java API Example to Create an Index
 
 ``` pre
 // Get LuceneService
 LuceneService luceneService = LuceneServiceProvider.get(cache);
  
-// Create Index on fields with default analyzer:
-luceneService.createIndex(indexName, regionName, "field1", "field2", "field3");
- 
-Region region = cache.createRegionFactory(RegionShortcut.PARTITION).create(regionName);
+// Create the index on fields with default analyzer
+//  prior to creating the region
+luceneService.createIndexFactory()
+  .addField("name")
+  .addField("zipcode")
+  .create(indexName, regionName);
  
+Region region = cache.createRegionFactory(RegionShortcut.PARTITION)
+  .create(regionName);
 ```
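+
+If a field needs a non-default analyzer, the index factory can also accept
+one per field. A minimal sketch, assuming the `addField(String, Analyzer)`
+overload and Lucene's `KeywordAnalyzer` (the field names are placeholders):
+
+``` pre
+// Assumes: import org.apache.lucene.analysis.core.KeywordAnalyzer;
+luceneService.createIndexFactory()
+  .addField("name", new KeywordAnalyzer())
+  .addField("zipcode")
+  .create(indexName, regionName);
+```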
 
-## <a id="search-example" class="no-quick-link"></a>Search Example
-
-``` pre
-LuceneQuery<String, Person> query = luceneService.createLuceneQueryFactory()
-  .setResultLimit(10)
-  .create(indexName, regionName, "Main Street", "address");
-
-Collection<Person> results = query.findValues();
-```
-
-
-## <a id="gfsh-api" class="no-quick-link"></a>Gfsh API
-
-The gfsh command-line utility supports five Apache Lucene actions:
-
-<dt><a href="gfsh/command-pages/create.html#create_lucene_index"><b>create lucene index</b></a></dt>
-    <dd>Create a Lucene index that can be used to execute queries.</dd>
-<dt><a href="gfsh/command-pages/describe.html#describe_lucene_index"><b>describe lucene index</b></a></dt>
-    <dd>Describe a Lucene index.</dd>
-<dt><a href="gfsh/command-pages/destroy.html#destroy_lucene_index"><b>destroy lucene index</b></a></dt>
-    <dd>Destroy a Lucene index.</dd>
-<dt><a href="gfsh/command-pages/list.html#list_lucene_indexes"><b>list lucene indexes</b></a></dt>
-    <dd>List Lucene indexes created for all members.</dd>
-<dt><a href="gfsh/command-pages/search.html#search_lucene"><b>search lucene</b></a></dt>
-    <dd>Search a Lucene index.</dd>
+### <a id="gfsh-create-example" class="no-quick-link"></a>Gfsh Example to Create an Index
 
-**Gfsh command-line examples:**
+For details, see the [gfsh create lucene index](gfsh/command-pages/create.html#create_lucene_index) command reference page.
 
 ``` pre
-// List Index
-gfsh> list lucene indexes --with-stats
-
-// Create Index
 gfsh>create lucene index --name=indexName --region=/orders --field=customer,tags
-
-// Create Index, specifying a custom analyzer for the second field
-// Note: "null" in the first analyzer position means "use the default analyzer for the first field"
-gfsh>create lucene index --name=indexName --region=/orders --field=customer,tags --analyzer=null,org.apache.lucene.analysis.bg.BulgarianAnalyzer
-
-// Execute Lucene query
-gfsh> lucene search --regionName=/orders -queryStrings="John*" --defaultField=field1 --limit=100
 ```
 
+``` pre
+// Create an index, specifying a custom analyzer for the second field
+// Note: "null" in the first analyzer position uses the default analyzer
+// for the first field
+gfsh>create lucene index --name=indexName --region=/orders
+  --field=customer,tags --analyzer=null,org.apache.lucene.analysis.bg.BulgarianAnalyzer
+```
+To use the entire value as a single field, set the required `--field`
+option to `__REGION_VALUE_FIELD`.
+This is supported only when the region entry value is a `String`, `Long`,
+`Integer`, `Float`, or `Double`.
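+
+As a minimal sketch of that case (the index and region names are
+placeholders):
+
+``` pre
+gfsh>create lucene index --name=wholeValueIndex --region=/orders --field=__REGION_VALUE_FIELD
+```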
 
-## <a id="xml-configuration" class="no-quick-link"></a>XML Configuration
+### <a id="xml-configuration" class="no-quick-link"></a>XML Configuration to Create an Index
 
 ``` pre
 <cache
@@ -127,12 +115,189 @@ gfsh> lucene search --regionName=/orders -queryStrings="John*" --defaultField=fi
     version="1.0">
  
     <region name="region" refid="PARTITION">
-        <lucene:index name="index">
-          <lucene:field name="a" analyzer="org.apache.lucene.analysis.core.KeywordAnalyzer"/>
-          <lucene:field name="b" analyzer="org.apache.lucene.analysis.core.SimpleAnalyzer"/>
-          <lucene:field name="c" analyzer="org.apache.lucene.analysis.standard.ClassicAnalyzer"/>
+        <lucene:index name="myIndex">
+          <lucene:field name="a" 
+                        analyzer="org.apache.lucene.analysis.core.KeywordAnalyzer"/>
+          <lucene:field name="b" 
+                        analyzer="org.apache.lucene.analysis.core.SimpleAnalyzer"/>
+          <lucene:field name="c" 
+                        analyzer="org.apache.lucene.analysis.standard.ClassicAnalyzer"/>
+          <lucene:field name="d" />
         </lucene:index>
     </region>
 </cache>
 ```
+## <a id="lucene-index-query" class="no-quick-link"></a>Queries
+
+### <a id="gfsh-query-example" class="no-quick-link"></a>Gfsh Example to Query using a Lucene Index
+
+For details, see the [gfsh search lucene](gfsh/command-pages/search.html#search_lucene) command reference page.
+
+``` pre
+gfsh>search lucene --regionName=/orders --queryStrings="John*" --defaultField=field1
+   --limit=100
+```
+
+### <a id="api-query-example" class="no-quick-link"></a>Java API Example to Query using a Lucene Index
+
+``` pre
+LuceneQuery<String, Person> query = luceneService.createLuceneQueryFactory()
+  .setLimit(10)
+  .create(indexName, regionName, "name:John AND zipcode:97006", defaultField);
+
+Collection<Person> results = query.findValues();
+```
+
+## <a id="lucene-index-destroy" class="no-quick-link"></a>Destroying an Index
+
+Since a region destroy operation does not cause the destruction
+of any Lucene indexes,
+destroy any Lucene indexes prior to destroying the associated region.
+
+### <a id="API-destroy-example" class="no-quick-link"></a>Java API Example to Destroy a Lucene Index
+
+``` pre
+luceneService.destroyIndex(indexName, regionName);
+```
+An attempt to destroy a region with a Lucene index will result in
+an `IllegalStateException`,
+issuing an error message similar to:
+
+``` pre
+java.lang.IllegalStateException: The parent region [/orders] in colocation chain
+ cannot be destroyed, unless all its children [[/indexName#_orders.files]] are
+ destroyed
+at org.apache.geode.internal.cache.PartitionedRegion
+    .checkForColocatedChildren(PartitionedRegion.java:7231)
+at org.apache.geode.internal.cache.PartitionedRegion
+    .destroyRegion(PartitionedRegion.java:7243)
+at org.apache.geode.internal.cache.AbstractRegion
+    .destroyRegion(AbstractRegion.java:308)
+at DestroyLuceneIndexesAndRegionFunction
+    .destroyRegion(DestroyLuceneIndexesAndRegionFunction.java:46)
+```
+### <a id="gfsh-destroy-example" class="no-quick-link"></a>Gfsh Example to Destroy a Lucene Index
+
+For details, see the [gfsh destroy lucene index](gfsh/command-pages/destroy.html#destroy_lucene_index) command reference page.
+
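+A command sketch that mirrors the syntax shown on that page (the region and
+index names are placeholders):
+
+``` pre
+gfsh>destroy lucene index --region=/orders --name=indexName
+```
+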
+The error message that results from an attempt to destroy a region
+prior to destroying its associated Lucene index
+will be similar to:
+
+``` pre
+Error occurred while destroying region "orders".
+ Reason: The parent region [/orders] in colocation chain cannot be destroyed,
+ unless all its children [[/indexName#_orders.files]] are destroyed
+```
+
+## <a id="lucene-index-change" class="no-quick-link"></a>Changing an Index
+
+Changing an index requires rebuilding it.
+Follow these steps to change an index (a gfsh sketch follows the list):
 
+1. Export all region data
+2. Destroy the Lucene index
+3. Destroy the region
+4. Create a new index
+5. Create a new region without the user-defined business logic callbacks
+6. Import the region data with the option to turn on callbacks.
+The callbacks invoke a Lucene asynchronous event listener that indexes
+the data.
+7. Alter the region to add the user-defined business logic callbacks
+
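+A hedged gfsh sketch of these steps. The region, file, member, index, and
+listener names are placeholders, and the `--invoke-callbacks` option used in
+step 6 is an assumption to verify against the `import data` command reference:
+
+``` pre
+gfsh>export data --region=/orders --file=orders.gfd --member=server1
+gfsh>destroy lucene index --region=/orders
+gfsh>destroy region --name=/orders
+gfsh>create lucene index --name=customerIndex --region=/orders --field=customer,tags
+gfsh>create region --name=orders --type=PARTITION
+gfsh>import data --region=/orders --file=orders.gfd --member=server1 --invoke-callbacks=true
+gfsh>alter region --name=/orders --cache-listener=com.example.OrderLuceneCallback
+```
+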
+## <a id="addl-gfsh-api" class="no-quick-link"></a>Additional Gfsh Commands
+
+See the [gfsh describe lucene index](gfsh/command-pages/describe.html#describe_lucene_index) command reference page for the command that prints details about
+a specific index.
+
+See the [gfsh list lucene indexes](gfsh/command-pages/list.html#list_lucene_indexes) command reference page
+for the command that lists the
+Lucene indexes created for all members.
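+
+Command sketches for these two commands (the index and region names are
+placeholders):
+
+``` pre
+gfsh>describe lucene index --name=customerIndex --region=/orders
+gfsh>list lucene indexes --with-stats
+```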
+
+# <a id="LuceneRandC" class="no-quick-link"></a>Requirements and Caveats
+
+- Join queries between regions are not supported.
+- Nested objects are not supported.
+- Lucene indexes will not be stored within off-heap memory.
+- Lucene queries from within transactions are not supported.
+On an attempt to query from within a transaction,
+a `LuceneQueryException` is thrown, issuing an error message
+on the client (accessor) similar to:
+
+``` pre
+Exception in thread "main" org.apache.geode.cache.lucene.LuceneQueryException:
+ Lucene Query cannot be executed within a transaction
+at org.apache.geode.cache.lucene.internal.LuceneQueryImpl
+    .findTopEntries(LuceneQueryImpl.java:124)
+at org.apache.geode.cache.lucene.internal.LuceneQueryImpl
+    .findPages(LuceneQueryImpl.java:98)
+at org.apache.geode.cache.lucene.internal.LuceneQueryImpl
+    .findPages(LuceneQueryImpl.java:94)
+at TestClient.executeQuerySingleMethod(TestClient.java:196)
+at TestClient.main(TestClient.java:59)
+```
+- Lucene indexes must be created prior to creating the region.
+If an attempt is made to create a Lucene index after creating the region,
+the error message will be similar to:
+
+``` pre
+       Member                | Status
+---------------------------- | ------------------------------------------------------
+192.0.2.0(s2:97639)<v2>:1026 | Failed: The lucene index must be created before region
+192.0.2.0(s3:97652)<v3>:1027 | Failed: The lucene index must be created before region
+192.0.2.0(s1:97626)<v1>:1025 | Failed: The lucene index must be created before region
+```
+- An invalidate operation on a region entry does not invalidate a corresponding
+Lucene index entry.
+A query on a Lucene index that contains values that
+have been invalidated can return results that no longer exist.
+Therefore, do not combine entry invalidation with queries on Lucene indexes.
+- Lucene indexes are not supported for regions that have eviction configured
+with a local destroy.
+Eviction can be configured with overflow to disk,
+but only the region data is overflowed to disk,
+not the Lucene index.
+On an attempt to create a region with eviction configured to do local destroy
+(with a Lucene index),
+an `UnsupportedOperationException` will be thrown,
+issuing an error message similar to:
+
+``` pre
+[error 2017/05/02 16:12:32.461 PDT <main> tid=0x1] 
+ java.lang.UnsupportedOperationException:
+ Lucene indexes on regions with eviction and action local destroy are not supported
+Exception in thread "main" java.lang.UnsupportedOperationException:
+ Lucene indexes on regions with eviction and action local destroy are not supported
+at org.apache.geode.cache.lucene.internal.LuceneRegionListener
+    .beforeCreate(LuceneRegionListener.java:85)
+at org.apache.geode.internal.cache.GemFireCacheImpl
+    .invokeRegionBefore(GemFireCacheImpl.java:3154)
+at org.apache.geode.internal.cache.GemFireCacheImpl
+    .createVMRegion(GemFireCacheImpl.java:3013)
+at org.apache.geode.internal.cache.GemFireCacheImpl
+    .basicCreateRegion(GemFireCacheImpl.java:2991)
+```
+- Be aware that using the same field name in different objects,
+where the field has different data types,
+may have unexpected consequences.
+For example, assume an index on the field SSN has the following entries:
+    - `Object_1 object_1` has String SSN = "1111"
+    - `Object_2 object_2` has Integer SSN = 1111
+    - `Object_3 object_3` has Float SSN = 1111.0
+
+    Integers and floats are not converted into strings;
+    they remain as `IntPoint` and `FloatPoint` values within Lucene.
+    The standard analyzer does not tokenize these values;
+    it only breaks up string values.
+    So, a string search for "SSN: 1111" returns `object_1`.
+    An `IntRangeQuery` with `upper limit : 1112` and `lower limit : 1110`
+    returns `object_2`, and a `FloatRangeQuery` with `upper limit : 1111.5`
+    and `lower limit : 1111.0` returns `object_3`
+    (see the query sketch after this list).
+- Backups should only be made for regions with Lucene indexes
+when there are no puts, updates, or deletes in progress.
+Incremental backups will not be consistent for the region and
+its index upon restart if these operations were in progress,
+due to the delayed processing associated with the asynchronous event queue.
+If region data needs to be restored from a backup,
+follow the same procedure as given for changing an index.
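+
+Referring back to the field-type example above, a hedged Java sketch of such a
+numeric range query. It assumes the `LuceneQueryFactory.create` overload that
+takes a `LuceneQueryProvider` and Lucene's `IntPoint.newRangeQuery`; the index
+and region names are placeholders:
+
+``` pre
+// Assumes: import org.apache.lucene.document.IntPoint;
+// Matches object_2 from the example above (integer SSN between 1110 and 1112)
+LuceneQuery<String, Object> rangeQuery = luceneService.createLuceneQueryFactory()
+  .create(indexName, regionName,
+      index -> IntPoint.newRangeQuery("SSN", 1110, 1112));
+Collection<Object> matches = rangeQuery.findValues();
+```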


[27/43] geode git commit: Cleanup BaseCommand

Posted by kl...@apache.org.
Cleanup BaseCommand


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/c5031d12
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/c5031d12
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/c5031d12

Branch: refs/heads/feature/GEODE-2632-17
Commit: c5031d129ad4b3d9f0fb1498d0952480c21ed8c6
Parents: d66e51d
Author: Kirk Lund <kl...@apache.org>
Authored: Fri May 19 14:57:20 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:08 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/geode/GemFireException.java |  93 +--
 .../org/apache/geode/cache/CacheException.java  |  14 +-
 .../geode/cache/CacheRuntimeException.java      |  15 +-
 .../org/apache/geode/internal/DSFIDFactory.java |   2 +-
 .../geode/internal/cache/PartitionedRegion.java |   3 +-
 .../geode/internal/cache/ha/HARegionQueue.java  | 157 ++--
 .../geode/internal/cache/tier/Command.java      |  17 +-
 .../cache/tier/sockets/BaseCommand.java         | 720 ++++++++-----------
 .../cache/tier/sockets/BaseCommandQuery.java    |  20 +-
 .../cache/tier/sockets/CacheClientNotifier.java | 231 ++----
 .../ServerInterestRegistrationMessage.java      | 120 ++++
 .../cache/tier/sockets/command/AddPdxEnum.java  |  22 +-
 .../cache/tier/sockets/command/AddPdxType.java  |  22 +-
 .../cache/tier/sockets/command/ClearRegion.java |  52 +-
 .../cache/tier/sockets/command/ClientReady.java |  20 +-
 .../tier/sockets/command/CloseConnection.java   |  28 +-
 .../tier/sockets/command/CommitCommand.java     |  24 +-
 .../cache/tier/sockets/command/ContainsKey.java |  46 +-
 .../tier/sockets/command/ContainsKey66.java     |  48 +-
 .../tier/sockets/command/CreateRegion.java      |  44 +-
 .../cache/tier/sockets/command/Default.java     |   9 +-
 .../cache/tier/sockets/command/Destroy.java     |  74 +-
 .../cache/tier/sockets/command/Destroy65.java   | 102 +--
 .../cache/tier/sockets/command/Destroy70.java   |   4 +-
 .../tier/sockets/command/DestroyRegion.java     |  75 +-
 .../tier/sockets/command/ExecuteFunction.java   |  26 +-
 .../tier/sockets/command/ExecuteFunction65.java |  30 +-
 .../tier/sockets/command/ExecuteFunction66.java |  40 +-
 .../tier/sockets/command/ExecuteFunction70.java |   4 +-
 .../sockets/command/ExecuteRegionFunction.java  |  36 +-
 .../command/ExecuteRegionFunction61.java        |  40 +-
 .../command/ExecuteRegionFunction65.java        |  40 +-
 .../command/ExecuteRegionFunction66.java        |  44 +-
 .../command/ExecuteRegionFunctionSingleHop.java |  46 +-
 .../sockets/command/GatewayReceiverCommand.java | 214 +++---
 .../cache/tier/sockets/command/Get70.java       |  74 +-
 .../cache/tier/sockets/command/GetAll.java      |  52 +-
 .../cache/tier/sockets/command/GetAll651.java   |  53 +-
 .../cache/tier/sockets/command/GetAll70.java    |  54 +-
 .../cache/tier/sockets/command/GetAllForRI.java |   2 +-
 .../sockets/command/GetAllWithCallback.java     |  59 +-
 .../command/GetClientPRMetadataCommand.java     |  26 +-
 .../command/GetClientPRMetadataCommand66.java   |  26 +-
 .../GetClientPartitionAttributesCommand.java    |  28 +-
 .../GetClientPartitionAttributesCommand66.java  |  28 +-
 .../sockets/command/GetFunctionAttribute.java   |  16 +-
 .../tier/sockets/command/GetPDXEnumById.java    |  24 +-
 .../tier/sockets/command/GetPDXIdForEnum.java   |  22 +-
 .../tier/sockets/command/GetPDXIdForType.java   |  24 +-
 .../tier/sockets/command/GetPDXTypeById.java    |  24 +-
 .../tier/sockets/command/GetPdxEnums70.java     |  22 +-
 .../tier/sockets/command/GetPdxTypes70.java     |  22 +-
 .../cache/tier/sockets/command/Invalid.java     |   9 +-
 .../cache/tier/sockets/command/Invalidate.java  |  74 +-
 .../tier/sockets/command/Invalidate70.java      |   4 +-
 .../cache/tier/sockets/command/KeySet.java      |  57 +-
 .../cache/tier/sockets/command/MakePrimary.java |  22 +-
 .../tier/sockets/command/ManagementCommand.java |   2 +-
 .../cache/tier/sockets/command/PeriodicAck.java |  32 +-
 .../cache/tier/sockets/command/Ping.java        |  28 +-
 .../cache/tier/sockets/command/Put.java         |  86 +--
 .../cache/tier/sockets/command/Put61.java       | 106 +--
 .../cache/tier/sockets/command/Put65.java       | 152 ++--
 .../cache/tier/sockets/command/Put70.java       |   4 +-
 .../cache/tier/sockets/command/PutAll.java      |  84 +--
 .../cache/tier/sockets/command/PutAll70.java    |  98 +--
 .../cache/tier/sockets/command/PutAll80.java    | 112 ++-
 .../sockets/command/PutUserCredentials.java     |  32 +-
 .../cache/tier/sockets/command/Query.java       |  26 +-
 .../cache/tier/sockets/command/Query651.java    |  41 +-
 .../command/RegisterDataSerializers.java        |  30 +-
 .../sockets/command/RegisterInstantiators.java  |  36 +-
 .../tier/sockets/command/RegisterInterest.java  |  88 +--
 .../sockets/command/RegisterInterest61.java     |  98 +--
 .../sockets/command/RegisterInterestList.java   |  88 +--
 .../sockets/command/RegisterInterestList61.java |  94 +--
 .../sockets/command/RegisterInterestList66.java |  92 +--
 .../cache/tier/sockets/command/RemoveAll.java   | 103 ++-
 .../tier/sockets/command/RemoveUserAuth.java    |  32 +-
 .../cache/tier/sockets/command/Request.java     |  68 +-
 .../tier/sockets/command/RequestEventValue.java |  52 +-
 .../tier/sockets/command/RollbackCommand.java   |  20 +-
 .../cache/tier/sockets/command/Size.java        |  34 +-
 .../tier/sockets/command/TXFailoverCommand.java |  28 +-
 .../command/TXSynchronizationCommand.java       |  51 +-
 .../sockets/command/UnregisterInterest.java     |  50 +-
 .../sockets/command/UnregisterInterestList.java |  50 +-
 .../command/UpdateClientNotification.java       |   4 +-
 .../cache/tier/sockets/command/CloseCQ.java     |  34 +-
 .../cache/tier/sockets/command/ExecuteCQ.java   |  42 +-
 .../cache/tier/sockets/command/ExecuteCQ61.java |  53 +-
 .../cache/tier/sockets/command/GetCQStats.java  |  29 +-
 .../tier/sockets/command/GetDurableCQs.java     |  40 +-
 .../cache/tier/sockets/command/MonitorCQ.java   |  31 +-
 .../cache/tier/sockets/command/StopCQ.java      |  34 +-
 95 files changed, 2549 insertions(+), 2739 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/GemFireException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/GemFireException.java b/geode-core/src/main/java/org/apache/geode/GemFireException.java
index 02bf025..3a69307 100644
--- a/geode-core/src/main/java/org/apache/geode/GemFireException.java
+++ b/geode-core/src/main/java/org/apache/geode/GemFireException.java
@@ -12,74 +12,55 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode;
 
 /**
  * This is the abstract superclass of exceptions that are thrown to indicate incorrect usage of
  * GemFire.
- *
+ * <p>
  * Since these exceptions are unchecked, this class really <em>ought</em> to be called
- * <code>GemFireRuntimeException</code>; however, the current name is retained for compatibility's
+ * {@code GemFireRuntimeException}; however, the current name is retained for compatibility's
  * sake.
- * 
- * @see org.apache.geode.GemFireCheckedException
+ * <p>
+ * This class is abstract to enforce throwing more specific exception types. Please avoid using
+ * {@code GemFireException} to describe an arbitrary error condition.
+ *
+ * @see GemFireCheckedException
  * @see org.apache.geode.cache.CacheRuntimeException
  */
-// Implementation note: This class is abstract so that we are forced
-// to have more specific exception types. We want to avoid using
-// GemFireException to describe an arbitrary error condition (think
-// GsError).
 public abstract class GemFireException extends RuntimeException {
-  public static final long serialVersionUID = -6972360779789402295L;
-
-  /** The cause of this <code>GemFireException</code> */
-  // private Throwable cause;
-
-  ////////////////////// Constructors //////////////////////
+  private static final long serialVersionUID = -6972360779789402295L;
 
   /**
-   * Creates a new <code>GemFireException</code> with no detailed message.
+   * Creates a new {@code GemFireException} with no detailed message.
    */
   public GemFireException() {
     super();
   }
 
   /**
-   * Creates a new <code>GemFireException</code> with the given detail message.
+   * Creates a new {@code GemFireException} with the given detail message.
    */
   public GemFireException(String message) {
     super(message);
   }
 
   /**
-   * Creates a new <code>GemFireException</code> with the given detail message and cause.
+   * Creates a new {@code GemFireException} with the given detail message and cause.
    */
   public GemFireException(String message, Throwable cause) {
     super(message, cause);
-    // this.cause = cause;
   }
 
   /**
-   * Creates a new <code>GemFireException</code> with the given cause and no detail message
+   * Creates a new {@code GemFireException} with the given cause and no detail message
    */
   public GemFireException(Throwable cause) {
     super(cause);
-    // this.cause = cause;
   }
 
-  //////////////////// Instance Methods ////////////////////
-
-  /**
-   * Returns the cause of this <code>GemFireException</code> or <code>null</code> if the cause is
-   * nonexistent or unknown.
-   */
-  // public Throwable getCause() {
-  // return this.cause;
-  // }
-
   /**
-   * Returns the root cause of this <code>GemFireException</code> or <code>null</code> if the cause
+   * Returns the root cause of this {@code GemFireException} or {@code null} if the cause
    * is nonexistent or unknown.
    */
   public Throwable getRootCause() {
@@ -93,52 +74,4 @@ public abstract class GemFireException extends RuntimeException {
     return root;
   }
 
-  // public void printStackTrace() {
-  // super.printStackTrace();
-  // if (this.cause != null) {
-  // System.err.println("Caused by:");
-  // this.cause.printStackTrace();
-  // }
-  // }
-
-  // public void printStackTrace(java.io.PrintWriter pw) {
-  // super.printStackTrace(pw);
-  //
-  // if (this.cause != null) {
-  // pw.println("Caused by:");
-  // this.cause.printStackTrace(pw);
-  // }
-  // }
-  //
-  // public String getMessage() {
-  // if (this.cause != null) {
-  // String ourMsg = super.getMessage();
-  // if (ourMsg == null || ourMsg.length() == 0) {
-  // //ourMsg = super.toString(); //causes inifinite recursion
-  // ourMsg = "";
-  // }
-  // StringBuffer sb = new StringBuffer(ourMsg);
-  // sb.append(" Caused by: ");
-  // String causeMsg = this.cause.getMessage();
-  // if (causeMsg == null || causeMsg.length() == 0) {
-  // causeMsg = this.cause.toString();
-  // }
-  // sb.append(causeMsg);
-  // return sb.toString();
-  // } else {
-  // return super.getMessage();
-  // }
-  // }
-
-  /**
-   * Represent the receiver as well as the cause
-   */
-  // public String toString() {
-  // String result = super.toString();
-  // if (cause != null) {
-  // result = result + ", caused by " + cause.toString();
-  // }
-  // return result;
-  // }
-
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/cache/CacheException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/CacheException.java b/geode-core/src/main/java/org/apache/geode/cache/CacheException.java
index 79591d6..6309ad1 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/CacheException.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/CacheException.java
@@ -16,36 +16,34 @@ package org.apache.geode.cache;
 
 import org.apache.geode.GemFireException;
 
-
 /**
  * A generic exception, which indicates a cache error has occurred. All the other cache exceptions
  * are subclasses of this class. This class is abstract and therefore only subclasses are
  * instantiated.
  *
- *
  * @since GemFire 2.0
  */
 public abstract class CacheException extends GemFireException {
-  public static final long serialVersionUID = 7699432887938858940L;
+  private static final long serialVersionUID = 7699432887938858940L;
 
-  /** Constructs a new <code>CacheException</code>. */
+  /** Constructs a new {@code CacheException}. */
   public CacheException() {
     super();
   }
 
-  /** Constructs a new <code>CacheException</code> with a message string. */
+  /** Constructs a new {@code CacheException} with a message string. */
   public CacheException(String s) {
     super(s);
   }
 
   /**
-   * Constructs a <code>CacheException</code> with a message string and a base exception
+   * Constructs a {@code CacheException} with a message string and a base exception
    */
   public CacheException(String s, Throwable cause) {
     super(s, cause);
   }
 
-  /** Constructs a <code>CacheException</code> with a cause */
+  /** Constructs a {@code CacheException} with a cause */
   public CacheException(Throwable cause) {
     super(cause);
   }
@@ -57,7 +55,7 @@ public abstract class CacheException extends GemFireException {
     if (cause != null) {
       String causeStr = cause.toString();
       final String glue = ", caused by ";
-      StringBuffer sb = new StringBuffer(result.length() + causeStr.length() + glue.length());
+      StringBuilder sb = new StringBuilder(result.length() + causeStr.length() + glue.length());
       sb.append(result).append(glue).append(causeStr);
       result = sb.toString();
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/cache/CacheRuntimeException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/CacheRuntimeException.java b/geode-core/src/main/java/org/apache/geode/cache/CacheRuntimeException.java
index a723b32..89b596f 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/CacheRuntimeException.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/CacheRuntimeException.java
@@ -12,7 +12,6 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.cache;
 
 import org.apache.geode.GemFireException;
@@ -22,20 +21,18 @@ import org.apache.geode.GemFireException;
  * cache exceptions are the subclass of this class. This class is abstract so only subclasses can be
  * instantiated
  *
- *
  * @since GemFire 3.0
  */
 public abstract class CacheRuntimeException extends GemFireException {
-  public static final long serialVersionUID = 6750107573015376688L;
+  private static final long serialVersionUID = 6750107573015376688L;
 
   /**
-   * Creates a new instance of <code>CacheRuntimeException</code> without detail message.
+   * Creates a new instance of {@code CacheRuntimeException} without detail message.
    */
   public CacheRuntimeException() {}
 
-
   /**
-   * Constructs an instance of <code>CacheRuntimeException</code> with the specified detail message.
+   * Constructs an instance of {@code CacheRuntimeException} with the specified detail message.
    * 
    * @param msg the detail message
    */
@@ -44,7 +41,7 @@ public abstract class CacheRuntimeException extends GemFireException {
   }
 
   /**
-   * Constructs an instance of <code>CacheRuntimeException</code> with the specified detail message
+   * Constructs an instance of {@code CacheRuntimeException} with the specified detail message
    * and cause.
    * 
    * @param msg the detail message
@@ -55,7 +52,7 @@ public abstract class CacheRuntimeException extends GemFireException {
   }
 
   /**
-   * Constructs an instance of <code>CacheRuntimeException</code> with the specified cause.
+   * Constructs an instance of {@code CacheRuntimeException} with the specified cause.
    * 
    * @param cause the causal Throwable
    */
@@ -70,7 +67,7 @@ public abstract class CacheRuntimeException extends GemFireException {
     if (cause != null) {
       String causeStr = cause.toString();
       final String glue = ", caused by ";
-      StringBuffer sb = new StringBuffer(result.length() + causeStr.length() + glue.length());
+      StringBuilder sb = new StringBuilder(result.length() + causeStr.length() + glue.length());
       sb.append(result).append(glue).append(causeStr);
       result = sb.toString();
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
index ac500e6..5b0d86b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
@@ -372,7 +372,7 @@ import org.apache.geode.internal.cache.snapshot.FlowController.FlowControlAbortM
 import org.apache.geode.internal.cache.snapshot.FlowController.FlowControlAckMessage;
 import org.apache.geode.internal.cache.snapshot.SnapshotPacket;
 import org.apache.geode.internal.cache.snapshot.SnapshotPacket.SnapshotRecord;
-import org.apache.geode.internal.cache.tier.sockets.CacheClientNotifier.ServerInterestRegistrationMessage;
+import org.apache.geode.internal.cache.tier.sockets.ServerInterestRegistrationMessage;
 import org.apache.geode.internal.cache.tier.sockets.ClientBlacklistProcessor.ClientBlacklistMessage;
 import org.apache.geode.internal.cache.tier.sockets.ClientDataSerializerMessage;
 import org.apache.geode.internal.cache.tier.sockets.ClientInstantiatorMessage;

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index 8d4eaf7..02d04b3 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -183,7 +183,6 @@ import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage.ContainsKeyValueResponse;
 import org.apache.geode.internal.cache.partitioned.DestroyMessage;
 import org.apache.geode.internal.cache.partitioned.DestroyMessage.DestroyResponse;
-import org.apache.geode.internal.cache.partitioned.DestroyRegionOnDataStoreMessage;
 import org.apache.geode.internal.cache.partitioned.DumpAllPRConfigMessage;
 import org.apache.geode.internal.cache.partitioned.DumpB2NRegion;
 import org.apache.geode.internal.cache.partitioned.DumpB2NRegion.DumpB2NResponse;
@@ -4479,7 +4478,7 @@ public class PartitionedRegion extends LocalRegion
             values.addObjectPart(key, ge.value, ge.isObject, ge.versionTag);
           }
 
-          if (values.size() == BaseCommand.maximumChunkSize) {
+          if (values.size() == BaseCommand.MAXIMUM_CHUNK_SIZE) {
             BaseCommand.sendNewRegisterInterestResponseChunk(this, "keyList", values, false,
                 servConn);
             values.clear();

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
index f75a912..c0d3342 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
@@ -2057,6 +2057,21 @@ public class HARegionQueue implements RegionQueue {
    * a single peek thread.
    */
   private static class BlockingHARegionQueue extends HARegionQueue {
+
+    private static final String EVENT_ENQUEUE_WAIT_TIME_NAME =
+      DistributionConfig.GEMFIRE_PREFIX + "subscription.EVENT_ENQUEUE_WAIT_TIME";
+
+    private static final int DEFAULT_EVENT_ENQUEUE_WAIT_TIME = 100;
+
+    /**
+     * System property name indicating how frequently the "Queue full" message should be
+     * logged.
+     */
+    private static final String MAX_QUEUE_LOG_FREQUENCY =
+      DistributionConfig.GEMFIRE_PREFIX + "logFrequency.clientQueueReachedMaxLimit";
+
+    private static final long DEFAULT_LOG_FREQUENCY = 1000;
+
     /**
      * Guards the Put permits
      */
@@ -2079,14 +2094,26 @@ public class HARegionQueue implements RegionQueue {
      */
     private final Object permitMon = new Object();
 
-    // Lock on which the take & remove threads block awaiting data from put
-    // operations
+    /**
+     * Lock on which the take & remove threads block awaiting data from put
+     * operations
+     */
     private final StoppableReentrantLock lock;
 
     /**
      * Condition object on which peek & take threads will block
      */
-    protected final StoppableCondition blockCond;
+    final StoppableCondition blockCond;
+
+    /**
+     * System property value denoting a time in milliseconds. A thread putting an event into a
+     * full subscription queue waits this long for the queue to make space, then enqueues the
+     * event anyway, possibly allowing the queue to grow beyond its capacity/max-size. See
+     * #51400.
+     */
+    private final int enqueueEventWaitTime;
+
+    private final long logFrequency;
 
     /**
      * @param hrqa HARegionQueueAttributes through which expiry time etc for the HARegionQueue can
@@ -2097,16 +2124,43 @@ public class HARegionQueue implements RegionQueue {
         HARegionQueueAttributes hrqa, Map haContainer, ClientProxyMembershipID clientProxyId,
         final byte clientConflation, boolean isPrimary)
         throws IOException, ClassNotFoundException, CacheException, InterruptedException {
+
       super(regionName, cache, hrqa, haContainer, clientProxyId, clientConflation, isPrimary);
       this.capacity = hrqa.getBlockingQueueCapacity();
       this.putPermits = this.capacity;
       this.lock = new StoppableReentrantLock(this.region.getCancelCriterion());
-      this.blockCond = lock.newCondition();
+      this.blockCond = this.lock.newCondition();
 
       super.putGIIDataInRegion();
-      if (this.getClass() == BlockingHARegionQueue.class) {
-        initialized.set(true);
+
+      if (getClass() == BlockingHARegionQueue.class) {
+        this.initialized.set(true);
       }
+
+      this.enqueueEventWaitTime = calcEnqueueEventWaitTime();
+      this.logFrequency = calcLogFrequency();
+    }
+
+    private static int calcEnqueueEventWaitTime() {
+      int value =
+        Integer.getInteger(EVENT_ENQUEUE_WAIT_TIME_NAME, DEFAULT_EVENT_ENQUEUE_WAIT_TIME);
+      if (value < 0) {
+        value = DEFAULT_EVENT_ENQUEUE_WAIT_TIME;
+      }
+      return value;
+    }
+
+    private static long calcLogFrequency() {
+      long value;
+      try {
+        value = Long.valueOf(System.getProperty(MAX_QUEUE_LOG_FREQUENCY));
+        if (value <= 0) {
+          value = DEFAULT_LOG_FREQUENCY;
+        }
+      } catch (NumberFormatException ignore) {
+        value = DEFAULT_LOG_FREQUENCY;
+      }
+      return value;
     }
 
     @Override
@@ -2134,56 +2188,55 @@ public class HARegionQueue implements RegionQueue {
      * in the HARegionQueue.
      */
     @Override
-    @edu.umd.cs.findbugs.annotations.SuppressWarnings("TLW_TWO_LOCK_WAIT")
+    @SuppressWarnings("TLW_TWO_LOCK_WAIT")
     void checkQueueSizeConstraint() throws InterruptedException {
-      if (this.haContainer instanceof HAContainerMap && isPrimary()) { // Fix for bug 39413
-        if (Thread.interrupted())
-          throw new InterruptedException();
-        synchronized (this.putGuard) {
-          if (putPermits <= 0) {
-            synchronized (this.permitMon) {
-              if (reconcilePutPermits() <= 0) {
-                if (region.getSystem().getConfig().getRemoveUnresponsiveClient()) {
-                  isClientSlowReciever = true;
-                } else {
-                  try {
-                    long logFrequency = CacheClientNotifier.DEFAULT_LOG_FREQUENCY;
-                    CacheClientNotifier ccn = CacheClientNotifier.getInstance();
-                    if (ccn != null) { // check needed for junit tests
-                      logFrequency = ccn.getLogFrequency();
-                    }
-                    if ((this.maxQueueSizeHitCount % logFrequency) == 0) {
-                      logger.warn(LocalizedMessage.create(
-                          LocalizedStrings.HARegionQueue_CLIENT_QUEUE_FOR_0_IS_FULL,
-                          new Object[] {region.getName()}));
-                      this.maxQueueSizeHitCount = 0;
-                    }
-                    ++this.maxQueueSizeHitCount;
-                    this.region.checkReadiness(); // fix for bug 37581
-                    // TODO: wait called while holding two locks
-                    this.permitMon.wait(CacheClientNotifier.eventEnqueueWaitTime);
-                    this.region.checkReadiness(); // fix for bug 37581
-                    // Fix for #51400. Allow the queue to grow beyond its
-                    // capacity/maxQueueSize, if it is taking a long time to
-                    // drain the queue, either due to a slower client or the
-                    // deadlock scenario mentioned in the ticket.
-                    reconcilePutPermits();
-                    if ((this.maxQueueSizeHitCount % logFrequency) == 1) {
-                      logger.info(LocalizedMessage
-                          .create(LocalizedStrings.HARegionQueue_RESUMING_WITH_PROCESSING_PUTS));
-                    }
-                  } catch (InterruptedException ex) {
-                    // TODO: The line below is meaningless. Comment it out later
-                    this.permitMon.notifyAll();
-                    throw ex;
+      if (!(this.haContainer instanceof HAContainerMap && isPrimary())) {
+        // Fix for bug 39413
+        return;
+      }
+      if (Thread.interrupted()) {
+        throw new InterruptedException();
+      }
+
+      synchronized (this.putGuard) {
+        if (this.putPermits <= 0) {
+          synchronized (this.permitMon) {
+            if (reconcilePutPermits() <= 0) {
+              if (this.region.getSystem().getConfig().getRemoveUnresponsiveClient()) {
+                this.isClientSlowReciever = true;
+              } else {
+                try {
+                  if ((this.maxQueueSizeHitCount % this.logFrequency) == 0) {
+                    logger.warn(LocalizedMessage.create(
+                        LocalizedStrings.HARegionQueue_CLIENT_QUEUE_FOR_0_IS_FULL,
+                        new Object[] {this.region.getName()}));
+                    this.maxQueueSizeHitCount = 0;
+                  }
+                  ++this.maxQueueSizeHitCount;
+                  this.region.checkReadiness(); // fix for bug 37581
+                  // TODO: wait called while holding two locks
+                  this.permitMon.wait(this.enqueueEventWaitTime);
+                  this.region.checkReadiness(); // fix for bug 37581
+                  // Fix for #51400. Allow the queue to grow beyond its
+                  // capacity/maxQueueSize, if it is taking a long time to
+                  // drain the queue, either due to a slower client or the
+                  // deadlock scenario mentioned in the ticket.
+                  reconcilePutPermits();
+                  if (this.maxQueueSizeHitCount % this.logFrequency == 1) {
+                    logger.info(LocalizedMessage
+                        .create(LocalizedStrings.HARegionQueue_RESUMING_WITH_PROCESSING_PUTS));
                   }
+                } catch (InterruptedException ex) {
+                  // TODO: The line below is meaningless. Comment it out later
+                  this.permitMon.notifyAll();
+                  throw ex;
                 }
               }
-            } // synchronized (this.permitMon)
-          } // if (putPermits <= 0)
-          --putPermits;
-        } // synchronized (this.putGuard)
-      }
+            }
+          } // synchronized (this.permitMon)
+        } // if (putPermits <= 0)
+        --this.putPermits;
+      } // synchronized (this.putGuard)
     }
 
     /**
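
A minimal sketch, not part of the commit, of how the two tunables that BlockingHARegionQueue now reads might be set on a server JVM. It assumes DistributionConfig.GEMFIRE_PREFIX resolves to "gemfire." as in released Geode versions; the class name and numeric values are illustrative only.

    // Sketch only: set these properties before the cache server starts so that
    // BlockingHARegionQueue picks them up when it is constructed.
    public class SubscriptionQueueTuningExample {
      public static void main(String[] args) {
        // Wait up to 250 ms for a full client subscription queue to drain before
        // enqueueing anyway (the #51400 behavior described in the javadoc above).
        System.setProperty("gemfire.subscription.EVENT_ENQUEUE_WAIT_TIME", "250");
        // Log the "Client queue ... is full" warning only once per 500 occurrences.
        System.setProperty("gemfire.logFrequency.clientQueueReachedMaxLimit", "500");
        // ... start the cache server here ...
      }
    }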

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/Command.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/Command.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/Command.java
index 0c1c42a..d7f7c7b 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/Command.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/Command.java
@@ -12,22 +12,17 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-/**
- * 
- */
 package org.apache.geode.internal.cache.tier;
 
-import org.apache.geode.internal.cache.tier.sockets.*;
+import org.apache.geode.internal.cache.tier.sockets.Message;
+import org.apache.geode.internal.cache.tier.sockets.ServerConnection;
 
-/**
- * 
- */
 public interface Command {
-  public void execute(Message msg, ServerConnection servConn);
+  void execute(Message message, ServerConnection serverConnection);
 
-  public final int RESPONDED = 1;
+  int RESPONDED = 1;
 
-  public final int REQUIRES_RESPONSE = 2;
+  int REQUIRES_RESPONSE = 2;
 
-  public final int REQUIRES_CHUNKED_RESPONSE = 3;
+  int REQUIRES_CHUNKED_RESPONSE = 3;
 }
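
For illustration only (this class is not in the commit), an implementation of the slimmed-down Command interface is unchanged from an implementor's point of view: the fields stay implicitly public static final and execute stays implicitly public.

    import org.apache.geode.internal.cache.tier.Command;
    import org.apache.geode.internal.cache.tier.sockets.Message;
    import org.apache.geode.internal.cache.tier.sockets.ServerConnection;

    // Hypothetical no-op command, shown only to demonstrate that the modifier
    // cleanup above does not change how the interface is implemented or used.
    class NoOpCommand implements Command {
      @Override
      public void execute(Message message, ServerConnection serverConnection) {
        int state = RESPONDED; // interface constants remain accessible unqualified
      }
    }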


[11/43] geode git commit: GEODE-2957: Lucene create index DEFAULT keyword added for standardAnalyzer

Posted by kl...@apache.org.
GEODE-2957: Lucene create index DEFAULT keyword added for standardAnalyzer

	This closes #537


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/29ea88a2
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/29ea88a2
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/29ea88a2

Branch: refs/heads/feature/GEODE-2632-17
Commit: 29ea88a23ef0feb29e8d7684c4061ac54dc66874
Parents: 5ab4a69
Author: David Anuta <da...@gmail.com>
Authored: Thu May 25 13:45:23 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Thu May 25 16:54:25 2017 -0700

----------------------------------------------------------------------
 .../functions/LuceneCreateIndexFunction.java    |  2 +-
 .../cli/LuceneIndexCommandsDUnitTest.java       | 68 +++++++++++++++++---
 2 files changed, 59 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/29ea88a2/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
index d49f7f9..5e36efa 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
@@ -105,7 +105,7 @@ public class LuceneCreateIndexFunction extends FunctionAdapter implements Intern
       className = StandardAnalyzer.class.getCanonicalName();
     else {
       String trimmedClassName = StringUtils.trim(className);
-      if (trimmedClassName.equals("") || trimmedClassName.equals("null"))
+      if (trimmedClassName.equals("") || trimmedClassName.equals("DEFAULT"))
         className = StandardAnalyzer.class.getCanonicalName();
       else
         className = trimmedClassName;
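
A hedged sketch (hypothetical helper, not the shipped function) of the rule this change establishes: an empty, whitespace-only, or DEFAULT analyzer entry falls back to Lucene's StandardAnalyzer, while any other non-blank entry is taken as a fully qualified analyzer class name.

    import org.apache.lucene.analysis.standard.StandardAnalyzer;

    // Illustrative only; the class and method names below are not part of Geode.
    class AnalyzerNameExample {
      static String resolve(String entry) {
        if (entry == null) {
          return StandardAnalyzer.class.getCanonicalName();
        }
        String trimmed = entry.trim();
        if (trimmed.isEmpty() || trimmed.equals("DEFAULT")) {
          return StandardAnalyzer.class.getCanonicalName();
        }
        return trimmed; // assumed to name an org.apache.lucene.analysis.Analyzer subclass
      }

      public static void main(String[] args) {
        System.out.println(resolve("DEFAULT")); // org.apache.lucene.analysis.standard.StandardAnalyzer
        System.out.println(resolve("   "));     // same fallback for whitespace-only entries
        System.out.println(resolve("org.apache.lucene.analysis.core.KeywordAnalyzer"));
      }
    }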

http://git-wip-us.apache.org/repos/asf/geode/blob/29ea88a2/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
index 5cbe31c..009c74c 100755
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
@@ -298,33 +298,81 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Test
-  public void createIndexWithNullAnalyzerShouldUseStandardAnalyzer() throws Exception {
-    final VM vm1 = Host.getHost(0).getVM(1);
+  public void createIndexWithWhitespaceOrDefaultKeywordAnalyzerShouldUseStandardAnalyzer()
+      throws Exception {
+    final VM vm1 = Host.getHost(0).getVM(-1);
     vm1.invoke(() -> {
       getCache();
     });
 
-    String analyzerList = StandardAnalyzer.class.getCanonicalName() + ",null,"
+    // Test whitespace analyzer name
+    String analyzerList = StandardAnalyzer.class.getCanonicalName() + ",     ,"
         + KeywordAnalyzer.class.getCanonicalName();
     CommandStringBuilder csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
-    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, INDEX_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "space");
     csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, REGION_NAME);
     csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
-    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__ANALYZER, analyzerList);
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__ANALYZER, "'" + analyzerList + "'");
 
     String resultAsString = executeCommandAndLogResult(csb);
 
+    // Test empty analyzer name
+    analyzerList =
+        StandardAnalyzer.class.getCanonicalName() + ",," + KeywordAnalyzer.class.getCanonicalName();
+    csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "empty");
+    csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, REGION_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__ANALYZER, analyzerList);
+
+    resultAsString = executeCommandAndLogResult(csb);
+
+    // Test keyword analyzer name
+    analyzerList = StandardAnalyzer.class.getCanonicalName() + ",DEFAULT,"
+        + KeywordAnalyzer.class.getCanonicalName();
+    csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "keyword");
+    csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, REGION_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__ANALYZER, analyzerList);
+
+    resultAsString = executeCommandAndLogResult(csb);
+
     vm1.invoke(() -> {
       LuceneService luceneService = LuceneServiceProvider.get(getCache());
       createRegion();
-      final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
-      final Map<String, Analyzer> fieldAnalyzers = index.getFieldAnalyzers();
+      final LuceneIndex spaceIndex = luceneService.getIndex("space", REGION_NAME);
+      final Map<String, Analyzer> spaceFieldAnalyzers = spaceIndex.getFieldAnalyzers();
+
+      final LuceneIndex emptyIndex = luceneService.getIndex("empty", REGION_NAME);
+      final Map<String, Analyzer> emptyFieldAnalyzers2 = emptyIndex.getFieldAnalyzers();
+
+      final LuceneIndex keywordIndex = luceneService.getIndex("keyword", REGION_NAME);
+      final Map<String, Analyzer> keywordFieldAnalyzers = keywordIndex.getFieldAnalyzers();
+
+      // Test whitespace analyzers
+      assertEquals(StandardAnalyzer.class.getCanonicalName(),
+          spaceFieldAnalyzers.get("field1").getClass().getCanonicalName());
+      assertEquals(StandardAnalyzer.class.getCanonicalName(),
+          spaceFieldAnalyzers.get("field2").getClass().getCanonicalName());
+      assertEquals(KeywordAnalyzer.class.getCanonicalName(),
+          spaceFieldAnalyzers.get("field3").getClass().getCanonicalName());
+
+      // Test empty analyzers
+      assertEquals(StandardAnalyzer.class.getCanonicalName(),
+          emptyFieldAnalyzers2.get("field1").getClass().getCanonicalName());
+      assertEquals(StandardAnalyzer.class.getCanonicalName(),
+          emptyFieldAnalyzers2.get("field2").getClass().getCanonicalName());
+      assertEquals(KeywordAnalyzer.class.getCanonicalName(),
+          emptyFieldAnalyzers2.get("field3").getClass().getCanonicalName());
+
+      // Test keyword analyzers
       assertEquals(StandardAnalyzer.class.getCanonicalName(),
-          fieldAnalyzers.get("field1").getClass().getCanonicalName());
+          keywordFieldAnalyzers.get("field1").getClass().getCanonicalName());
       assertEquals(StandardAnalyzer.class.getCanonicalName(),
-          fieldAnalyzers.get("field2").getClass().getCanonicalName());
+          keywordFieldAnalyzers.get("field2").getClass().getCanonicalName());
       assertEquals(KeywordAnalyzer.class.getCanonicalName(),
-          fieldAnalyzers.get("field3").getClass().getCanonicalName());
+          keywordFieldAnalyzers.get("field3").getClass().getCanonicalName());
     });
   }
 


[20/43] geode git commit: Cleanup BaseCommand

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest.java
index 52a929f..afb0f2c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest.java
@@ -46,51 +46,51 @@ public class RegisterInterest extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keyPart = null;
     String regionName = null;
     Object key = null;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
     // bserverStats.incLong(readDestroyRequestTimeId,
     // DistributionStats.getStatTime() - start);
     // bserverStats.incInt(destroyRequestsId, 1);
     // start = DistributionStats.getStatTime();
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
+    regionNamePart = clientMessage.getPart(0);
     regionName = regionNamePart.getString();
     InterestResultPolicy policy = null;
     // Retrieve the interest type
-    int interestType = msg.getPart(1).getInt();
+    int interestType = clientMessage.getPart(1).getInt();
 
     // Retrieve the InterestResultPolicy
     try {
-      policy = (InterestResultPolicy) msg.getPart(2).getObject();
+      policy = (InterestResultPolicy) clientMessage.getPart(2).getObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     boolean isDurable = false;
     try {
-      Part durablePart = msg.getPart(3);
+      Part durablePart = clientMessage.getPart(3);
       byte[] durablePartBytes = (byte[]) durablePart.getObject();
       isDurable = durablePartBytes[0] == 0x01;
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     // Retrieve the key
-    keyPart = msg.getPart(4);
+    keyPart = clientMessage.getPart(4);
     regionName = regionNamePart.getString();
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -98,21 +98,21 @@ public class RegisterInterest extends BaseCommand {
 
     // VJR: Check for a sixth part for client version 6.0.3 onwards for the
     // time being until refactoring into a new command version.
-    if (msg.getNumberOfParts() > 5) {
+    if (clientMessage.getNumberOfParts() > 5) {
       try {
-        Part notifyPart = msg.getPart(5);
+        Part notifyPart = clientMessage.getPart(5);
         byte[] notifyPartBytes = (byte[]) notifyPart.getObject();
         sendUpdatesAsInvalidates = notifyPartBytes[0] == 0x01;
       } catch (Exception e) {
-        writeChunkedException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, e, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
 
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received register interest request ({} bytes) from {} for region {} key {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
     }
 
     // Process the register interest request
@@ -126,19 +126,19 @@ public class RegisterInterest extends BaseCommand {
         message =
             LocalizedStrings.RegisterInterest_THE_INPUT_REGION_NAME_FOR_THE_REGISTER_INTEREST_REQUEST_IS_NULL;
       }
-      logger.warn("{}: {}", servConn.getName(), message.toLocalizedString());
-      writeChunkedErrorResponse(msg, MessageType.REGISTER_INTEREST_DATA_ERROR,
-          message.toLocalizedString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      logger.warn("{}: {}", serverConnection.getName(), message.toLocalizedString());
+      writeChunkedErrorResponse(clientMessage, MessageType.REGISTER_INTEREST_DATA_ERROR,
+          message.toLocalizedString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     // input key not null
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.RegisterInterest_0_REGION_NAMED_1_WAS_NOT_FOUND_DURING_REGISTER_INTEREST_REQUEST,
-          new Object[] {servConn.getName(), regionName}));
+          new Object[] {serverConnection.getName(), regionName}));
       // writeChunkedErrorResponse(msg,
       // MessageType.REGISTER_INTEREST_DATA_ERROR, message);
       // responded = true;
@@ -151,7 +151,7 @@ public class RegisterInterest extends BaseCommand {
         this.securityService.authorizeRegionRead(regionName, key.toString());
       }
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (!DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           RegisterInterestOperationContext registerContext =
@@ -159,14 +159,14 @@ public class RegisterInterest extends BaseCommand {
           key = registerContext.getKey();
         }
       }
-      servConn.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, key,
-          servConn.getProxyID(), interestType, isDurable, sendUpdatesAsInvalidates, false, 0, true);
+      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, key,
+          serverConnection.getProxyID(), interestType, isDurable, sendUpdatesAsInvalidates, false, 0, true);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
       // Otherwise, write an exception message and continue
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -178,45 +178,45 @@ public class RegisterInterest extends BaseCommand {
     // start = DistributionStats.getStatTime();
 
     CacheClientProxy ccp =
-        servConn.getAcceptor().getCacheClientNotifier().getClientProxy(servConn.getProxyID());
+        serverConnection.getAcceptor().getCacheClientNotifier().getClientProxy(serverConnection.getProxyID());
     if (ccp == null) {
       // fix for 37593
       IOException ioex = new IOException(
           LocalizedStrings.RegisterInterest_CACHECLIENTPROXY_FOR_THIS_CLIENT_IS_NO_LONGER_ON_THE_SERVER_SO_REGISTERINTEREST_OPERATION_IS_UNSUCCESSFUL
               .toLocalizedString());
-      writeChunkedException(msg, ioex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, ioex, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     boolean isPrimary = ccp.isPrimary();
-    ChunkedMessage chunkedResponseMsg = servConn.getRegisterInterestResponseMessage();
+    ChunkedMessage chunkedResponseMsg = serverConnection.getRegisterInterestResponseMessage();
     if (!isPrimary) {
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_SECONDARY);
-      chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+      chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
       chunkedResponseMsg.sendHeader();
       chunkedResponseMsg.setLastChunk(true);
 
       if (logger.isDebugEnabled()) {
         logger.debug(
             "{}: Sending register interest response chunk from secondary for region: {} for key: {} chunk=<{}>",
-            servConn.getName(), regionName, key, chunkedResponseMsg);
+            serverConnection.getName(), regionName, key, chunkedResponseMsg);
       }
-      chunkedResponseMsg.sendChunk(servConn);
+      chunkedResponseMsg.sendChunk(serverConnection);
     } // !isPrimary
     else { // isPrimary
 
       // Send header which describes how many chunks will follow
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_PRIMARY);
-      chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+      chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
       chunkedResponseMsg.sendHeader();
 
       // Send chunk response
       try {
-        fillAndSendRegisterInterestResponseChunks(region, key, interestType, policy, servConn);
-        servConn.setAsTrue(RESPONDED);
+        fillAndSendRegisterInterestResponseChunks(region, key, interestType, policy, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       } catch (Exception e) {
-        writeChunkedException(msg, e, false, servConn, chunkedResponseMsg);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, e, serverConnection, chunkedResponseMsg);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
 
@@ -224,7 +224,7 @@ public class RegisterInterest extends BaseCommand {
         // logger.debug(getName() + ": Sent chunk (1 of 1) of register interest
         // response (" + chunkedResponseMsg.getBufferLength() + " bytes) for
         // region " + regionName + " key " + key);
-        logger.debug("{}: Sent register interest response for region {} key {}", servConn.getName(),
+        logger.debug("{}: Sent register interest response for region {} key {}", serverConnection.getName(),
             regionName, key);
       }
       // bserverStats.incLong(writeDestroyResponseTimeId,

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest61.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest61.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest61.java
index 5ddb241..af423ca 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest61.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest61.java
@@ -60,67 +60,67 @@ public class RegisterInterest61 extends BaseCommand {
   RegisterInterest61() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keyPart = null;
     String regionName = null;
     Object key = null;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
     // bserverStats.incLong(readDestroyRequestTimeId,
     // DistributionStats.getStatTime() - start);
     // bserverStats.incInt(destroyRequestsId, 1);
     // start = DistributionStats.getStatTime();
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
+    regionNamePart = clientMessage.getPart(0);
     regionName = regionNamePart.getString();
     InterestResultPolicy policy = null;
     // Retrieve the interest type
-    int interestType = msg.getPart(1).getInt();
+    int interestType = clientMessage.getPart(1).getInt();
 
     // Retrieve the InterestResultPolicy
     try {
-      policy = (InterestResultPolicy) msg.getPart(2).getObject();
+      policy = (InterestResultPolicy) clientMessage.getPart(2).getObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     boolean isDurable = false;
     try {
-      Part durablePart = msg.getPart(3);
+      Part durablePart = clientMessage.getPart(3);
       byte[] durablePartBytes = (byte[]) durablePart.getObject();
       isDurable = durablePartBytes[0] == 0x01;
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     // region data policy
     byte[] regionDataPolicyPartBytes;
     boolean serializeValues = false;
     try {
-      Part regionDataPolicyPart = msg.getPart(msg.getNumberOfParts() - 1);
+      Part regionDataPolicyPart = clientMessage.getPart(clientMessage.getNumberOfParts() - 1);
       regionDataPolicyPartBytes = (byte[]) regionDataPolicyPart.getObject();
-      if (servConn.getClientVersion().compareTo(Version.GFE_80) >= 0) {
+      if (serverConnection.getClientVersion().compareTo(Version.GFE_80) >= 0) {
         // The second byte here is serializeValues
         serializeValues = regionDataPolicyPartBytes[1] == (byte) 0x01;
       }
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     // Retrieve the key
-    keyPart = msg.getPart(4);
+    keyPart = clientMessage.getPart(4);
     regionName = regionNamePart.getString();
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -128,14 +128,14 @@ public class RegisterInterest61 extends BaseCommand {
 
     // VJR: Check for a sixth part for client version 6.0.3 onwards for the
     // time being until refactoring into a new command version.
-    if (msg.getNumberOfParts() > 5) {
+    if (clientMessage.getNumberOfParts() > 5) {
       try {
-        Part notifyPart = msg.getPart(5);
+        Part notifyPart = clientMessage.getPart(5);
         byte[] notifyPartBytes = (byte[]) notifyPart.getObject();
         sendUpdatesAsInvalidates = notifyPartBytes[0] == 0x01;
       } catch (Exception e) {
-        writeChunkedException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, e, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -143,7 +143,7 @@ public class RegisterInterest61 extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received register interest 61 request ({} bytes) from {} for region {} key {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
     }
 
     // test hook to trigger vMotion during register Interest
@@ -164,19 +164,19 @@ public class RegisterInterest61 extends BaseCommand {
         message =
             LocalizedStrings.RegisterInterest_THE_INPUT_REGION_NAME_FOR_THE_REGISTER_INTEREST_REQUEST_IS_NULL;
       }
-      logger.warn("{}: {}", servConn.getName(), message.toLocalizedString());
-      writeChunkedErrorResponse(msg, MessageType.REGISTER_INTEREST_DATA_ERROR,
-          message.toLocalizedString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      logger.warn("{}: {}", serverConnection.getName(), message.toLocalizedString());
+      writeChunkedErrorResponse(clientMessage, MessageType.REGISTER_INTEREST_DATA_ERROR,
+          message.toLocalizedString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     // input key not null
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.RegisterInterest_0_REGION_NAMED_1_WAS_NOT_FOUND_DURING_REGISTER_INTEREST_REQUEST,
-          new Object[] {servConn.getName(), regionName}));
+          new Object[] {serverConnection.getName(), regionName}));
       // writeChunkedErrorResponse(msg,
       // MessageType.REGISTER_INTEREST_DATA_ERROR, message);
       // responded = true;
@@ -190,7 +190,7 @@ public class RegisterInterest61 extends BaseCommand {
         this.securityService.authorizeRegionRead(regionName, key.toString());
       }
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (!DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           RegisterInterestOperationContext registerContext =
@@ -198,15 +198,15 @@ public class RegisterInterest61 extends BaseCommand {
           key = registerContext.getKey();
         }
       }
-      servConn.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, key,
-          servConn.getProxyID(), interestType, isDurable, sendUpdatesAsInvalidates, true,
+      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, key,
+          serverConnection.getProxyID(), interestType, isDurable, sendUpdatesAsInvalidates, true,
           regionDataPolicyPartBytes[0], true);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
       // Otherwise, write an exception message and continue
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -218,46 +218,46 @@ public class RegisterInterest61 extends BaseCommand {
     // start = DistributionStats.getStatTime();
 
     CacheClientProxy ccp =
-        servConn.getAcceptor().getCacheClientNotifier().getClientProxy(servConn.getProxyID());
+        serverConnection.getAcceptor().getCacheClientNotifier().getClientProxy(serverConnection.getProxyID());
     if (ccp == null) {
       // fix for 37593
       IOException ioex = new IOException(
           LocalizedStrings.RegisterInterest_CACHECLIENTPROXY_FOR_THIS_CLIENT_IS_NO_LONGER_ON_THE_SERVER_SO_REGISTERINTEREST_OPERATION_IS_UNSUCCESSFUL
               .toLocalizedString());
-      writeChunkedException(msg, ioex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, ioex, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     boolean isPrimary = ccp.isPrimary();
-    ChunkedMessage chunkedResponseMsg = servConn.getRegisterInterestResponseMessage();
+    ChunkedMessage chunkedResponseMsg = serverConnection.getRegisterInterestResponseMessage();
     if (!isPrimary) {
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_SECONDARY);
-      chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+      chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
       chunkedResponseMsg.sendHeader();
       chunkedResponseMsg.setLastChunk(true);
 
       if (logger.isDebugEnabled()) {
         logger.debug(
             "{}: Sending register interest response chunk from secondary for region: {} for key: {} chunk=<{}>",
-            servConn.getName(), regionName, key, chunkedResponseMsg);
+            serverConnection.getName(), regionName, key, chunkedResponseMsg);
       }
-      chunkedResponseMsg.sendChunk(servConn);
+      chunkedResponseMsg.sendChunk(serverConnection);
     } // !isPrimary
     else { // isPrimary
 
       // Send header which describes how many chunks will follow
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_PRIMARY);
-      chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+      chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
       chunkedResponseMsg.sendHeader();
 
       // Send chunk response
       try {
         fillAndSendRegisterInterestResponseChunks(region, key, interestType, serializeValues,
-            policy, servConn);
-        servConn.setAsTrue(RESPONDED);
+            policy, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       } catch (Exception e) {
-        writeChunkedException(msg, e, false, servConn, chunkedResponseMsg);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, e, serverConnection, chunkedResponseMsg);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
 
@@ -265,7 +265,7 @@ public class RegisterInterest61 extends BaseCommand {
         // logger.debug(getName() + ": Sent chunk (1 of 1) of register interest
         // response (" + chunkedResponseMsg.getBufferLength() + " bytes) for
         // region " + regionName + " key " + key);
-        logger.debug("{}: Sent register interest response for region {} key {}", servConn.getName(),
+        logger.debug("{}: Sent register interest response for region {} key {}", serverConnection.getName(),
             regionName, key);
       }
       // bserverStats.incLong(writeDestroyResponseTimeId,

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList.java
index cd16790..4206e19 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList.java
@@ -49,57 +49,57 @@ public class RegisterInterestList extends BaseCommand {
   RegisterInterestList() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keyPart = null, numberOfKeysPart = null;
     String regionName = null;
     Object key = null;
     InterestResultPolicy policy;
     List keys = null;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
     int numberOfKeys = 0, partNumber = 0;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
-    ChunkedMessage chunkedResponseMsg = servConn.getRegisterInterestResponseMessage();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    ChunkedMessage chunkedResponseMsg = serverConnection.getRegisterInterestResponseMessage();
 
     // bserverStats.incLong(readDestroyRequestTimeId,
     // DistributionStats.getStatTime() - start);
     // bserverStats.incInt(destroyRequestsId, 1);
     // start = DistributionStats.getStatTime();
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
+    regionNamePart = clientMessage.getPart(0);
     regionName = regionNamePart.getString();
 
     // Retrieve the InterestResultPolicy
     try {
-      policy = (InterestResultPolicy) msg.getPart(1).getObject();
+      policy = (InterestResultPolicy) clientMessage.getPart(1).getObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     boolean isDurable = false;
     try {
-      Part durablePart = msg.getPart(2);
+      Part durablePart = clientMessage.getPart(2);
       byte[] durablePartBytes = (byte[]) durablePart.getObject();
       isDurable = durablePartBytes[0] == 0x01;
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
-    numberOfKeysPart = msg.getPart(3);
+    numberOfKeysPart = clientMessage.getPart(3);
     numberOfKeys = numberOfKeysPart.getInt();
 
     partNumber = 4;
     keys = new ArrayList();
     for (int i = 0; i < numberOfKeys; i++) {
-      keyPart = msg.getPart(partNumber + i);
+      keyPart = clientMessage.getPart(partNumber + i);
       try {
         key = keyPart.getStringOrObject();
       } catch (Exception e) {
-        writeChunkedException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, e, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
       keys.add(key);
@@ -109,14 +109,14 @@ public class RegisterInterestList extends BaseCommand {
 
     // VJR: Check for an extra part for client version 6.0.3 onwards for the
     // time being until refactoring into a new command version.
-    if (msg.getNumberOfParts() > (numberOfKeys + partNumber)) {
+    if (clientMessage.getNumberOfParts() > (numberOfKeys + partNumber)) {
       try {
-        Part notifyPart = msg.getPart(numberOfKeys + partNumber);
+        Part notifyPart = clientMessage.getPart(numberOfKeys + partNumber);
         byte[] notifyPartBytes = (byte[]) notifyPart.getObject();
         sendUpdatesAsInvalidates = notifyPartBytes[0] == 0x01;
       } catch (Exception e) {
-        writeChunkedException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, e, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -124,7 +124,7 @@ public class RegisterInterestList extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received register interest request ({} bytes) from {} for the following {} keys in region {}: {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), numberOfKeys,
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), numberOfKeys,
           regionName, keys);
     }
 
@@ -154,25 +154,25 @@ public class RegisterInterestList extends BaseCommand {
             LocalizedStrings.RegisterInterest_THE_INPUT_REGION_NAME_FOR_THE_REGISTER_INTEREST_REQUEST_IS_NULL;
       }
       String s = errMessage.toLocalizedString();
-      logger.warn("{}: {}", servConn.getName(), s);
-      writeChunkedErrorResponse(msg, MessageType.REGISTER_INTEREST_DATA_ERROR, s, servConn);
-      servConn.setAsTrue(RESPONDED);
+      logger.warn("{}: {}", serverConnection.getName(), s);
+      writeChunkedErrorResponse(clientMessage, MessageType.REGISTER_INTEREST_DATA_ERROR, s, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     // key not null
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.RegisterInterestList_0_REGION_NAMED_1_WAS_NOT_FOUND_DURING_REGISTER_INTEREST_LIST_REQUEST,
-          new Object[] {servConn.getName(), regionName}));
+          new Object[] {serverConnection.getName(), regionName}));
       // writeChunkedErrorResponse(msg,
       // MessageType.REGISTER_INTEREST_DATA_ERROR, message);
       // responded = true;
     } // else { // region not null
     try {
       this.securityService.authorizeRegionRead(regionName);
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (!DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           RegisterInterestOperationContext registerContext =
@@ -181,14 +181,14 @@ public class RegisterInterestList extends BaseCommand {
         }
       }
       // Register interest
-      servConn.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, keys,
-          servConn.getProxyID(), isDurable, sendUpdatesAsInvalidates, false, 0, true);
+      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, keys,
+          serverConnection.getProxyID(), isDurable, sendUpdatesAsInvalidates, false, 0, true);
     } catch (Exception ex) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, ex);
+      checkForInterrupt(serverConnection, ex);
       // Otherwise, write an exception message and continue
-      writeChunkedException(msg, ex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, ex, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -197,36 +197,36 @@ public class RegisterInterestList extends BaseCommand {
     // DistributionStats.getStatTime() - start);
     // start = DistributionStats.getStatTime();
 
-    boolean isPrimary = servConn.getAcceptor().getCacheClientNotifier()
-        .getClientProxy(servConn.getProxyID()).isPrimary();
+    boolean isPrimary = serverConnection.getAcceptor().getCacheClientNotifier()
+                                        .getClientProxy(serverConnection.getProxyID()).isPrimary();
     if (!isPrimary) {
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_SECONDARY);
-      chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+      chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
       chunkedResponseMsg.sendHeader();
       chunkedResponseMsg.setLastChunk(true);
       if (logger.isDebugEnabled()) {
         logger.debug(
             "{}: Sending register interest response chunk from secondary for region: {} for key: {} chunk=<{}>",
-            servConn.getName(), regionName, key, chunkedResponseMsg);
+            serverConnection.getName(), regionName, key, chunkedResponseMsg);
       }
-      chunkedResponseMsg.sendChunk(servConn);
+      chunkedResponseMsg.sendChunk(serverConnection);
     } else { // isPrimary
       // Send header which describes how many chunks will follow
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_PRIMARY);
-      chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+      chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
       chunkedResponseMsg.sendHeader();
 
       // Send chunk response
       try {
-        fillAndSendRegisterInterestResponseChunks(region, keys, InterestType.KEY, policy, servConn);
-        servConn.setAsTrue(RESPONDED);
+        fillAndSendRegisterInterestResponseChunks(region, keys, InterestType.KEY, policy, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       } catch (Exception e) {
         // If an interrupted exception is thrown , rethrow it
-        checkForInterrupt(servConn, e);
+        checkForInterrupt(serverConnection, e);
 
         // otherwise send the exception back to client
-        writeChunkedException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, e, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
 
@@ -236,7 +236,7 @@ public class RegisterInterestList extends BaseCommand {
         // region " + regionName + " key " + key);
         logger.debug(
             "{}: Sent register interest response for the following {} keys in region {}: {}",
-            servConn.getName(), numberOfKeys, regionName, keys);
+            serverConnection.getName(), numberOfKeys, regionName, keys);
       }
       // bserverStats.incLong(writeDestroyResponseTimeId,
       // DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList61.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList61.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList61.java
index 6e006ca..8eb6c4a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList61.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList61.java
@@ -49,67 +49,67 @@ public class RegisterInterestList61 extends BaseCommand {
   RegisterInterestList61() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keyPart = null, numberOfKeysPart = null;
     String regionName = null;
     Object key = null;
     InterestResultPolicy policy;
     List keys = null;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
     int numberOfKeys = 0, partNumber = 0;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
-    ChunkedMessage chunkedResponseMsg = servConn.getRegisterInterestResponseMessage();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    ChunkedMessage chunkedResponseMsg = serverConnection.getRegisterInterestResponseMessage();
 
     // bserverStats.incLong(readDestroyRequestTimeId,
     // DistributionStats.getStatTime() - start);
     // bserverStats.incInt(destroyRequestsId, 1);
     // start = DistributionStats.getStatTime();
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
+    regionNamePart = clientMessage.getPart(0);
     regionName = regionNamePart.getString();
 
     // Retrieve the InterestResultPolicy
     try {
-      policy = (InterestResultPolicy) msg.getPart(1).getObject();
+      policy = (InterestResultPolicy) clientMessage.getPart(1).getObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     boolean isDurable = false;
     try {
-      Part durablePart = msg.getPart(2);
+      Part durablePart = clientMessage.getPart(2);
       byte[] durablePartBytes = (byte[]) durablePart.getObject();
       isDurable = durablePartBytes[0] == 0x01;
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     // region data policy
     byte[] regionDataPolicyPartBytes;
     try {
-      Part regionDataPolicyPart = msg.getPart(msg.getNumberOfParts() - 1);
+      Part regionDataPolicyPart = clientMessage.getPart(clientMessage.getNumberOfParts() - 1);
       regionDataPolicyPartBytes = (byte[]) regionDataPolicyPart.getObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
-    numberOfKeysPart = msg.getPart(3);
+    numberOfKeysPart = clientMessage.getPart(3);
     numberOfKeys = numberOfKeysPart.getInt();
 
     partNumber = 4;
     keys = new ArrayList();
     for (int i = 0; i < numberOfKeys; i++) {
-      keyPart = msg.getPart(partNumber + i);
+      keyPart = clientMessage.getPart(partNumber + i);
       try {
         key = keyPart.getStringOrObject();
       } catch (Exception e) {
-        writeChunkedException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, e, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
       keys.add(key);
@@ -119,14 +119,14 @@ public class RegisterInterestList61 extends BaseCommand {
 
     // VJR: Check for an extra part for client version 6.0.3 onwards for the
     // time being until refactoring into a new command version.
-    if (msg.getNumberOfParts() > (numberOfKeys + partNumber)) {
+    if (clientMessage.getNumberOfParts() > (numberOfKeys + partNumber)) {
       try {
-        Part notifyPart = msg.getPart(numberOfKeys + partNumber);
+        Part notifyPart = clientMessage.getPart(numberOfKeys + partNumber);
         byte[] notifyPartBytes = (byte[]) notifyPart.getObject();
         sendUpdatesAsInvalidates = notifyPartBytes[0] == 0x01;
       } catch (Exception e) {
-        writeChunkedException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, e, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -134,7 +134,7 @@ public class RegisterInterestList61 extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received register interest 61 request ({} bytes) from {} for the following {} keys in region {}: {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), numberOfKeys,
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), numberOfKeys,
           regionName, keys);
     }
 
@@ -164,25 +164,25 @@ public class RegisterInterestList61 extends BaseCommand {
             LocalizedStrings.RegisterInterest_THE_INPUT_REGION_NAME_FOR_THE_REGISTER_INTEREST_REQUEST_IS_NULL;
       }
       String s = errMessage.toLocalizedString();
-      logger.warn("{}: {}", servConn.getName(), s);
-      writeChunkedErrorResponse(msg, MessageType.REGISTER_INTEREST_DATA_ERROR, s, servConn);
-      servConn.setAsTrue(RESPONDED);
+      logger.warn("{}: {}", serverConnection.getName(), s);
+      writeChunkedErrorResponse(clientMessage, MessageType.REGISTER_INTEREST_DATA_ERROR, s, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.RegisterInterestList_0_REGION_NAMED_1_WAS_NOT_FOUND_DURING_REGISTER_INTEREST_LIST_REQUEST,
-          new Object[] {servConn.getName(), regionName}));
+          new Object[] { serverConnection.getName(), regionName}));
       // writeChunkedErrorResponse(msg,
       // MessageType.REGISTER_INTEREST_DATA_ERROR, message);
       // responded = true;
     } // else { // region not null
     try {
       this.securityService.authorizeRegionRead(regionName);
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (!DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           RegisterInterestOperationContext registerContext =
@@ -191,15 +191,15 @@ public class RegisterInterestList61 extends BaseCommand {
         }
       }
       // Register interest
-      servConn.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, keys,
-          servConn.getProxyID(), isDurable, sendUpdatesAsInvalidates, true,
+      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, keys,
+          serverConnection.getProxyID(), isDurable, sendUpdatesAsInvalidates, true,
           regionDataPolicyPartBytes[0], true);
     } catch (Exception ex) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, ex);
+      checkForInterrupt(serverConnection, ex);
       // Otherwise, write an exception message and continue
-      writeChunkedException(msg, ex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, ex, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -208,36 +208,36 @@ public class RegisterInterestList61 extends BaseCommand {
     // DistributionStats.getStatTime() - start);
     // start = DistributionStats.getStatTime();
 
-    boolean isPrimary = servConn.getAcceptor().getCacheClientNotifier()
-        .getClientProxy(servConn.getProxyID()).isPrimary();
+    boolean isPrimary = serverConnection.getAcceptor().getCacheClientNotifier()
+                                        .getClientProxy(serverConnection.getProxyID()).isPrimary();
     if (!isPrimary) {
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_SECONDARY);
-      chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+      chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
       chunkedResponseMsg.sendHeader();
       chunkedResponseMsg.setLastChunk(true);
       if (logger.isDebugEnabled()) {
         logger.debug(
             "{}: Sending register interest response chunk from secondary for region: {} for key: {} chunk=<{}>",
-            servConn.getName(), regionName, key, chunkedResponseMsg);
+            serverConnection.getName(), regionName, key, chunkedResponseMsg);
       }
-      chunkedResponseMsg.sendChunk(servConn);
+      chunkedResponseMsg.sendChunk(serverConnection);
     } else { // isPrimary
       // Send header which describes how many chunks will follow
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_PRIMARY);
-      chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+      chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
       chunkedResponseMsg.sendHeader();
 
       // Send chunk response
       try {
-        fillAndSendRegisterInterestResponseChunks(region, keys, InterestType.KEY, policy, servConn);
-        servConn.setAsTrue(RESPONDED);
+        fillAndSendRegisterInterestResponseChunks(region, keys, InterestType.KEY, policy, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       } catch (Exception e) {
         // If an interrupted exception is thrown , rethrow it
-        checkForInterrupt(servConn, e);
+        checkForInterrupt(serverConnection, e);
 
         // otherwise send the exception back to client
-        writeChunkedException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, e, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
 
@@ -247,7 +247,7 @@ public class RegisterInterestList61 extends BaseCommand {
         // region " + regionName + " key " + key);
         logger.debug(
             "{}: Sent register interest response for the following {} keys in region {}: {}",
-            servConn.getName(), numberOfKeys, regionName, keys);
+            serverConnection.getName(), numberOfKeys, regionName, keys);
       }
       // bserverStats.incLong(writeDestroyResponseTimeId,
       // DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList66.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList66.java
index 8a61364..14198cc 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList66.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList66.java
@@ -55,87 +55,87 @@ public class RegisterInterestList66 extends BaseCommand {
   RegisterInterestList66() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keyPart = null;// numberOfKeysPart = null;
     String regionName = null;
     Object key = null;
     InterestResultPolicy policy;
     List keys = null;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
     int numberOfKeys = 0, partNumber = 0;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
-    ChunkedMessage chunkedResponseMsg = servConn.getRegisterInterestResponseMessage();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    ChunkedMessage chunkedResponseMsg = serverConnection.getRegisterInterestResponseMessage();
 
     // bserverStats.incLong(readDestroyRequestTimeId,
     // DistributionStats.getStatTime() - start);
     // bserverStats.incInt(destroyRequestsId, 1);
     // start = DistributionStats.getStatTime();
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
+    regionNamePart = clientMessage.getPart(0);
     regionName = regionNamePart.getString();
 
     // Retrieve the InterestResultPolicy
     try {
-      policy = (InterestResultPolicy) msg.getPart(1).getObject();
+      policy = (InterestResultPolicy) clientMessage.getPart(1).getObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     boolean isDurable = false;
     try {
-      Part durablePart = msg.getPart(2);
+      Part durablePart = clientMessage.getPart(2);
       byte[] durablePartBytes = (byte[]) durablePart.getObject();
       isDurable = durablePartBytes[0] == 0x01;
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     // region data policy
     byte[] regionDataPolicyPartBytes;
     boolean serializeValues = false;
     try {
-      Part regionDataPolicyPart = msg.getPart(msg.getNumberOfParts() - 1);
+      Part regionDataPolicyPart = clientMessage.getPart(clientMessage.getNumberOfParts() - 1);
       regionDataPolicyPartBytes = (byte[]) regionDataPolicyPart.getObject();
-      if (servConn.getClientVersion().compareTo(Version.GFE_80) >= 0) {
+      if (serverConnection.getClientVersion().compareTo(Version.GFE_80) >= 0) {
         // The second byte here is serializeValues
         serializeValues = regionDataPolicyPartBytes[1] == (byte) 0x01;
       }
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     partNumber = 3;
-    Part list = msg.getPart(partNumber);
+    Part list = clientMessage.getPart(partNumber);
     try {
       keys = (List) list.getObject();
       numberOfKeys = keys.size();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     boolean sendUpdatesAsInvalidates = false;
     try {
-      Part notifyPart = msg.getPart(partNumber + 1);
+      Part notifyPart = clientMessage.getPart(partNumber + 1);
       byte[] notifyPartBytes = (byte[]) notifyPart.getObject();
       sendUpdatesAsInvalidates = notifyPartBytes[0] == 0x01;
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received register interest 66 request ({} bytes) from {} for the following {} keys in region {}: {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), numberOfKeys,
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), numberOfKeys,
           regionName, keys);
     }
 
@@ -165,24 +165,24 @@ public class RegisterInterestList66 extends BaseCommand {
             LocalizedStrings.RegisterInterest_THE_INPUT_REGION_NAME_FOR_THE_REGISTER_INTEREST_REQUEST_IS_NULL;
       }
       String s = errMessage.toLocalizedString();
-      logger.warn("{}: {}", servConn.getName(), s);
-      writeChunkedErrorResponse(msg, MessageType.REGISTER_INTEREST_DATA_ERROR, s, servConn);
-      servConn.setAsTrue(RESPONDED);
+      logger.warn("{}: {}", serverConnection.getName(), s);
+      writeChunkedErrorResponse(clientMessage, MessageType.REGISTER_INTEREST_DATA_ERROR, s, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     }
 
     // key not null
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.RegisterInterestList_0_REGION_NAMED_1_WAS_NOT_FOUND_DURING_REGISTER_INTEREST_LIST_REQUEST,
-          new Object[] {servConn.getName(), regionName}));
+          new Object[] { serverConnection.getName(), regionName}));
       // writeChunkedErrorResponse(msg,
       // MessageType.REGISTER_INTEREST_DATA_ERROR, message);
       // responded = true;
     } // else { // region not null
     try {
       this.securityService.authorizeRegionRead(regionName);
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (!DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           RegisterInterestOperationContext registerContext =
@@ -191,15 +191,15 @@ public class RegisterInterestList66 extends BaseCommand {
         }
       }
       // Register interest
-      servConn.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, keys,
-          servConn.getProxyID(), isDurable, sendUpdatesAsInvalidates, true,
+      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, keys,
+          serverConnection.getProxyID(), isDurable, sendUpdatesAsInvalidates, true,
           regionDataPolicyPartBytes[0], true);
     } catch (Exception ex) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, ex);
+      checkForInterrupt(serverConnection, ex);
       // Otherwise, write an exception message and continue
-      writeChunkedException(msg, ex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, ex, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -208,37 +208,37 @@ public class RegisterInterestList66 extends BaseCommand {
     // DistributionStats.getStatTime() - start);
     // start = DistributionStats.getStatTime();
 
-    boolean isPrimary = servConn.getAcceptor().getCacheClientNotifier()
-        .getClientProxy(servConn.getProxyID()).isPrimary();
+    boolean isPrimary = serverConnection.getAcceptor().getCacheClientNotifier()
+                                        .getClientProxy(serverConnection.getProxyID()).isPrimary();
     if (!isPrimary) {
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_SECONDARY);
-      chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+      chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
       chunkedResponseMsg.sendHeader();
       chunkedResponseMsg.setLastChunk(true);
       if (logger.isDebugEnabled()) {
         logger.debug(
             "{}: Sending register interest response chunk from secondary for region: {} for key: {} chunk=<{}>",
-            servConn.getName(), regionName, key, chunkedResponseMsg);
+            serverConnection.getName(), regionName, key, chunkedResponseMsg);
       }
-      chunkedResponseMsg.sendChunk(servConn);
+      chunkedResponseMsg.sendChunk(serverConnection);
     } else { // isPrimary
       // Send header which describes how many chunks will follow
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_PRIMARY);
-      chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+      chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
       chunkedResponseMsg.sendHeader();
 
       // Send chunk response
       try {
         fillAndSendRegisterInterestResponseChunks(region, keys, InterestType.KEY, serializeValues,
-            policy, servConn);
-        servConn.setAsTrue(RESPONDED);
+            policy, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       } catch (Exception e) {
         // If an interrupted exception is thrown , rethrow it
-        checkForInterrupt(servConn, e);
+        checkForInterrupt(serverConnection, e);
 
         // otherwise send the exception back to client
-        writeChunkedException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, e, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
 
@@ -248,7 +248,7 @@ public class RegisterInterestList66 extends BaseCommand {
         // region " + regionName + " key " + key);
         logger.debug(
             "{}: Sent register interest response for the following {} keys in region {}: {}",
-            servConn.getName(), numberOfKeys, regionName, keys);
+            serverConnection.getName(), numberOfKeys, regionName, keys);
       }
       // bserverStats.incLong(writeDestroyResponseTimeId,
       // DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveAll.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveAll.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveAll.java
index 88386a1..52a1df3 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveAll.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveAll.java
@@ -61,7 +61,7 @@ public class RemoveAll extends BaseCommand {
   protected RemoveAll() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long startp)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long startp)
       throws IOException, InterruptedException {
     long start = startp; // copy this since we need to modify it
     Part regionNamePart = null, numberOfKeysPart = null, keyPart = null;
@@ -73,11 +73,11 @@ public class RemoveAll extends BaseCommand {
     VersionedObjectList response = null;
 
     StringBuffer errMessage = new StringBuffer();
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
     {
       long oldStart = start;
       start = DistributionStats.getStatTime();
@@ -87,7 +87,7 @@ public class RemoveAll extends BaseCommand {
     try {
       // Retrieve the data from the message parts
       // part 0: region name
-      regionNamePart = msg.getPart(0);
+      regionNamePart = clientMessage.getPart(0);
       regionName = regionNamePart.getString();
 
       if (regionName == null) {
@@ -95,67 +95,66 @@ public class RemoveAll extends BaseCommand {
             LocalizedStrings.RemoveAll_THE_INPUT_REGION_NAME_FOR_THE_REMOVEALL_REQUEST_IS_NULL
                 .toLocalizedString();
         logger.warn(LocalizedMessage.create(LocalizedStrings.TWO_ARG_COLON,
-            new Object[] {servConn.getName(), txt}));
+            new Object[] { serverConnection.getName(), txt}));
         errMessage.append(txt);
-        writeChunkedErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
-      LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+      LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
       if (region == null) {
         String reason = " was not found during removeAll request";
-        writeRegionDestroyedEx(msg, regionName, reason, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
 
       // part 1: eventID
-      eventPart = msg.getPart(1);
+      eventPart = clientMessage.getPart(1);
       ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
       long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
       long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-      EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+      EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
       Breadcrumbs.setEventId(eventId);
 
       // part 2: flags
-      int flags = msg.getPart(2).getInt();
+      int flags = clientMessage.getPart(2).getInt();
       boolean clientIsEmpty = (flags & PutAllOp.FLAG_EMPTY) != 0;
       boolean clientHasCCEnabled = (flags & PutAllOp.FLAG_CONCURRENCY_CHECKS) != 0;
 
       // part 3: callbackArg
-      Object callbackArg = msg.getPart(3).getObject();
+      Object callbackArg = clientMessage.getPart(3).getObject();
 
       // part 4: number of keys
-      numberOfKeysPart = msg.getPart(4);
+      numberOfKeysPart = clientMessage.getPart(4);
       numberOfKeys = numberOfKeysPart.getInt();
 
       if (logger.isDebugEnabled()) {
         StringBuilder buffer = new StringBuilder();
-        buffer.append(servConn.getName()).append(": Received removeAll request from ")
-            .append(servConn.getSocketString()).append(" for region ").append(regionName)
-            .append(callbackArg != null ? (" callbackArg " + callbackArg) : "").append(" with ")
-            .append(numberOfKeys).append(" keys.");
+        buffer.append(serverConnection.getName()).append(": Received removeAll request from ")
+              .append(serverConnection.getSocketString()).append(" for region ").append(regionName)
+              .append(callbackArg != null ? (" callbackArg " + callbackArg) : "").append(" with ")
+              .append(numberOfKeys).append(" keys.");
         logger.debug(buffer);
       }
       ArrayList<Object> keys = new ArrayList<Object>(numberOfKeys);
       ArrayList<VersionTag> retryVersions = new ArrayList<VersionTag>(numberOfKeys);
       for (int i = 0; i < numberOfKeys; i++) {
-        keyPart = msg.getPart(5 + i);
+        keyPart = clientMessage.getPart(5 + i);
         key = keyPart.getStringOrObject();
         if (key == null) {
           String txt =
               LocalizedStrings.RemoveAll_ONE_OF_THE_INPUT_KEYS_FOR_THE_REMOVEALL_REQUEST_IS_NULL
                   .toLocalizedString();
           logger.warn(LocalizedMessage.create(LocalizedStrings.TWO_ARG_COLON,
-              new Object[] {servConn.getName(), txt}));
+              new Object[] { serverConnection.getName(), txt}));
           errMessage.append(txt);
-          writeChunkedErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(),
-              servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
-        if (msg.isRetry()) {
+        if (clientMessage.isRetry()) {
           // Constuct the thread id/sequence id information for this element of the bulk op
 
           // The sequence id is constructed from the base sequence id and the offset
@@ -181,15 +180,15 @@ public class RemoveAll extends BaseCommand {
         keys.add(key);
       } // for
 
-      if (msg.getNumberOfParts() == (5 + numberOfKeys + 1)) {// it means optional timeout has been
+      if (clientMessage.getNumberOfParts() == (5 + numberOfKeys + 1)) {// it means optional timeout has been
                                                              // added
-        int timeout = msg.getPart(5 + numberOfKeys).getInt();
-        servConn.setRequestSpecificTimeout(timeout);
+        int timeout = clientMessage.getPart(5 + numberOfKeys).getInt();
+        serverConnection.setRequestSpecificTimeout(timeout);
       }
 
       this.securityService.authorizeRegionWrite(regionName);
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           authzRequest.createRegionAuthorize(regionName);
@@ -200,7 +199,7 @@ public class RemoveAll extends BaseCommand {
         }
       }
 
-      response = region.basicBridgeRemoveAll(keys, retryVersions, servConn.getProxyID(), eventId,
+      response = region.basicBridgeRemoveAll(keys, retryVersions, serverConnection.getProxyID(), eventId,
           callbackArg);
       if (!region.getConcurrencyChecksEnabled() || clientIsEmpty || !clientHasCCEnabled) {
         // the client only needs this if versioning is being used and the client
@@ -216,33 +215,33 @@ public class RemoveAll extends BaseCommand {
       if (region instanceof PartitionedRegion) {
         PartitionedRegion pr = (PartitionedRegion) region;
         if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-          writeReplyWithRefreshMetadata(msg, response, servConn, pr, pr.getNetworkHopType());
+          writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr, pr.getNetworkHopType());
           pr.clearNetworkHopData();
           replyWithMetaData = true;
         }
       }
     } catch (RegionDestroyedException rde) {
-      writeChunkedException(msg, rde, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, rde, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (ResourceException re) {
-      writeChunkedException(msg, re, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, re, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (PutAllPartialResultException pre) {
-      writeChunkedException(msg, pre, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, pre, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (Exception ce) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, ce);
+      checkForInterrupt(serverConnection, ce);
 
       // If an exception occurs during the op, preserve the connection
-      writeChunkedException(msg, ce, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, ce, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       // if (logger.fineEnabled()) {
       logger.warn(LocalizedMessage.create(LocalizedStrings.Generic_0_UNEXPECTED_EXCEPTION,
-          servConn.getName()), ce);
+          serverConnection.getName()), ce);
       // }
       return;
     } finally {
@@ -251,20 +250,20 @@ public class RemoveAll extends BaseCommand {
       stats.incProcessRemoveAllTime(start - oldStart);
     }
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sending removeAll response back to {} for region {}{}", servConn.getName(),
-          servConn.getSocketString(), regionName, (logger.isTraceEnabled() ? ": " + response : ""));
+      logger.debug("{}: Sending removeAll response back to {} for region {}{}", serverConnection.getName(),
+          serverConnection.getSocketString(), regionName, (logger.isTraceEnabled() ? ": " + response : ""));
     }
 
     // Increment statistics and write the reply
     if (!replyWithMetaData) {
-      writeReply(msg, response, servConn);
+      writeReply(clientMessage, response, serverConnection);
     }
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
     stats.incWriteRemoveAllResponseTime(DistributionStats.getStatTime() - start);
   }
 
   @Override
-  protected void writeReply(Message origMsg, ServerConnection servConn) throws IOException {
+  protected void writeReply(Message origMsg, ServerConnection serverConnection) throws IOException {
     throw new UnsupportedOperationException();
   }
 
@@ -285,7 +284,7 @@ public class RemoveAll extends BaseCommand {
     }
     replyMsg.sendHeader();
     if (listSize > 0) {
-      int chunkSize = 2 * maximumChunkSize;
+      int chunkSize = 2 * MAXIMUM_CHUNK_SIZE;
       // Chunker will stream over the list in its toData method
       VersionedObjectList.Chunker chunk =
           new VersionedObjectList.Chunker(response, chunkSize, false, false);
@@ -317,7 +316,7 @@ public class RemoveAll extends BaseCommand {
   }
 
   @Override
-  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection servConn,
+  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection serverConnection,
       PartitionedRegion pr, byte nwHop) throws IOException {
     throw new UnsupportedOperationException();
   }
@@ -345,7 +344,7 @@ public class RemoveAll extends BaseCommand {
       replyMsg.setLastChunk(false);
       replyMsg.sendChunk(servConn);
 
-      int chunkSize = 2 * maximumChunkSize; // maximumChunkSize
+      int chunkSize = 2 * MAXIMUM_CHUNK_SIZE; // MAXIMUM_CHUNK_SIZE
       // Chunker will stream over the list in its toData method
       VersionedObjectList.Chunker chunk =
           new VersionedObjectList.Chunker(response, chunkSize, false, false);
@@ -371,7 +370,7 @@ public class RemoveAll extends BaseCommand {
     }
     pr.getPrStats().incPRMetaDataSentCount();
     if (logger.isTraceEnabled()) {
-      logger.trace("{}: rpl with REFRESH_METADAT tx: {}", servConn.getName(),
+      logger.trace("{}: rpl with REFRESH_METADATA tx: {}", servConn.getName(),
           origMsg.getTransactionId());
     }
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveUserAuth.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveUserAuth.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveUserAuth.java
index 42a5bec..16333ac 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveUserAuth.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveUserAuth.java
@@ -33,9 +33,9 @@ public class RemoveUserAuth extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException, InterruptedException {
-    boolean isSecureMode = msg.isSecureMode();
+    boolean isSecureMode = clientMessage.isSecureMode();
 
     if (!isSecureMode) {
       // need to throw exception
@@ -43,29 +43,29 @@ public class RemoveUserAuth extends BaseCommand {
     }
 
     try {
-      servConn.setAsTrue(REQUIRES_RESPONSE);
-      Part keepalivePart = msg.getPart(0);
+      serverConnection.setAsTrue(REQUIRES_RESPONSE);
+      Part keepalivePart = clientMessage.getPart(0);
       byte[] keepaliveByte = keepalivePart.getSerializedForm();
       boolean keepalive = (keepaliveByte == null || keepaliveByte[0] == 0) ? false : true;
-      servConn.getSecurityLogWriter().fine("remove user auth keep alive " + keepalive);
-      servConn.removeUserAuth(msg, keepalive);
-      writeReply(msg, servConn);
+      serverConnection.getSecurityLogWriter().fine("remove user auth keep alive " + keepalive);
+      serverConnection.removeUserAuth(clientMessage, keepalive);
+      writeReply(clientMessage, serverConnection);
     } catch (GemFireSecurityException gfse) {
-      if (servConn.getSecurityLogWriter().warningEnabled()) {
-        servConn.getSecurityLogWriter().warning(LocalizedStrings.ONE_ARG,
-            servConn.getName() + ": Security exception: " + gfse.getMessage());
+      if (serverConnection.getSecurityLogWriter().warningEnabled()) {
+        serverConnection.getSecurityLogWriter().warning(LocalizedStrings.ONE_ARG,
+          serverConnection.getName() + ": Security exception: " + gfse.getMessage());
       }
-      writeException(msg, gfse, false, servConn);
+      writeException(clientMessage, gfse, false, serverConnection);
     } catch (Exception ex) {
       // TODO Auto-generated catch block
-      if (servConn.getLogWriter().warningEnabled()) {
-        servConn.getLogWriter().warning(
+      if (serverConnection.getLogWriter().warningEnabled()) {
+        serverConnection.getLogWriter().warning(
             LocalizedStrings.CacheClientNotifier_AN_EXCEPTION_WAS_THROWN_FOR_CLIENT_0_1,
-            new Object[] {servConn.getProxyID(), ""}, ex);
+            new Object[] { serverConnection.getProxyID(), ""}, ex);
       }
-      writeException(msg, ex, false, servConn);
+      writeException(clientMessage, ex, false, serverConnection);
     } finally {
-      servConn.setAsTrue(RESPONDED);
+      serverConnection.setAsTrue(RESPONDED);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Request.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Request.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Request.java
index f7baba4..964b7a4 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Request.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Request.java
@@ -52,15 +52,15 @@ public class Request extends BaseCommand {
   Request() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
     Part regionNamePart = null, keyPart = null, valuePart = null;
     String regionName = null;
     Object callbackArg = null, key = null;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
     StringId errMessage = null;
 
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     // requiresResponse = true;
     {
       long oldStart = start;
@@ -68,18 +68,18 @@ public class Request extends BaseCommand {
       stats.incReadGetRequestTime(start - oldStart);
     }
     // Retrieve the data from the message parts
-    int parts = msg.getNumberOfParts();
-    regionNamePart = msg.getPart(0);
-    keyPart = msg.getPart(1);
+    int parts = clientMessage.getNumberOfParts();
+    regionNamePart = clientMessage.getPart(0);
+    keyPart = clientMessage.getPart(1);
     // valuePart = null; (redundant assignment)
     if (parts > 2) {
-      valuePart = msg.getPart(2);
+      valuePart = clientMessage.getPart(2);
       try {
         callbackArg = valuePart.getObject();
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
+        writeException(clientMessage, e, false, serverConnection);
         // responded = true;
-        servConn.setAsTrue(RESPONDED);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -87,15 +87,15 @@ public class Request extends BaseCommand {
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
+      writeException(clientMessage, e, false, serverConnection);
       // responded = true;
-      servConn.setAsTrue(RESPONDED);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received get request ({} bytes) from {} for region {} key {} txId {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName, key,
-          msg.getTransactionId());
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key,
+          clientMessage.getTransactionId());
     }
 
     // Process the get request
@@ -109,31 +109,31 @@ public class Request extends BaseCommand {
         errMessage = LocalizedStrings.Request_THE_INPUT_REGION_NAME_FOR_THE_GET_REQUEST_IS_NULL;
       }
       String s = errMessage.toLocalizedString();
-      logger.warn("{}: {}", servConn.getName(), s);
-      writeErrorResponse(msg, MessageType.REQUESTDATAERROR, s, servConn);
+      logger.warn("{}: {}", serverConnection.getName(), s);
+      writeErrorResponse(clientMessage, MessageType.REQUESTDATAERROR, s, serverConnection);
       // responded = true;
-      servConn.setAsTrue(RESPONDED);
+      serverConnection.setAsTrue(RESPONDED);
     } else {
-      Region region = servConn.getCache().getRegion(regionName);
+      Region region = serverConnection.getCache().getRegion(regionName);
       if (region == null) {
         String reason = LocalizedStrings.Request__0_WAS_NOT_FOUND_DURING_GET_REQUEST
             .toLocalizedString(regionName);
-        writeRegionDestroyedEx(msg, regionName, reason, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       } else {
 
         GetOperationContext getContext = null;
 
         try {
           this.securityService.authorizeRegionRead(regionName, key.toString());
-          AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+          AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
           if (authzRequest != null) {
             getContext = authzRequest.getAuthorize(regionName, key, callbackArg);
             callbackArg = getContext.getCallbackArg();
           }
         } catch (NotAuthorizedException ex) {
-          writeException(msg, ex, false, servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeException(clientMessage, ex, false, serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
 
@@ -141,10 +141,10 @@ public class Request extends BaseCommand {
         // the value if it is a byte[].
         Object[] valueAndIsObject = new Object[3];
         try {
-          getValueAndIsObject(region, key, callbackArg, servConn, valueAndIsObject);
+          getValueAndIsObject(region, key, callbackArg, serverConnection, valueAndIsObject);
         } catch (Exception e) {
-          writeException(msg, e, false, servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeException(clientMessage, e, false, serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
 
@@ -154,7 +154,7 @@ public class Request extends BaseCommand {
 
 
         try {
-          AuthorizeRequestPP postAuthzRequest = servConn.getPostAuthzRequest();
+          AuthorizeRequestPP postAuthzRequest = serverConnection.getPostAuthzRequest();
           if (postAuthzRequest != null) {
             getContext = postAuthzRequest.getAuthorize(regionName, key, data, isObject, getContext);
             byte[] serializedValue = getContext.getSerializedValue();
@@ -166,8 +166,8 @@ public class Request extends BaseCommand {
             isObject = getContext.isObject();
           }
         } catch (NotAuthorizedException ex) {
-          writeException(msg, ex, false, servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeException(clientMessage, ex, false, serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
         {
@@ -179,20 +179,20 @@ public class Request extends BaseCommand {
         if (region instanceof PartitionedRegion) {
           PartitionedRegion pr = (PartitionedRegion) region;
           if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-            writeResponseWithRefreshMetadata(data, callbackArg, msg, isObject, servConn, pr,
+            writeResponseWithRefreshMetadata(data, callbackArg, clientMessage, isObject, serverConnection, pr,
                 pr.getNetworkHopType());
             pr.clearNetworkHopData();
           } else {
-            writeResponse(data, callbackArg, msg, isObject, servConn);
+            writeResponse(data, callbackArg, clientMessage, isObject, serverConnection);
           }
         } else {
-          writeResponse(data, callbackArg, msg, isObject, servConn);
+          writeResponse(data, callbackArg, clientMessage, isObject, serverConnection);
         }
 
-        servConn.setAsTrue(RESPONDED);
+        serverConnection.setAsTrue(RESPONDED);
         if (logger.isDebugEnabled()) {
           logger.debug("{}: Wrote get response back to {} for region {} key {} value: {}",
-              servConn.getName(), servConn.getSocketString(), regionName, key, data);
+              serverConnection.getName(), serverConnection.getSocketString(), regionName, key, data);
         }
         stats.incWriteGetResponseTime(DistributionStats.getStatTime() - start);
       }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RequestEventValue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RequestEventValue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RequestEventValue.java
index 3fd84d6..3753ed6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RequestEventValue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RequestEventValue.java
@@ -49,57 +49,57 @@ public class RequestEventValue extends BaseCommand {
 
   private RequestEventValue() {}
 
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
     Part eventIDPart = null, valuePart = null;
     EventID event = null;
     Object callbackArg = null;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
     StringBuffer errMessage = new StringBuffer();
 
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
 
     // Retrieve the data from the message parts
-    int parts = msg.getNumberOfParts();
-    eventIDPart = msg.getPart(0);
+    int parts = clientMessage.getNumberOfParts();
+    eventIDPart = clientMessage.getPart(0);
 
     if (eventIDPart == null) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.RequestEventValue_0_THE_EVENT_ID_FOR_THE_GET_EVENT_VALUE_REQUEST_IS_NULL,
-          servConn.getName()));
+          serverConnection.getName()));
       errMessage.append(" The event id for the get event value request is null.");
-      writeErrorResponse(msg, MessageType.REQUESTDATAERROR, errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.REQUESTDATAERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     } else {
       try {
         event = (EventID) eventIDPart.getObject();
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
       if (parts > 1) {
-        valuePart = msg.getPart(1);
+        valuePart = clientMessage.getPart(1);
         try {
           if (valuePart != null) {
             callbackArg = valuePart.getObject();
           }
         } catch (Exception e) {
-          writeException(msg, e, false, servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeException(clientMessage, e, false, serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
       }
       if (logger.isTraceEnabled()) {
-        logger.trace("{}: Received get event value request ({} bytes) from {}", servConn.getName(),
-            msg.getPayloadLength(), servConn.getSocketString());
+        logger.trace("{}: Received get event value request ({} bytes) from {}", serverConnection.getName(),
+            clientMessage.getPayloadLength(), serverConnection.getSocketString());
       }
-      CacheClientNotifier ccn = servConn.getAcceptor().getCacheClientNotifier();
+      CacheClientNotifier ccn = serverConnection.getAcceptor().getCacheClientNotifier();
       // Get the ha container.
       HAContainerWrapper haContainer = (HAContainerWrapper) ccn.getHaContainer();
       if (haContainer == null) {
         String reason = " was not found during get event value request";
-        writeRegionDestroyedEx(msg, "ha container", reason, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeRegionDestroyedEx(clientMessage, "ha container", reason, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       } else {
         Object[] valueAndIsObject = new Object[2];
         try {
@@ -110,8 +110,8 @@ public class RequestEventValue extends BaseCommand {
                 LocalizedStrings.RequestEventValue_UNABLE_TO_FIND_A_CLIENT_UPDATE_MESSAGE_FOR_0,
                 event));
             String msgStr = "No value found for " + event + " in " + haContainer.getName();
-            writeErrorResponse(msg, MessageType.REQUEST_EVENT_VALUE_ERROR, msgStr, servConn);
-            servConn.setAsTrue(RESPONDED);
+            writeErrorResponse(clientMessage, MessageType.REQUEST_EVENT_VALUE_ERROR, msgStr, serverConnection);
+            serverConnection.setAsTrue(RESPONDED);
             return;
           } else {
             if (logger.isDebugEnabled()) {
@@ -130,20 +130,20 @@ public class RequestEventValue extends BaseCommand {
             valueAndIsObject[1] = Boolean.valueOf(((ClientUpdateMessageImpl) data).valueIsObject());
           }
         } catch (Exception e) {
-          writeException(msg, e, false, servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeException(clientMessage, e, false, serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
 
         Object data = valueAndIsObject[0];
         boolean isObject = (Boolean) valueAndIsObject[1];
 
-        writeResponse(data, callbackArg, msg, isObject, servConn);
-        servConn.setAsTrue(RESPONDED);
-        ccn.getClientProxy(servConn.getProxyID()).getStatistics().incDeltaFullMessagesSent();
+        writeResponse(data, callbackArg, clientMessage, isObject, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
+        ccn.getClientProxy(serverConnection.getProxyID()).getStatistics().incDeltaFullMessagesSent();
         if (logger.isDebugEnabled()) {
           logger.debug("{}: Wrote get event value response back to {} for ha container {}",
-              servConn.getName(), servConn.getSocketString(), haContainer.getName());
+              serverConnection.getName(), serverConnection.getSocketString(), haContainer.getName());
         }
       }
     }


[06/43] geode git commit: GEODE-2941 Update Pulse documentation

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/quickstart.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/quickstart.html.md.erb b/geode-docs/tools_modules/pulse/quickstart.html.md.erb
deleted file mode 100644
index 6bcf1dc..0000000
--- a/geode-docs/tools_modules/pulse/quickstart.html.md.erb
+++ /dev/null
@@ -1,827 +0,0 @@
----
-title: Pulse Quick Start (Embedded Mode)
----
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-<a id="topic_523F6DE33FE54307BBE8F83BB7D9355D"></a>
-
-
-Use Pulse in embedded mode to monitor a Geode deployment directly from a Geode JMX Manager. By default, the embedded Pulse application connects to the local JMX Manager that hosts the Pulse application. Optionally, configure Pulse to connect to a Geode system of your choice.
-
-To run Pulse in embedded mode:
-
-1.  Configure a Geode member to run as a JMX Manager node, specifying the HTTP port on which you will access the Pulse Web application (port 7070 by default). For example, the following command starts a Geode locator as a JMX Manager node, using the default HTTP port 7070 for the Pulse application:
-
-    ``` pre
-    gfsh
-    gfsh> start locator --name=loc1
-    ```
-
-    **Note:**
-    Geode locators become JMX Manager nodes by default. To start a non-locator member as a JMX Manager node, include the `--J=-Dgemfire.jmx-manager=true` option. To specify a non-default port number for the HTTP service that hosts the Pulse application, include the `--J=-Dgemfire.http-service-port=port_number` option when starting the JMX Manager node. (An illustrative example of these options follows this procedure.)
-
-    When the JMX Manager node boots, it starts an embedded Jetty instance and deploys the Pulse Web application at the specified HTTP port, or at the default port 7070 if none was specified.
-
-    `gfsh` automatically connects to the manager when you start it in this way. If you already started a manager process earlier, use the `connect` command in `gfsh` to connect to that process.
-
-2.  Access the embedded Pulse application from a Web browser. If you are connected to the Geode cluster using gfsh, use the `start pulse` command to load the correct URL in your browser:
-
-    ``` pre
-    gfsh> start pulse
-    ```
-
-    Or, enter the URL http://*address*:*http-service-port*/pulse directly in your Web browser, substituting the address and HTTP port of the manager. For example, on the machine running the locator started in Step 1, you would access Pulse at the URL http://localhost:7070/pulse.
-
-3.  If you have configured authentication for the Pulse application, enter the username and password of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both fields. Click **Sign In** to continue.
-
-    See [Configuring Pulse Authentication](quickstart.html#topic_AC9FFAA6FB044279BAED7A3E099E07AC).
-
-4.  After you log in, Pulse displays the main cluster view for the local distributed system. See [Using Pulse Views](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404).
-
-**Note:**
-When running in embedded mode, the Pulse application connects only to the JMX Manager running in the locator or member that hosts Pulse. This enables you to monitor all members of that distributed system. You can also view (but not monitor) connected WAN clusters, and can view gateway senders and receivers that are configured in the local cluster.
-
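As a supplement to the Note in Step 1 above, here is a minimal sketch (not a complete configuration) of how a non-locator member might be started as a JMX Manager node that serves Pulse on a non-default HTTP port. The member name `server1` and port `8080` are placeholder assumptions; only the `--J=-Dgemfire.jmx-manager=true` and `--J=-Dgemfire.http-service-port=port_number` options come from the Note, and a real deployment would also supply its usual cluster settings (such as locator addresses).

``` pre
gfsh
gfsh> start server --name=server1 --J=-Dgemfire.jmx-manager=true --J=-Dgemfire.http-service-port=8080
```

With such a member running, the embedded Pulse application would be reachable following the pattern in Step 2, for example at http://localhost:8080/pulse.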
-## <a id="topic_795C97B46B9843528961A094EE520782" class="no-quick-link"></a>Hosting Pulse on a Web Application Server
-
-Host Pulse on a dedicated Web application server to make the Pulse application available at a consistent address, or to use SSL for accessing the Pulse application. When you host Pulse in this way, you also configure Pulse to connect to a specific locator or JMX Manager node for monitoring.
-
-To host Pulse on a Web application server:
-
-1.  Set the `http-service-port` property to zero (`-Dgemfire.http-service-port=0`) when you start your Geode JMX Manager nodes. Setting this property to zero disables the embedded Web server for hosting the Pulse application. (A sketch combining this step with Step 2 follows this procedure.)
-2.  Create a `pulse.properties` file somewhere in the classpath of your Web application server. For example, if you are hosting Pulse on Tomcat, create the `pulse.properties` file in the `$TOMCAT_SERVER/lib` directory.
-
-3.  Define the following configuration properties in the `pulse.properties` file:
-
-    <table>
-    <colgroup>
-    <col width="50%" />
-    <col width="50%" />
-    </colgroup>
-    <thead>
-    <tr class="header">
-    <th>Property</th>
-    <th>Description</th>
-    </tr>
-    </thead>
-    <tbody>
-    <tr class="odd">
-    <td><code class="ph codeph">pulse.useLocator</code></td>
-    <td>Specify &quot;true&quot; to configure Pulse to connect to a Geode Locator member, or &quot;false&quot; to connect directly to a JMX Manager.
-    <p>When Pulse connects to a Geode locator, the locator provides the address and port of an available JMX Manager to use for monitoring the distributed system. In most production deployments, you should connect Pulse to a locator instance; this allows Pulse to provide monitoring services using any available JMX Manager.</p>
-    <p>If you specify &quot;false,&quot; Pulse connects directly to a specific JMX Manager. If this manager is not available, the Pulse connection fails, even if another JMX Manager is available in the distributed system.</p></td>
-    </tr>
-    <tr class="even">
-    <td><code class="ph codeph">pulse.host</code></td>
-    <td>Specify the DNS name or IP address of the Geode locator or JMX Manager machine to which Pulse should connect. You specify either a locator or JMX Manager address depending on how you configured the <code class="ph codeph">pulse.useLocator</code> property.</td>
-    </tr>
-    <tr class="odd">
-    <td><code class="ph codeph">pulse.port</code></td>
-    <td>Specify the port number of the Geode locator or the HTTP port number of the JMX Manager to which Pulse should connect. You specify either a locator or JMX Manager port depending on how you configured the <code class="ph codeph">pulse.useLocator</code> property.
-    <p>If you configured <code class="ph codeph">pulse.useLocator=false</code>, then <code class="ph codeph">pulse.port</code> must correspond to the <code class="ph codeph">http-service-port</code> setting of the JMX Manager.</p></td>
-    </tr>
-    <tr class="even">
-    <td><code class="ph codeph">pulse.jmxUserName</code></td>
-    <td>If you configured authentication for the Geode JMX Manager node, specify a valid JMX user name that the Pulse application will use to authenticate to the JMX Manager.
-    <div class="note note">
-    **Note:**
-    <p>The JMX account that Pulse uses must have both read and write privileges.</p>
-    </div>
-    <p>See <a href="../../managing/management/jmx_manager_operations.html#topic_263072624B8D4CDBAD18B82E07AA44B6">Configuring a JMX Manager</a> for information about configuring authentication for JMX Manager nodes.</p></td>
-    </tr>
-    <tr class="odd">
-    <td><code class="ph codeph">pulse.jmxUserPassword</code></td>
-    <td>Specify the password of the JMX user account to use for authentication at startup.</td>
-    </tr>
-    </tbody>
-    </table>
-
-    For example, with this configuration Pulse connects to the locator at locsrv.gemstone.com\[10334\] and accesses any available JMX Manager:
-
-    ``` pre
-    pulse.useLocator=true
-    pulse.host=locsrv.gemstone.com
-    pulse.port=10334
-    pulse.jmxUserName=pulseapp
-    pulse.jmxUserPassword=pulsepass
-    ```
-
-    With this configuration Pulse accesses only the JMX Manager instance at jmxsrv.gemstone.com\[8080\]:
-
-    ``` pre
-    pulse.useLocator=false
-    pulse.host=jmxsrv.gemstone.com
-    pulse.port=8080
-    pulse.jmxUserName=pulseapp
-    pulse.jmxUserPassword=pulsepass
-    ```
-
-4.  (Optional.) Configure authentication for the Pulse Web application using the instructions in [Configuring Pulse Authentication](quickstart.html#topic_AC9FFAA6FB044279BAED7A3E099E07AC).
-5.  Deploy the Pulse Web application to your application server. Geode installs the `pulse.war` file in the `tools/Pulse` subdirectory of your Geode installation directory. Depending on your application server, you may need to copy the `pulse.war` file to a deployment directory or use a configuration tool to deploy the file.
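-
-    For example, on Tomcat the deployment can be as simple as copying the WAR file into the `webapps` directory; the paths below are placeholders for your Geode installation and `CATALINA_HOME`:
-
-    ``` pre
-    # paths are placeholders
-    $ cp /path/to/geode/tools/Pulse/pulse.war $CATALINA_HOME/webapps/
-    ```
-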
-6.  Access the Pulse application using the address, port, and application URL that you configure in your Web application server. For example, with Tomcat the default URL is http://*address*:8080/pulse. Your application server provides options for configuring the address, port, and application name; substitute the correct items to access the deployed Pulse application.
-
-    Pulse connects to the locator or JMX Manager that you configured in the `pulse.properties` file, authenticating using the credentials that you configured in the file.
-
-7.  If you have configured authentication for the Pulse application, enter the username and password of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both fields. Click **Sign In** to continue.
-
-    See [Configuring Pulse Authentication](quickstart.html#topic_AC9FFAA6FB044279BAED7A3E099E07AC).
-
-8.  After you log in, Pulse displays the main cluster view for the distributed system to which it has connected. See [Using Pulse Views](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404).
-
-## <a id="topic_AC9FFAA6FB044279BAED7A3E099E07AC" class="no-quick-link"></a>Configuring Pulse Authentication
-
-Pulse requires all users to authenticate themselves before they can use the Pulse Web application. If you have configured JMX authentication on the Geode JMX Manager node, the Pulse Web application may also need to authenticate itself to the Geode JMX Manager node on startup.
-
-## <a id="topic_AC9FFAA6FB044279BAED7A3E099E07AC__section_D31C25130C3D470083DAC76AE64DD1B6" class="no-quick-link"></a>Authenticating the Pulse Application to the JMX Manager
-
-If you run Pulse in embedded mode, the Pulse application runs on the JMX Manager node and no JMX authentication is required. You do not need to specify valid JMX credentials to start an embedded Pulse application.
-
-If you host Pulse on a Web Application server (non-embedded mode) and you configure JMX authentication on the Geode manager node, then the Pulse Web application must authenticate itself with the manager node when it starts. Specify the credentials of a valid JMX user account in the `pulse.properties` file, as described in [Hosting Pulse on a Web Application Server](quickstart.html#topic_795C97B46B9843528961A094EE520782).
-
-**Note:**
-The credentials that you specify must have both read and write privileges in the JMX Manager node. See [Configuring a JMX Manager](../../managing/management/jmx_manager_operations.html#topic_263072624B8D4CDBAD18B82E07AA44B6).
-
-## <a id="topic_AC9FFAA6FB044279BAED7A3E099E07AC__section_E3703ED899354839BE51278D3AE79062" class="no-quick-link"></a>Authenticating Pulse Users
-
-Pulse implements user authentication using the Spring security framework. The authentication configuration is specified in the `spring-security.xml` file, which is stored in the `WEB-INF` directory of the Pulse WAR file. The `spring-security.xml` file contains bean definitions for role-based resource access, authentication profiles, and authentication handlers. The file also contains a default authentication manager bean definition.
-
-Pulse uses a profile-based authentication configuration. You can choose to use either the default configuration profile or a custom configuration. The default profile uses the Spring security simple in-memory User Details Service to define a single user with the credentials:
-
-|            |            |
-|------------|------------|
-| User Name: | admin      |
-| Password:  | admin      |
-| Role:      | ROLE\_USER |
-
-Pulse uses this default authentication profile if you do not specify a profile when starting the application, or if you specify the default profile at startup using the system property:
-
-``` pre
--Dspring.profiles.active=pulse.authentication.default
-```
-
-You can also configure Pulse to use a custom authentication configuration by activating the custom profile at startup with the system property:
-
-``` pre
--Dspring.profiles.active=pulse.authentication.custom
-```
-
-Using a custom configuration enables you to use either the simple in-memory User Details Service or an external properties file to authenticate users to the application. Even if you choose to use the default Spring security simple in-memory User Details Service, using a custom authentication configuration enables you to define your own user credentials rather than using the default "admin" account.
-
-**Note:**
-Geode also supports using an LDAP provider for Pulse authentication. See [Using LDAP Authentication in Pulse](#topic_AC9FFAA6FB044279BAED7A3E099E07AC__section_kjx_ylq_kq).
-
-To configure and use a custom authentication configuration:
-
-1.  Create a directory in which you will store the custom authentication configuration. For example:
-
-    ``` pre
-    $ mkdir /opt/pulse-config
-    ```
-
-2.  Ensure that the new directory you created is available on the Java CLASSPATH:
-
-    ``` pre
-    $ export CLASSPATH=$CLASSPATH:/opt/pulse-config
-    ```
-
-3.  Create a new text file named `pulse-authentication-custom.xml` in the new directory:
-
-    ``` pre
-    $ touch /opt/pulse-config/pulse-authentication-custom.xml
-    ```
-
-4.  Use a text editor to add the bean definitions for the authentication managers and providers that you want to use. The following listings show the example file contents for using the in-memory User Details Service and an external properties file:
-
-    **Example pulse-authentication-custom.xml for Spring simple in-memory User Details Service**
-
-    ``` pre
-    <beans:beans >
-      <authentication-manager>
-        <authentication-provider>
-          <user-service id="userDetailsService">
-            <user name="john" password="johnspassword" authorities="ROLE_USER " />
-            <user name="bob" password="bobspassword" authorities="ROLE_USER" />
-          </user-service>
-        </authentication-provider>
-      </authentication-manager>
-    </beans:beans>
-    ```
-
-    **Example pulse-authentication-custom.xml for external properties file**
-
-    ``` pre
-    <beans:beans >
-      <authentication-manager>
-        <authentication-provider>
-          <user-service properties="classpath:pulse-users.properties">
-          </user-service>
-        </authentication-provider>
-      </authentication-manager>
-    </beans:beans>
-    ```
-
-    With the file-based authentication mechanism, you define the names and passwords of valid Pulse users in a `pulse-users.properties` file, which must be available on the classpath of the Pulse application. Each line in the `pulse-users.properties` file defines the username, password, and access level of a Pulse user, using the format:
-
-    ``` pre
-    username=password,role,{enabled | disabled}
-    ```
-
-    The *role* entry must correspond to a valid Spring security role. For example, this entry shows the default "admin" user enabled with basic user access:
-
-    ``` pre
-    admin=admin,ROLE_USER,enabled
-    ```
-
-5.  When you start Geode members, specify the custom authentication profile using the `-Dspring.profiles.active=pulse.authentication.custom` system property. For example:
-
-    ``` pre
-    gfsh> start server --name=server1 --J=-Dspring.profiles.active=pulse.authentication.custom
-    ```
-
-6.  Start Pulse and log in using credentials that are authorized in the custom configuration.
-
-## <a id="topic_AC9FFAA6FB044279BAED7A3E099E07AC__section_kjx_ylq_kq" class="no-quick-link"></a>Using LDAP Authentication in Pulse
-
-This section provides instructions for using LDAP authentication with Pulse in either embedded or non-embedded mode.
-
-**Embedded Mode (Jetty)**
-
-To configure LDAP for Pulse:
-
-1.  Create a directory in which you will store the LDAP authentication configuration. For example:
-
-    ``` pre
-    $ mkdir /opt/pulse-config
-    ```
-
-    The directory name and location are up to you; just make sure you use the same name when specifying the CLASSPATH for the Geode JMX Manager process.
-
-2.  Create a file named `pulse-authentication-custom.xml` with contents similar to the following and place it under the directory you created in step 1. For example:
-
-    ``` pre
-    <beans:beans xmlns="http://www.springframework.org/schema/security"
-        xmlns:beans="http://www.springframework.org/schema/beans"
-        xmlns:context="http://www.springframework.org/schema/context"
-        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-        xsi:schemaLocation="http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans-3.2.xsd
-                   http://www.springframework.org/schema/security
-                   http://www.springframework.org/schema/security/spring-security-3.1.xsd
-                   http://www.springframework.org/schema/context
-        http://www.springframework.org/schema/context/spring-context-3.2.xsd">
-        
-            <ldap-server url="ldap://ldap.gemstone.com:389/dc=gemstone,dc=com" />
-
-            <authentication-manager>
-               <ldap-authentication-provider user-dn-pattern="uid={0},ou=ldapTesting"  
-                group-search-base="cn=PULSEUSER,ou=Groups" group-search-filter="memberUid={1}">
-               </ldap-authentication-provider>
-           </authentication-manager>
-     
-    </beans:beans>
-    ```
-
-    LDAP authentication in Pulse is hardcoded to use the PULSEUSER user group. Make sure you have created users for this group.
-
-3.  When starting the JMX Manager from gfsh, use the following commands:
-
-    ``` pre
-    gfsh>start locator --name=loc --J=-Dspring.profiles.active=pulse.authentication.custom --classpath=/opt/pulse-config
-    ```
-
-    or
-
-    ``` pre
-    gfsh>start server --name=server1 --J=-Dspring.profiles.active=pulse.authentication.custom --classpath=/opt/pulse-config
-    ```
-
-4.  Start Pulse and log in using credentials that are authorized in the LDAP configuration.
-
-**Non-Embedded (Standalone Web Server) Mode (Tomcat)**
-
-To configure LDAP for Pulse:
-
-1.  Create a directory in which you will store the LDAP authentication configuration. For example:
-
-    ``` pre
-    $ mkdir /opt/pulse-config
-    ```
-
-2.  The directory name and location of the Pulse configuration files are up to you; just make sure you use the same name when specifying the CLASSPATH for the Tomcat server.
-3.  Pass in the Spring profile when starting the web server. In Tomcat, all the VM arguments are set in the variable CATALINA\_OPTS, which you can define in your environment configuration file setenv.bat or setenv.sh.
-
-    For example, under %CATALINA\_HOME%/bin or $CATALINA\_HOME/bin, you can create a setenv batch file or script file (if not already present) that sets the following. On Windows:
-
-    ``` pre
-    set CATALINA_OPTS=-Dspring.profiles.active=pulse.authentication.custom
-    set "CLASSPATH=C:\pulse-config"
-    ```
-
-    or in Unix/Linux:
-
-    ``` pre
-    CATALINA_OPTS=-Dspring.profiles.active=pulse.authentication.custom
-    export CATALINA_OPTS
-    CLASSPATH=$CLASSPATH:/opt/pulse-config
-    ```
-
-4.  Create a file named `pulse-authentication-custom.xml` with contents similar to the following and place it under the directory you created in step 1. For example:
-
-    ``` pre
-    <beans:beans xmlns="http://www.springframework.org/schema/security"
-        xmlns:beans="http://www.springframework.org/schema/beans"
-        xmlns:context="http://www.springframework.org/schema/context"
-        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-        xsi:schemaLocation="http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans-3.2.xsd
-                   http://www.springframework.org/schema/security
-                   http://www.springframework.org/schema/security/spring-security-3.1.xsd
-                   http://www.springframework.org/schema/context
-        http://www.springframework.org/schema/context/spring-context-3.2.xsd">
-        
-            <ldap-server url="ldap://ldap.gemstone.com:389/dc=gemstone,dc=com" />
-
-            <authentication-manager>
-               <ldap-authentication-provider user-dn-pattern="uid={0},ou=ldapTesting"  
-                group-search-base="cn=PULSEUSER,ou=Groups" group-search-filter="memberUid={1}">
-               </ldap-authentication-provider>
-           </authentication-manager>
-     
-    </beans:beans>
-    ```
-
-    LDAP authentication in Pulse is hardcoded to use the PULSEUSER user group. Make sure you have created users for this group.
-
-5.  Deploy the application and start the server.
-
-## Configuring Pulse to Use HTTPS
-
-You can configure Pulse to use HTTPS in either embedded or non-embedded mode.
-
-In non-embedded mode where you are running Pulse on a standalone Web application server, you must use the Web server's SSL configuration to make the HTTP requests secure.
-
-In embedded mode, Geode uses an embedded Jetty server to host the
-Pulse Web application. To make the embedded server use HTTPS, you must
-enable the `http` SSL component in
-`gemfire.properties` or `gfsecurity.properties`.
-See [SSL](../../managing/security/ssl_overview.html) for details on configuring these parameters.
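-
-For example, a minimal sketch of the relevant `gemfire.properties` entries, assuming `web` is the SSL component name that covers the HTTP service and using placeholder keystore paths and passwords (see the SSL documentation above for the authoritative property names and values):
-
-``` pre
-# component name, keystore paths, and passwords below are placeholders/assumptions
-ssl-enabled-components=web
-ssl-keystore=/path/to/keystore.jks
-ssl-keystore-password=changeit
-ssl-truststore=/path/to/truststore.jks
-ssl-truststore-password=changeit
-```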
-
-These SSL parameters apply to all HTTP services hosted on the JMX Manager, which includes the following:
-
--   Developer REST API service
--   Management REST API service (for remote cluster management)
--   Pulse monitoring tool
-
-When the `http` SSL component is enabled, all HTTP services become
-SSL-enabled and you must configure your client applications
-accordingly. For SSL-enabled Pulse, you will need to configure your
-browsers with proper certificates.
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404" class="no-quick-link"></a>Using Pulse Views
-
-Pulse provides a variety of different views to help you monitor Geode clusters, members, and regions.
-
-The following sections provide an overview of the main Pulse views:
-
--   [Cluster View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8)
--   [Member View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF)
--   [Region View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D)
--   [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser)
--   [Alerts Widget](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_bfk_sc3_wn)
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8" class="no-quick-link"></a>Cluster View
-
-The cluster view is a high-level overview of the Geode distributed system. It is displayed immediately after you log into Pulse. Information displays around the perimeter of the cluster view show statistics such as memory usage, JVM pauses, and throughput. You can use the cluster view to drill down into details for individual members and regions in the distributed system.
-
-<img src="../../images/pulse_cluster_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_CC7B54903DF24030850E55965CDB6EC4" class="image imageleft" width="624" />
-
-Use these basic controls while in Cluster view:
-
-1.  Click Members or Data to display information about Geode members or data regions in the distributed system.
-2.  Click the display icons to display the Geode members using icon view, block view, or table view. Note that icon view is available only when displaying Members.
-
-    For example, the following shows Geode Members displayed in table view:
-
-    <img src="../../images/member_view_list.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_npw_sq3_wn" class="image" />
-    -   While in block view or table view, click the name of a Geode member to display additional information in the [Member View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF).
-    -   Click Topology, Server Groups, or Redundancy Zones to filter the view based on all members in the topology, configured server groups, or configured redundancy zones.
-    The following shows Geode Regions displayed in table view:
-    <img src="../../images/pulse-region-detail.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_glp_1jr_54" class="image" />
-    -   While in block view or table view, click the name of a Geode region to display additional information in the [Region View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D).
-
-3.  While in icon view, click a host machine icon to display the Geode members on that machine.
-4.  In the Alerts pane, click the severity tabs to filter the message display by the level of severity.
-
-**Cluster View Screen Components**
-
-The following table describes the data pieces displayed on the Cluster View screen.
-
-<table>
-<colgroup>
-<col width="50%" />
-<col width="50%" />
-</colgroup>
-<thead>
-<tr class="header">
-<th>Screen Component</th>
-<th>Description</th>
-</tr>
-</thead>
-<tbody>
-<tr class="odd">
-<td><strong>Cluster Status</strong></td>
-<td>Overall status of the distributed system being monitored. Possible statuses include Normal, Warning, or Severe.</td>
-</tr>
-<tr class="even">
-<td>Total Heap</td>
-<td>Total amount of memory (in GB) allocated to the Java heap across all members.</td>
-</tr>
-<tr class="odd">
-<td>Members</td>
-<td>Total number of members in the cluster.</td>
-</tr>
-<tr class="even">
-<td>Servers</td>
-<td>Total number of servers in the cluster.</td>
-</tr>
-<tr class="odd">
-<td>Clients</td>
-<td>Total number of clients in the cluster.</td>
-</tr>
-<tr class="even">
-<td>Locators</td>
-<td>Total number of locators in the cluster.</td>
-</tr>
-<tr class="odd">
-<td>Regions</td>
-<td>Total number of regions in the cluster.</td>
-</tr>
-<tr class="even">
-<td>Functions</td>
-<td>Total number of functions registered in the cluster.</td>
-</tr>
-<tr class="odd">
-<td>Unique CQs</td>
-<td>Total number of unique CQs. Corresponds to the UNIQUE_CQ_QUERY statistic.</td>
-</tr>
-<tr class="even">
-<td>Subscriptions</td>
-<td>Total number of client event subscriptions.</td>
-</tr>
-<tr class="odd">
-<td><strong>Cluster Members</strong></td>
-<td>Graphical, block, or table view of the members in the cluster.</td>
-</tr>
-<tr class="even">
-<td>Topology</td>
-<td>Organizes cluster members by DistributedMember Id.</td>
-</tr>
-<tr class="odd">
-<td>Server Groups</td>
-<td>Organizes cluster members by server group membership. If no server groups are configured, all members appear under the &quot;Default&quot; server group.</td>
-</tr>
-<tr class="even">
-<td>Redundancy Zones</td>
-<td>Organizes cluster members by redundancy zones. If no redundancy zones are configured, all members appear under the &quot;Default&quot; zone.</td>
-</tr>
-<tr class="odd">
-<td>Host Machine</td>
-<td>When you mouse over a machine icon in Topology View, a pop-up appears with the following machine statistics:
-<ul>
-<li><em>CPU Usage</em>. Percentage of CPU being used by Geode processes on the machine.</li>
-<li><em>Memory Usage</em>. Amount of memory (in MB) being used by Geode processes.</li>
-<li><em>Load Avg</em>. Average number of threads on the host machine that are in the run queue or are waiting for disk I/O over the last minute. Corresponds to the Linux System statistic loadAverage1. If the load average is not available, a negative value is shown.</li>
-<li><em>Sockets</em>. Number of sockets currently open on the machine.</li>
-</ul></td>
-</tr>
-<tr class="even">
-<td>Member</td>
-<td>When you mouse over a member icon in Graphical View, a pop-up appears with the following member statistics:
-<ul>
-<li><em>CPU Usage</em>. Percentage of CPU being used by the Geode member process.</li>
-<li><em>Threads</em>. Number of threads running on the member.</li>
-<li><em>JVM Pauses</em>. Number of times the JVM used by the member process has paused due to garbage collection or excessive CPU usage.</li>
-<li><em>Regions</em>. Number of regions hosted on the member process.</li>
-<li><em>Clients</em>. Number of clients currently connected to the member process.</li>
-<li><em>Gateway Sender</em>. Number of gateway senders configured on the member.</li>
-<li><em>Port</em>. Server port of the cache server member where clients can connect and perform cache operations.</li>
-<li><em>GemFire Version</em>. The version of the Geode member.</li>
-</ul></td>
-</tr>
-<tr class="odd">
-<td>Member</td>
-<td>In List View, the following data fields are displayed for each member:
-<ul>
-<li><em>ID</em>. DistributedMember Id of the member.</li>
-<li><em>Name</em>. Name of the member.</li>
-<li><em>Host</em>. Hostname or IP address where the member is running.</li>
-<li><em>Heap Usage</em>. Amount of JVM heap memory being used by the member process.</li>
-<li><em>CPU Usage</em>. Percentage of CPU being used by the Geode member process.</li>
-<li><em>Uptime</em>. How long the member has been up and running.</li>
-<li><em>Clients</em>. Number of clients currently connected to the member. It will have a value only if the member acts as a CacheServer.</li>
-</ul></td>
-</tr>
-<tr class="even">
-<td><strong>Key Statistics</strong></td>
-<td>Displays a few key performance measurements of the distributed system (over the last 15 minutes).</td>
-</tr>
-<tr class="odd">
-<td>Write/Sec</td>
-<td>Number of write operations per second that have occurred across the cluster. Each put/putAll operation counts as a write; for example, a putAll of 50 entries is counted as one write.</td>
-</tr>
-<tr class="even">
-<td>Read/Sec</td>
-<td>Number of read operations per second that have occurred across the cluster.</td>
-</tr>
-<tr class="odd">
-<td>Queries/Sec</td>
-<td>Number of queries per second that have been executed across the cluster.</td>
-</tr>
-<tr class="even">
-<td><strong>No. of JVM Pauses</strong></td>
-<td>Number of times the JVM has paused during the last five minutes to perform garbage collection.</td>
-</tr>
-<tr class="odd">
-<td><strong>WAN Information</strong></td>
-<td>If you have configured gateway senders or receivers for a multi-site (WAN) deployment, this box displays whether the remote cluster is reachable (working connectivity represented by a green triangle).</td>
-</tr>
-<tr class="even">
-<td><strong>Disk Throughput</strong></td>
-<td>Total disk throughput for all disks in cluster.</td>
-</tr>
-<tr class="odd">
-<td><strong>Alerts View</strong></td>
-<td>Displays alerts for the cluster.</td>
-</tr>
-</tbody>
-</table>
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF" class="no-quick-link"></a>Member View
-
-When you select an individual Geode member in Cluster View, Pulse displays the regions available on that member, as well as member-specific information such as the configured listen ports.
-
-<img src="../../images/pulse_member_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_EDBD3D333B2741DCAA5CB94719B507B7" class="image imageleft" width="624" />
-
-Use these basic controls while in Member View:
-
-1.  Click the display icons to display regions using block view or table view.
-2.  Use the drop down menu to select a specific member or search for specific members by name.
-3.  Click **Cluster View** to return to Cluster View. See [Cluster View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8).
-4.  Click **Data Browser** to query region data. See [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser).
-
-**Member View Screen Components**
-
-The following table describes the data elements displayed on the Member View screen.
-
-<table>
-<colgroup>
-<col width="50%" />
-<col width="50%" />
-</colgroup>
-<thead>
-<tr class="header">
-<th>Screen Component</th>
-<th>Description</th>
-</tr>
-</thead>
-<tbody>
-<tr class="odd">
-<td><strong>Member Status</strong></td>
-<td>Overall status of the member being monitored. Possible statuses include Normal, Warning, or Severe.</td>
-</tr>
-<tr class="even">
-<td>Regions</td>
-<td>Total number of regions hosted on the member.</td>
-</tr>
-<tr class="odd">
-<td>Threads</td>
-<td>Total number of threads being executed on the member.</td>
-</tr>
-<tr class="even">
-<td>Sockets</td>
-<td>Total number of sockets currently open on the member.</td>
-</tr>
-<tr class="odd">
-<td>Load Avg.</td>
-<td>Average number of threads on the member that are in the run queue or are waiting for disk I/O over the last minute. Corresponds to the Linux System statistic loadAverage1. If the load average is not available, a negative value is shown.</td>
-</tr>
-<tr class="even">
-<td>Clients</td>
-<td>Current number of client connections to the member.</td>
-</tr>
-<tr class="odd">
-<td><strong>Member Regions</strong></td>
-<td>Block or table view of the regions hosted on the member.</td>
-</tr>
-<tr class="even">
-<td>Regions</td>
-<td>When you mouse over a region in block view, a pop-up appears with the following data fields:
-<ul>
-<li><em>Name</em>. Region name.</li>
-<li><em>Type</em>. For example, REPLICATE, PARTITION.</li>
-<li><em>EntryCount</em>. Number of entries in the region.</li>
-<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries. For replicated regions, this field provides a value only if the eviction algorithm is set to EvictionAlgorithm#LRU_MEMORY. All partitioned regions have this value; however, the value includes redundant entries and also counts the size of all secondary entries on the node.</li>
-</ul></td>
-</tr>
-<tr class="odd">
-<td>Regions</td>
-<td>In table view, the following fields are listed for each region:
-<ul>
-<li><em>Name</em>. Region name.</li>
-<li><em>Type</em>. For example, REPLICATE, PARTITION.</li>
-<li><em>EntryCount</em>. Number of entries in the region.</li>
-<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries. For replicated regions, this field provides a value only if the eviction algorithm is set to EvictionAlgorithm#LRU_MEMORY. All partitioned regions have this value; however, the value includes redundant entries and also counts the size of all secondary entries on the node.</li>
-<li><em>Scope</em>. Scope configured for the region.</li>
-<li><em>Disk Store Name</em>. Name of disk stores (if any) associated with the region.</li>
-<li><em>Disk Synchronous</em>. True if writes to disk are set to synchronous and false if not. This field reflects the configured disk-synchronous region attribute.</li>
-<li><em>Gateway Enabled</em>. Whether gateway sender and receiver configurations have been defined on members hosting this region.</li>
-</ul></td>
-</tr>
-<tr class="even">
-<td><strong>Member Clients</strong></td>
-<td>In table view, the following fields are listed for each client:
-<ul>
-<li><em>Id</em>. DistributedMember ID of the client process.</li>
-<li><em>Name</em>. Name of the client process.</li>
-<li><em>Host</em>. Hostname or IP address of the client process.</li>
-<li><em>Connected</em>. Whether the client process is currently connected to the member.</li>
-<li><em>Queue Size</em>. The size of the queue used by the server to send events to a subscription-enabled client or to a client that has continuous queries running on the server.</li>
-<li><em>CPU Usage</em>. Percentage of CPU being used by the client process.</li>
-<li><em>Uptime</em>. Amount of time the client process has been running.</li>
-<li><em>Threads</em>. Threads being used by the member's clients.</li>
-<li><em>Gets</em>. Total number of successful get requests completed.</li>
-<li><em>Puts</em>. Total number of successful put requests completed.</li>
-</ul></td>
-</tr>
-<tr class="odd">
-<td><strong>Key Statistics</strong></td>
-<td>Displays a few key performance measurements for the member (over the last 15 minutes).</td>
-</tr>
-<tr class="even">
-<td>% CPU Usage</td>
-<td>Percentage of CPU used by the member.</td>
-</tr>
-<tr class="odd">
-<td>Read/Sec</td>
-<td>Number of read operations per second that have occurred on the member.</td>
-</tr>
-<tr class="even">
-<td>Write/Sec</td>
-<td>Number of write operations per second that have occurred on the member. Each put/putAll operation counts as a write; for example, a putAll of 50 entries is counted as one write.</td>
-</tr>
-<tr class="odd">
-<td><strong>Memory Usage</strong></td>
-<td>Total memory used on the member in MB.</td>
-</tr>
-<tr class="even">
-<td><strong>No. of JVM Pauses</strong></td>
-<td>Number of times the JVM has paused during the last five minutes due to garbage collection or excessive CPU usage.</td>
-</tr>
-<tr class="odd">
-<td><strong>WAN Information</strong></td>
-<td>Displays cluster information. This dialog box only appears if you have configured WAN functionality (gateway senders and gateway receivers).</td>
-</tr>
-<tr class="even">
-<td><strong>Disk Throughput</strong></td>
-<td>Rate of disk writes on the member.</td>
-</tr>
-</tbody>
-</table>
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D" class="no-quick-link"></a>Region View
-
-The Pulse Region View provides a comprehensive overview of all regions in the Geode distributed system:
-
-<img src="../../images/pulse_data_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_A533852E38654E79BE5628E938E170EB" class="image imageleft" width="624" />
-
-Use these basic controls while in Region View:
-
-1.  Click the display icons to display all members that host the region using block view or table view.
-
-    (Click the name of a member to change to that member's [Member View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF).)
-
-2.  Search for specific members that host the current region.
-3.  Hover over a member name to display information such as the region entry count, entry size, and throughput on that member.
-4.  Click [Cluster View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8) or [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser) to go to those screens.
-
-**Region View Screen Components**
-
-The following table describes the data elements displayed on the Region View screen.
-
-<table>
-<colgroup>
-<col width="50%" />
-<col width="50%" />
-</colgroup>
-<thead>
-<tr class="header">
-<th>Screen Component</th>
-<th>Description</th>
-</tr>
-</thead>
-<tbody>
-<tr class="odd">
-<td><strong>Region Members</strong></td>
-<td>Lists information about Geode members that host the region, either in block view or table view.</td>
-</tr>
-<tr class="even">
-<td>Region Member (Detail View)</td>
-<td>When you hover over a region member in block view, a pop-up appears with the following data fields:
-<ul>
-<li><em>Member Name</em>. The name of the Geode member hosting the region.</li>
-<li><em>EntryCount</em>. Number of entries for the region on that member.</li>
-<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries on that member. For replicated regions, this field provides a value only if the eviction algorithm is set to EvictionAlgorithm#LRU_MEMORY. All partitioned regions have this value; however, the value includes redundant entries and also counts the size of all secondary entries on the node.</li>
-<li><em>Accessor</em>. Indicates whether the member is an accessor member.</li>
-<li><em>Reads/Writes</em>. Summary of reads and writes served from memory and from disk stores over the last 15 minutes.</li>
-</ul></td>
-</tr>
-<tr class="odd">
-<td>Region Member (Table View)</td>
-<td>In table view, the following fields are listed for each region member:
-<ul>
-<li><em>ID</em>. The unique member ID.</li>
-<li><em>Name</em>. Region name.</li>
-<li><em>Host</em>. Member hostname.</li>
-<li><em>Heap Usage</em>. The total amount of heap used on the member in MB.</li>
-<li><em>CPU Usage</em>. CPU usage as a percent of available CPU.</li>
-<li><em>Uptime</em>. The amount of time elapsed since the member started.</li>
-<li><em>Accessor</em>. Indicates whether the member is an accessor member.</li>
-</ul></td>
-</tr>
-<tr class="even">
-<td><strong>Region Detail</strong></td>
-<td>When you have selected a region, the right hand pane displays the following information about the region:
-<ul>
-<li><em>Name</em>. Name of the region.</li>
-<li><em>Region Path</em>. Path for the region.</li>
-<li><em>Type</em>. For example, REPLICATE, PARTITION</li>
-<li><em>Members</em>. Number of members that are hosting the region.</li>
-<li><em>Empty Nodes</em>. Nodes where the region DataPolicy is defined as EMPTY or where LocalMaxMemory is set to 0.</li>
-<li><em>Entry Count</em>. Total number of entries in the region.</li>
-<li><em>Disk Usage</em>. Persistent data usage.</li>
-<li><em>Persistence</em>. Indicates whether the region's data is persisted to disk.</li>
-<li><em>Memory Usage</em>. The amount of memory used and total available memory (also shown as a percentage).</li>
-<li><em>Reads/Writes</em>. Summary of reads and writes served from memory and from disk stores over the last 15 minutes.</li>
-</ul></td>
-</tr>
-</tbody>
-</table>
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser" class="no-quick-link"></a>Data Browser
-
-The Pulse Data Browser enables you to query region data. Note that there are two key attributes available on DistributedSystemMXBean (see [List of Geode JMX MBeans](../../managing/management/list_of_mbeans.html#topic_4BCF867697C3456D96066BAD7F39FC8B)) that you can use to configure limits for the result sets displayed in Data Browser:
-
-   `QueryResultSetLimit` limits the number of rows that Data Browser queries return. By default, 1000 rows are displayed.
-   `QueryCollectionsDepth` limits the number of elements of a collection that Data Browser queries return. This attribute applies to query results that contain collections such as Map, List, and so forth. The default value is 100 elements.
-
-See the `org.apache.geode.management.DistributedSystemMXBean` JavaDocs for information on available MBean methods and attributes.
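-
-As a sketch of how these attributes can be adjusted, the following Java fragment sets both limits through a JMX connection to the Manager; the service URL, port, and MBean ObjectName shown here are assumptions that you should verify against your own deployment and the JavaDocs:
-
-``` pre
-import javax.management.Attribute;
-import javax.management.MBeanServerConnection;
-import javax.management.ObjectName;
-import javax.management.remote.JMXConnector;
-import javax.management.remote.JMXConnectorFactory;
-import javax.management.remote.JMXServiceURL;
-
-public class DataBrowserLimits {
-  public static void main(String[] args) throws Exception {
-    // Connect to the JMX Manager (host and port are placeholders).
-    JMXServiceURL url =
-        new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi");
-    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
-      MBeanServerConnection mbs = connector.getMBeanServerConnection();
-      // Assumed ObjectName of the DistributedSystemMXBean.
-      ObjectName system = new ObjectName("GemFire:service=System,type=Distributed");
-      // Limit Data Browser queries to 500 rows and 50 collection elements.
-      mbs.setAttribute(system, new Attribute("QueryResultSetLimit", 500));
-      mbs.setAttribute(system, new Attribute("QueryCollectionsDepth", 50));
-    }
-  }
-}
-```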
-
-The following shows an example Data Browser view:
-
-<img src="../../images/pulse-data-browser.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_hhp_dz2_44" class="image imageleft" width="624" />
-
-Use these basic controls while in Data Browser view:
-
-1.  Search for the name of a specific region.
-2.  Select one or more regions to display the Geode members that host those regions. The hosting Geode members appear in the Region Members section.
-3.  Select one or more members from the Region Members section to restrict query results to those members.
-4.  Type in the text of a query to execute. See [Querying](../../developing/querying_basics/chapter_overview.html).
-5.  Display a list of previously-executed queries. Double-click on a query from the history list to copy it to the Query Editor, or delete the query from your history.
-6.  Execute your query or clear the contents of the Query Editor.
-7.  View the current query results.
-8.  Export the query results to a text file.
-9.  Return to [Cluster View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8).
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_bfk_sc3_wn" class="no-quick-link"></a>Alerts Widget
-
-The Alerts Widget appears in the right portion of the screen and displays a list of alerts.
-
-The alerts displayed for the cluster are determined by the alertLevel field set in the DistributedSystemMXBean. By default, log messages with a level of SEVERE are shown as alerts. You can modify the level by using the `DistributedSystemMXBean.changeAlertLevel` method. See [System Alert Notifications](../../managing/management/notification_federation_and_alerts.html#topic_212EE5A2ABAB4E8E8EF71807C9ECEF1A__section_7463D13112D54406953416356835E290) for more information.
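-
-As a sketch of how the alert level can be changed over JMX (reusing the connection pattern from the Data Browser sketch above; the level string "WARNING" is an assumption, so check the MBean JavaDocs for the accepted values):
-
-``` pre
-// 'mbs' is an open MBeanServerConnection and 'system' is the assumed
-// DistributedSystemMXBean ObjectName from the earlier sketch.
-mbs.invoke(system, "changeAlertLevel",
-    new Object[] {"WARNING"}, new String[] {"java.lang.String"});
-```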
-
-<img src="../../images/pulse_alerts_widget.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_jrc_smt_qn" class="image" />
-
-Use these basic controls in the Alerts Widget:
-
-1.  Select an alert level to view only alerts with a specific severity.
-2.  Enter text in the search box to filter the list of alerts.
-3.  Select an alert and click Clear to remove it from the alert list.
-4.  Click **Clear All** to remove all alerts from the widget.
-5.  Double-click an alert to open a pop-up window that displays the full text of the alert message.
-6.  Click the check mark in an alert pop-up window to acknowledge the alert. Acknowledged alerts display a check mark in the list of alerts.
-7.  Triple-click the alert in the pop-up or in the alert list to select the message text. You can then copy and paste the text into another application.
-8.  Click the **X** to close the pop-up alert window.
-

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/system_requirements.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/system_requirements.html.md.erb b/geode-docs/tools_modules/pulse/system_requirements.html.md.erb
deleted file mode 100644
index 05dffe6..0000000
--- a/geode-docs/tools_modules/pulse/system_requirements.html.md.erb
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title:  Pulse System Requirements
----
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Verify that your system meets the installation and runtime requirements for Pulse.
-
-<a id="system_requirements__section_CBD5B04ACC554029B5C710CE8E244FEA"></a>
-The Pulse Web application has been tested for compatibility with the following Web browsers:
-
--   Internet Explorer 9.0.8112.16421
--   Safari 5.1.7 for Windows
--   Google Chrome 22.0.1229.79 m
--   Mozilla Firefox 16.0.1
-
-Pulse has been tested for standalone deployment on Tomcat and Jetty.
-Pulse may work with other operating systems and browsers upon which it has not been tested.
-
-

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/redis_adapter.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/redis_adapter.html.md.erb b/geode-docs/tools_modules/redis_adapter.html.md.erb
index 47da3fc..697fc4e 100644
--- a/geode-docs/tools_modules/redis_adapter.html.md.erb
+++ b/geode-docs/tools_modules/redis_adapter.html.md.erb
@@ -18,8 +18,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 -->
-<a id="topic_523F6DE33FE54307BBE8F83BB7D9355D"></a>
-
 
 The Geode Redis adapter allows Geode to function as a drop-in replacement for a Redis data store, letting Redis applications take advantage of Geode’s scaling capabilities without changing their client code. Redis clients connect to a Geode server in the same way they connect to a Redis server, using an IP address and a port number.
 


[35/43] geode git commit: Cleanup CacheClientUpdater

Posted by kl...@apache.org.
Cleanup CacheClientUpdater


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/07efaa8e
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/07efaa8e
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/07efaa8e

Branch: refs/heads/feature/GEODE-2632-17
Commit: 07efaa8ee933bb83d39db39bacf91f3a481509ad
Parents: f8786f5
Author: Kirk Lund <kl...@apache.org>
Authored: Mon May 22 14:49:21 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:10 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/geode/Instantiator.java     |  99 ++-
 .../internal/cache/tier/CachedRegionHelper.java |  18 +-
 .../cache/tier/sockets/CacheClientUpdater.java  | 850 ++++++++++---------
 .../cache/tier/sockets/ChunkedMessage.java      |   6 +-
 .../internal/cache/tier/sockets/Message.java    |  61 +-
 .../cache/tier/sockets/ServerConnection.java    |   5 +-
 ...arallelGatewaySenderOperationsDUnitTest.java |   3 +-
 7 files changed, 528 insertions(+), 514 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/07efaa8e/geode-core/src/main/java/org/apache/geode/Instantiator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/Instantiator.java b/geode-core/src/main/java/org/apache/geode/Instantiator.java
index c727e5b..ea42057 100644
--- a/geode-core/src/main/java/org/apache/geode/Instantiator.java
+++ b/geode-core/src/main/java/org/apache/geode/Instantiator.java
@@ -20,16 +20,16 @@ import org.apache.geode.internal.cache.tier.sockets.ClientProxyMembershipID;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 
 /**
- * {@code Instantiator} allows classes that implement {@link DataSerializable} to be registered
- * with the data serialization framework. Knowledge of {@code DataSerializable} classes allows
- * the framework to optimize how instances of those classes are data serialized.
+ * {@code Instantiator} allows classes that implement {@link DataSerializable} to be registered with
+ * the data serialization framework. Knowledge of {@code DataSerializable} classes allows the
+ * framework to optimize how instances of those classes are data serialized.
  *
  * <P>
  *
  * Ordinarily, when a {@code DataSerializable} object is written using
  * {@link DataSerializer#writeObject(Object, java.io.DataOutput)}, a special marker class id is
- * written to the stream followed by the class name of the {@code DataSerializable} object.
- * After the marker class id is read by {@link DataSerializer#readObject} it performs the following
+ * written to the stream followed by the class name of the {@code DataSerializable} object. After
+ * the marker class id is read by {@link DataSerializer#readObject} it performs the following
  * operations,
  *
  * <OL>
@@ -44,20 +44,20 @@ import org.apache.geode.internal.i18n.LocalizedStrings;
  *
  * </OL>
  *
- * However, if a {@code DataSerializable} class is {@linkplain #register(Instantiator)
- * registered} with the data serialization framework and assigned a unique class id, an important
- * optimization can be performed that avoid the expense of using reflection to instantiate the
+ * However, if a {@code DataSerializable} class is {@linkplain #register(Instantiator) registered}
+ * with the data serialization framework and assigned a unique class id, an important optimization
+ * can be performed that avoid the expense of using reflection to instantiate the
  * {@code DataSerializable} class. When the object is written using
  * {@link DataSerializer#writeObject(Object, java.io.DataOutput)}, the object's registered class id
  * is written to the stream. Consequently, when the data is read from the stream, the
- * {@link #newInstance} method of the appropriate {@code Instantiator} instance is invoked to
- * create an "empty" instance of the {@code DataSerializable} instead of using reflection to
- * create the new instance.
+ * {@link #newInstance} method of the appropriate {@code Instantiator} instance is invoked to create
+ * an "empty" instance of the {@code DataSerializable} instead of using reflection to create the new
+ * instance.
  *
  * <P>
  *
- * Commonly, a {@code DataSerializable} class will register itself with the
- * {@code Instantiator} in a static initializer as shown in the below example code.
+ * Commonly, a {@code DataSerializable} class will register itself with the {@code Instantiator} in
+ * a static initializer as shown in the below example code.
  *
  * <PRE>
 public class User implements DataSerializable {
@@ -98,20 +98,19 @@ public class User implements DataSerializable {
 }
  * </PRE>
  *
- * {@code Instantiator}s may be distributed to other members of the distributed system when
- * they are registered. Consider the following scenario in which VM1 and VM2 are members of the same
+ * {@code Instantiator}s may be distributed to other members of the distributed system when they are
+ * registered. Consider the following scenario in which VM1 and VM2 are members of the same
  * distributed system. Both VMs define the sameRegion and VM2's region replicates the contents of
- * VM1's using replication. VM1 puts an instance of the above {@code User} class into the
- * region. The act of instantiating {@code User} will load the {@code User} class and
- * invoke its static initializer, thus registering the {@code Instantiator} with the data
- * serialization framework. Because the region is a replicate, the {@code User} will be data
- * serialized and sent to VM2. However, when VM2 attempts to data deserialize the {@code User},
- * its {@code Instantiator} will not necessarily be registered because {@code User}'s
- * static initializer may not have been invoked yet. As a result, an exception would be logged while
- * deserializing the {@code User} and the replicate would not appear to have the new value. So,
- * in order to ensure that the {@code Instantiator} is registered in VM2, the data
- * serialization framework distributes a message to each member when an {@code Instantiator} is
- * {@linkplain #register(Instantiator) registered}.
+ * VM1's using replication. VM1 puts an instance of the above {@code User} class into the region.
+ * The act of instantiating {@code User} will load the {@code User} class and invoke its static
+ * initializer, thus registering the {@code Instantiator} with the data serialization framework.
+ * Because the region is a replicate, the {@code User} will be data serialized and sent to VM2.
+ * However, when VM2 attempts to data deserialize the {@code User}, its {@code Instantiator} will
+ * not necessarily be registered because {@code User}'s static initializer may not have been invoked
+ * yet. As a result, an exception would be logged while deserializing the {@code User} and the
+ * replicate would not appear to have the new value. So, in order to ensure that the
+ * {@code Instantiator} is registered in VM2, the data serialization framework distributes a message
+ * to each member when an {@code Instantiator} is {@linkplain #register(Instantiator) registered}.
  * <p>
  * Note that the framework does not require that an {@code Instantiator} be
  * {@link java.io.Serializable}, but it does require that it provide a
@@ -140,15 +139,15 @@ public abstract class Instantiator {
   private ClientProxyMembershipID context;
 
   /**
-   * Registers a {@code DataSerializable} class with the data serialization framework. This
-   * method is usually invoked from the static initializer of a class that implements
+   * Registers a {@code DataSerializable} class with the data serialization framework. This method
+   * is usually invoked from the static initializer of a class that implements
    * {@code DataSerializable}.
    *
-   * @param instantiator An {@code Instantiator} whose {@link #newInstance} method is invoked
-   *        when an object is data deserialized.
+   * @param instantiator An {@code Instantiator} whose {@link #newInstance} method is invoked when
+   *        an object is data deserialized.
    *
-   * @throws IllegalStateException If class {@code c} is already registered with a different
-   *         class id, or another class has already been registered with id {@code classId}
+   * @throws IllegalStateException If class {@code c} is already registered with a different class
+   *         id, or another class has already been registered with id {@code classId}
    * @throws NullPointerException If {@code instantiator} is {@code null}.
    */
   public static synchronized void register(Instantiator instantiator) {
@@ -156,16 +155,16 @@ public abstract class Instantiator {
   }
 
   /**
-   * Registers a {@code DataSerializable} class with the data serialization framework. This
-   * method is usually invoked from the static initializer of a class that implements
+   * Registers a {@code DataSerializable} class with the data serialization framework. This method
+   * is usually invoked from the static initializer of a class that implements
    * {@code DataSerializable}.
    *
-   * @param instantiator An {@code Instantiator} whose {@link #newInstance} method is invoked
-   *        when an object is data deserialized.
+   * @param instantiator An {@code Instantiator} whose {@link #newInstance} method is invoked when
+   *        an object is data deserialized.
    *
-   * @param distribute True if the registered {@code Instantiator} has to be distributed to
-   *        other members of the distributed system. Note that if distribute is set to false it may
-   *        still be distributed in some cases.
+   * @param distribute True if the registered {@code Instantiator} has to be distributed to other
+   *        members of the distributed system. Note that if distribute is set to false it may still
+   *        be distributed in some cases.
    *
    * @throws IllegalArgumentException If class {@code c} is already registered with a different
    *         class id, or another class has already been registered with id {@code classId}
@@ -182,11 +181,11 @@ public abstract class Instantiator {
    *
    * @param c The {@code DataSerializable} class to register. This class must have a static
    *        initializer that registers this {@code Instantiator}.
-   * @param classId A unique id for class {@code c}. The {@code classId} must not be zero.
-   *        This has been an {@code int} since dsPhase1.
+   * @param classId A unique id for class {@code c}. The {@code classId} must not be zero. This has
+   *        been an {@code int} since dsPhase1.
    *
-   * @throws IllegalArgumentException If {@code c} does not implement
-   *         {@code DataSerializable}, {@code classId} is less than or equal to zero.
+   * @throws IllegalArgumentException If {@code c} does not implement {@code DataSerializable},
+   *         {@code classId} is less than or equal to zero.
    * @throws NullPointerException If {@code c} is {@code null}
    */
   public Instantiator(Class<? extends DataSerializable> c, int classId) {
@@ -202,8 +201,8 @@ public abstract class Instantiator {
     }
 
     if (classId == 0) {
-      throw new IllegalArgumentException(LocalizedStrings.Instantiator_CLASS_ID_0_MUST_NOT_BE_0
-          .toLocalizedString(classId));
+      throw new IllegalArgumentException(
+          LocalizedStrings.Instantiator_CLASS_ID_0_MUST_NOT_BE_0.toLocalizedString(classId));
     }
 
     this.clazz = c;
@@ -211,16 +210,15 @@ public abstract class Instantiator {
   }
 
   /**
-   * Creates a new "empty" instance of a {@code DataSerializable} class whose state will be
-   * filled in by invoking its {@link DataSerializable#fromData fromData} method.
+   * Creates a new "empty" instance of a {@code DataSerializable} class whose state will be filled
+   * in by invoking its {@link DataSerializable#fromData fromData} method.
    *
    * @see DataSerializer#readObject
    */
   public abstract DataSerializable newInstance();
 
   /**
-   * Returns the {@code DataSerializable} class that is instantiated by this
-   * {@code Instantiator}.
+   * Returns the {@code DataSerializable} class that is instantiated by this {@code Instantiator}.
    */
   public Class<? extends DataSerializable> getInstantiatedClass() {
     return this.clazz;
@@ -241,8 +239,7 @@ public abstract class Instantiator {
   }
 
   /**
-   * Returns the unique {@code eventId} of this {@code Instantiator}. For internal use
-   * only.
+   * Returns the unique {@code eventId} of this {@code Instantiator}. For internal use only.
    */
   public Object/* EventID */ getEventId() {
     return this.eventId;

http://git-wip-us.apache.org/repos/asf/geode/blob/07efaa8e/geode-core/src/main/java/org/apache/geode/internal/cache/tier/CachedRegionHelper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/CachedRegionHelper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/CachedRegionHelper.java
index a82a804..940da95 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/CachedRegionHelper.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/CachedRegionHelper.java
@@ -27,21 +27,19 @@ import org.apache.geode.internal.cache.InternalCache;
 public class CachedRegionHelper {
 
   private final InternalCache cache;
+
   private volatile boolean shutdown = false;
-  // private Map regions;
-  private volatile int slowEmulationSleep = 0;
 
-  public CachedRegionHelper(InternalCache c) {
-    this.cache = c;
-    // this.regions = new WeakHashMap();
+  public CachedRegionHelper(InternalCache cache) {
+    this.cache = cache;
   }
 
   public void checkCancelInProgress(Throwable e) throws CancelException {
-    cache.getCancelCriterion().checkCancelInProgress(e);
+    this.cache.getCancelCriterion().checkCancelInProgress(e);
   }
 
   public Region getRegion(String name) {
-    return cache.getRegion(name);
+    return this.cache.getRegion(name);
   }
 
   public InternalCache getCache() {
@@ -53,12 +51,14 @@ public class CachedRegionHelper {
   }
 
   public boolean isShutdown() {
-    return shutdown || cache.getCancelCriterion().isCancelInProgress();
+    return this.shutdown || this.cache.getCancelCriterion().isCancelInProgress();
   }
 
+  /**
+   * CachedRegionHelper#close() does nothing
+   */
   public void close() {
     // cache = null;
-    // regions = null;
   }
 
   /**


[37/43] geode git commit: Cleanup Message class

Posted by kl...@apache.org.
Cleanup Message class


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/f8786f53
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/f8786f53
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/f8786f53

Branch: refs/heads/feature/GEODE-2632-17
Commit: f8786f53ac951dd27dc8b0cd6e466783cbd7b1ad
Parents: 7ca7c2c
Author: Kirk Lund <kl...@apache.org>
Authored: Mon May 22 13:47:55 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:10 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/geode/Instantiator.java     | 112 ++--
 .../geode/cache/client/internal/AbstractOp.java |   2 +-
 .../geode/cache/client/internal/PingOp.java     |  10 +-
 .../cache/tier/sockets/CacheClientUpdater.java  |  17 +-
 .../cache/tier/sockets/ChunkedMessage.java      |  19 +-
 .../internal/cache/tier/sockets/Message.java    | 591 ++++++++++---------
 .../cache/tier/sockets/ServerConnection.java    |  65 +-
 .../apache/geode/internal/tcp/Connection.java   |   2 +-
 .../org/apache/geode/internal/util/IOUtils.java |   6 +-
 .../cache/tier/sockets/MessageJUnitTest.java    |  64 +-
 .../internal/JUnit4DistributedTestCase.java     |   2 +-
 ...arallelGatewaySenderOperationsDUnitTest.java |  16 +-
 12 files changed, 448 insertions(+), 458 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-core/src/main/java/org/apache/geode/Instantiator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/Instantiator.java b/geode-core/src/main/java/org/apache/geode/Instantiator.java
index 3c1ca06..c727e5b 100644
--- a/geode-core/src/main/java/org/apache/geode/Instantiator.java
+++ b/geode-core/src/main/java/org/apache/geode/Instantiator.java
@@ -20,15 +20,15 @@ import org.apache.geode.internal.cache.tier.sockets.ClientProxyMembershipID;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 
 /**
- * <code>Instantiator</code> allows classes that implement {@link DataSerializable} to be registered
- * with the data serialization framework. Knowledge of <code>DataSerializable</code> classes allows
+ * {@code Instantiator} allows classes that implement {@link DataSerializable} to be registered
+ * with the data serialization framework. Knowledge of {@code DataSerializable} classes allows
  * the framework to optimize how instances of those classes are data serialized.
  *
  * <P>
  *
- * Ordinarily, when a <code>DataSerializable</code> object is written using
+ * Ordinarily, when a {@code DataSerializable} object is written using
  * {@link DataSerializer#writeObject(Object, java.io.DataOutput)}, a special marker class id is
- * written to the stream followed by the class name of the <code>DataSerializable</code> object.
+ * written to the stream followed by the class name of the {@code DataSerializable} object.
  * After the marker class id is read by {@link DataSerializer#readObject} it performs the following
  * operations,
  *
@@ -44,23 +44,20 @@ import org.apache.geode.internal.i18n.LocalizedStrings;
  *
  * </OL>
  *
- * However, if a <code>DataSerializable</code> class is {@linkplain #register(Instantiator)
+ * However, if a {@code DataSerializable} class is {@linkplain #register(Instantiator)
  * registered} with the data serialization framework and assigned a unique class id, an important
  * optimization can be performed that avoid the expense of using reflection to instantiate the
- * <code>DataSerializable</code> class. When the object is written using
+ * {@code DataSerializable} class. When the object is written using
  * {@link DataSerializer#writeObject(Object, java.io.DataOutput)}, the object's registered class id
  * is written to the stream. Consequently, when the data is read from the stream, the
- * {@link #newInstance} method of the appropriate <code>Instantiator</code> instance is invoked to
- * create an "empty" instance of the <code>DataSerializable</code> instead of using reflection to
+ * {@link #newInstance} method of the appropriate {@code Instantiator} instance is invoked to
+ * create an "empty" instance of the {@code DataSerializable} instead of using reflection to
  * create the new instance.
  *
  * <P>
  *
- * Commonly, a <code>DataSerializable</code> class will register itself with the
- * <code>Instantiator</code> in a static initializer as shown in the below example code.
- *
- * <!-- The source code for the CompanySerializer class resides in tests/com/examples/ds/User.java
- * Please keep the below code snippet in sync with that file. -->
+ * Commonly, a {@code DataSerializable} class will register itself with the
+ * {@code Instantiator} in a static initializer as shown in the below example code.
  *
  * <PRE>
 public class User implements DataSerializable {
@@ -101,22 +98,22 @@ public class User implements DataSerializable {
 }
  * </PRE>
  *
- * <code>Instantiator</code>s may be distributed to other members of the distributed system when
+ * {@code Instantiator}s may be distributed to other members of the distributed system when
  * they are registered. Consider the following scenario in which VM1 and VM2 are members of the same
  * distributed system. Both VMs define the sameRegion and VM2's region replicates the contents of
- * VM1's using replication. VM1 puts an instance of the above <code>User</code> class into the
- * region. The act of instantiating <code>User</code> will load the <code>User</code> class and
- * invoke its static initializer, thus registering the <code>Instantiator</code> with the data
- * serialization framework. Because the region is a replicate, the <code>User</code> will be data
- * serialized and sent to VM2. However, when VM2 attempts to data deserialize the <code>User</code>,
- * its <code>Instantiator</code> will not necessarily be registered because <code>User</code>'s
+ * VM1's using replication. VM1 puts an instance of the above {@code User} class into the
+ * region. The act of instantiating {@code User} will load the {@code User} class and
+ * invoke its static initializer, thus registering the {@code Instantiator} with the data
+ * serialization framework. Because the region is a replicate, the {@code User} will be data
+ * serialized and sent to VM2. However, when VM2 attempts to data deserialize the {@code User},
+ * its {@code Instantiator} will not necessarily be registered because {@code User}'s
  * static initializer may not have been invoked yet. As a result, an exception would be logged while
- * deserializing the <code>User</code> and the replicate would not appear to have the new value. So,
- * in order to ensure that the <code>Instantiator</code> is registered in VM2, the data
- * serialization framework distributes a message to each member when an <code>Instantiator</code> is
+ * deserializing the {@code User} and the replicate would not appear to have the new value. So,
+ * in order to ensure that the {@code Instantiator} is registered in VM2, the data
+ * serialization framework distributes a message to each member when an {@code Instantiator} is
  * {@linkplain #register(Instantiator) registered}.
  * <p>
- * Note that the framework does not require that an <code>Instantiator</code> be
+ * Note that the framework does not require that an {@code Instantiator} be
  * {@link java.io.Serializable}, but it does require that it provide a
  * {@linkplain #Instantiator(Class, int) two-argument constructor}.
  *
@@ -133,63 +130,64 @@ public abstract class Instantiator {
    */
   private Class<? extends DataSerializable> clazz;
 
-  /** The id of this <code>Instantiator</code> */
+  /** The id of this {@code Instantiator} */
   private int id;
 
-  /** The eventId of this <code>Instantiator</code> */
+  /** The eventId of this {@code Instantiator} */
   private EventID eventId;
 
-  /** The originator of this <code>Instantiator</code> */
+  /** The originator of this {@code Instantiator} */
   private ClientProxyMembershipID context;
 
   /**
-   * Registers a <code>DataSerializable</code> class with the data serialization framework. This
+   * Registers a {@code DataSerializable} class with the data serialization framework. This
    * method is usually invoked from the static initializer of a class that implements
-   * <code>DataSerializable</code>.
+   * {@code DataSerializable}.
    *
-   * @param instantiator An <code>Instantiator</code> whose {@link #newInstance} method is invoked
+   * @param instantiator An {@code Instantiator} whose {@link #newInstance} method is invoked
    *        when an object is data deserialized.
    *
-   * @throws IllegalStateException If class <code>c</code> is already registered with a different
-   *         class id, or another class has already been registered with id <code>classId</code>
-   * @throws NullPointerException If <code>instantiator</code> is <code>null</code>.
+   * @throws IllegalStateException If class {@code c} is already registered with a different
+   *         class id, or another class has already been registered with id {@code classId}
+   * @throws NullPointerException If {@code instantiator} is {@code null}.
    */
   public static synchronized void register(Instantiator instantiator) {
     InternalInstantiator.register(instantiator, true);
   }
 
   /**
-   * Registers a <code>DataSerializable</code> class with the data serialization framework. This
+   * Registers a {@code DataSerializable} class with the data serialization framework. This
    * method is usually invoked from the static initializer of a class that implements
-   * <code>DataSerializable</code>.
+   * {@code DataSerializable}.
    *
-   * @param instantiator An <code>Instantiator</code> whose {@link #newInstance} method is invoked
+   * @param instantiator An {@code Instantiator} whose {@link #newInstance} method is invoked
    *        when an object is data deserialized.
    *
-   * @param distribute True if the registered <code>Instantiator</code> has to be distributed to
+   * @param distribute True if the registered {@code Instantiator} has to be distributed to
    *        other members of the distributed system. Note that if distribute is set to false it may
    *        still be distributed in some cases.
    *
-   * @throws IllegalArgumentException If class <code>c</code> is already registered with a different
-   *         class id, or another class has already been registered with id <code>classId</code>
-   * @throws NullPointerException If <code>instantiator</code> is <code>null</code>.
+   * @throws IllegalArgumentException If class {@code c} is already registered with a different
+   *         class id, or another class has already been registered with id {@code classId}
+   * @throws NullPointerException If {@code instantiator} is {@code null}.
    * @deprecated as of 9.0 use {@link Instantiator#register(Instantiator)} instead
    */
+  @Deprecated
   public static synchronized void register(Instantiator instantiator, boolean distribute) {
     InternalInstantiator.register(instantiator, distribute);
   }
 
   /**
-   * Creates a new <code>Instantiator</code> that instantiates a given class.
+   * Creates a new {@code Instantiator} that instantiates a given class.
    *
-   * @param c The <code>DataSerializable</code> class to register. This class must have a static
-   *        initializer that registers this <code>Instantiator</code>.
-   * @param classId A unique id for class <code>c</code>. The <code>classId</code> must not be zero.
-   *        This has been an <code>int</code> since dsPhase1.
+   * @param c The {@code DataSerializable} class to register. This class must have a static
+   *        initializer that registers this {@code Instantiator}.
+   * @param classId A unique id for class {@code c}. The {@code classId} must not be zero.
+   *        This has been an {@code int} since dsPhase1.
    *
-   * @throws IllegalArgumentException If <code>c</code> does not implement
-   *         <code>DataSerializable</code>, <code>classId</code> is less than or equal to zero.
-   * @throws NullPointerException If <code>c</code> is <code>null</code>
+   * @throws IllegalArgumentException If {@code c} does not implement
+   *         {@code DataSerializable}, {@code classId} is less than or equal to zero.
+   * @throws NullPointerException If {@code c} is {@code null}
    */
   public Instantiator(Class<? extends DataSerializable> c, int classId) {
     if (c == null) {
@@ -205,7 +203,7 @@ public abstract class Instantiator {
 
     if (classId == 0) {
       throw new IllegalArgumentException(LocalizedStrings.Instantiator_CLASS_ID_0_MUST_NOT_BE_0
-          .toLocalizedString(Integer.valueOf(classId)));
+          .toLocalizedString(classId));
     }
 
     this.clazz = c;
@@ -213,7 +211,7 @@ public abstract class Instantiator {
   }
 
   /**
-   * Creates a new "empty" instance of a <Code>DataSerializable</code> class whose state will be
+   * Creates a new "empty" instance of a {@code DataSerializable} class whose state will be
    * filled in by invoking its {@link DataSerializable#fromData fromData} method.
    *
    * @see DataSerializer#readObject
@@ -221,29 +219,29 @@ public abstract class Instantiator {
   public abstract DataSerializable newInstance();
 
   /**
-   * Returns the <code>DataSerializable</code> class that is instantiated by this
-   * <code>Instantiator</code>.
+   * Returns the {@code DataSerializable} class that is instantiated by this
+   * {@code Instantiator}.
    */
   public Class<? extends DataSerializable> getInstantiatedClass() {
     return this.clazz;
   }
 
   /**
-   * Returns the unique <code>id</code> of this <code>Instantiator</code>.
+   * Returns the unique {@code id} of this {@code Instantiator}.
    */
   public int getId() {
     return this.id;
   }
 
   /**
-   * sets the unique <code>eventId</code> of this <code>Instantiator</code>. For internal use only.
+   * sets the unique {@code eventId} of this {@code Instantiator}. For internal use only.
    */
   public void setEventId(Object/* EventID */ eventId) {
     this.eventId = (EventID) eventId;
   }
 
   /**
-   * Returns the unique <code>eventId</code> of this <code>Instantiator</code>. For internal use
+   * Returns the unique {@code eventId} of this {@code Instantiator}. For internal use
    * only.
    */
   public Object/* EventID */ getEventId() {
@@ -251,14 +249,14 @@ public abstract class Instantiator {
   }
 
   /**
-   * sets the context of this <code>Instantiator</code>. For internal use only.
+   * sets the context of this {@code Instantiator}. For internal use only.
    */
   public void setContext(Object/* ClientProxyMembershipID */ context) {
     this.context = (ClientProxyMembershipID) context;
   }
 
   /**
-   * Returns the context of this <code>Instantiator</code>. For internal use only.
+   * Returns the context of this {@code Instantiator}. For internal use only.
    */
   public Object/* ClientProxyMembershipID */ getContext() {
     return this.context;
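
As a point of reference for the Javadoc rewritten above, the registration pattern it describes looks roughly like the sketch below. This is an illustration based on the Javadoc in this diff, not code from the commit; the class name MyValue and the class id 77 are invented, and only the Instantiator constructor, register(Instantiator), newInstance(), toData and fromData shown or referenced in this diff are assumed.

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    import org.apache.geode.DataSerializable;
    import org.apache.geode.Instantiator;

    public class MyValue implements DataSerializable {

      static {
        // Register once from a static initializer so DataSerializer.readObject
        // can create MyValue instances without reflection.
        Instantiator.register(new Instantiator(MyValue.class, 77) { // class id must not be zero
          @Override
          public DataSerializable newInstance() {
            return new MyValue(); // "empty" instance, later filled in by fromData
          }
        });
      }

      private int count;

      public MyValue() {
        // no-arg constructor used by newInstance above
      }

      @Override
      public void toData(DataOutput out) throws IOException {
        out.writeInt(this.count);
      }

      @Override
      public void fromData(DataInput in) throws IOException {
        this.count = in.readInt();
      }
    }

With the registration in place, DataSerializer.writeObject writes the registered class id instead of the full class name, which is the optimization the Javadoc above describes.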

http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-core/src/main/java/org/apache/geode/cache/client/internal/AbstractOp.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/AbstractOp.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/AbstractOp.java
index a0cb7d4..7af4f4f 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/AbstractOp.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/AbstractOp.java
@@ -228,7 +228,7 @@ public abstract class AbstractOp implements Op {
   protected abstract Object processResponse(Message msg) throws Exception;
 
   /**
-   * Return true of <code>msgType</code> indicates the operation had an error on the server.
+   * Return true of <code>messageType</code> indicates the operation had an error on the server.
    */
   protected abstract boolean isErrorResponse(int msgType);
 

http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-core/src/main/java/org/apache/geode/cache/client/internal/PingOp.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/PingOp.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/PingOp.java
index cc30f1c..2e52542 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/PingOp.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/PingOp.java
@@ -14,7 +14,6 @@
  */
 package org.apache.geode.cache.client.internal;
 
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.ServerLocation;
 import org.apache.geode.internal.cache.tier.MessageType;
 import org.apache.geode.internal.cache.tier.sockets.Message;
@@ -25,6 +24,7 @@ import org.apache.geode.internal.cache.tier.sockets.Message;
  * @since GemFire 5.7
  */
 public class PingOp {
+
   /**
    * Ping the specified server to see if it is still alive
    * 
@@ -47,13 +47,13 @@ public class PingOp {
     /**
      * @throws org.apache.geode.SerializationException if serialization fails
      */
-    public PingOpImpl() {
+    PingOpImpl() {
       super(MessageType.PING, 0);
     }
 
     @Override
     protected void processSecureBytes(Connection cnx, Message message) throws Exception {
-      Message.messageType.set(null);
+      Message.MESSAGE_TYPE.set(null);
     }
 
     @Override
@@ -64,9 +64,9 @@ public class PingOp {
     @Override
     protected void sendMessage(Connection cnx) throws Exception {
       getMessage().clearMessageHasSecurePartFlag();
-      startTime = System.currentTimeMillis();
+      this.startTime = System.currentTimeMillis();
       getMessage().send(false);
-      Message.messageType.set(MessageType.PING);
+      Message.MESSAGE_TYPE.set(MessageType.PING);
     }
 
     @Override

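The PingOpImpl changes above are one half of the OOM workaround referenced in the Message diff further down: the sender records MessageType.PING in the Message.MESSAGE_TYPE thread local right after the request is written, and readPayloadFields later consults (and clears) that hint to cap how many parts it will allocate for the reply. A minimal sketch of that hand-off, with invented names and values, might look like this:

    import java.io.IOException;

    // Illustrative only: OP_HINT stands in for Message.MESSAGE_TYPE, and PING plus the
    // ten-part cap mirror the guard added to readPayloadFields in the Message diff.
    final class PingHintSketch {
      static final ThreadLocal<Integer> OP_HINT = new ThreadLocal<>();
      static final int PING = 5; // invented value for the sketch

      static void sendPing() {
        // ... write the ping request to the socket ...
        OP_HINT.set(PING); // mark this thread before its response is read
      }

      static void readResponseHeader(int numParts) throws IOException {
        Integer hint = OP_HINT.get();
        OP_HINT.set(null); // clear the hint immediately
        if (hint != null && hint == PING && numParts > 10) {
          // a ping reply should never carry this many parts; refuse to allocate them
          throw new IOException("Part count " + numParts + " is inconsistent for a ping");
        }
        // ... read the declared parts normally ...
      }
    }
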
http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java
index 291db65..7698550 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java
@@ -572,20 +572,9 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    * the server.
    */
   private Message initializeMessage() {
-    Message _message = new Message(2, Version.CURRENT);
-    try {
-      _message.setComms(socket, in, out, commBuffer, this.stats);
-    } catch (IOException e) {
-      if (!quitting()) {
-        if (logger.isDebugEnabled()) {
-          logger.debug(
-              "{}: Caught following exception while attempting to initialize a server-to-client communication socket and will exit",
-              this, e);
-        }
-        stopProcessing();
-      }
-    }
-    return _message;
+    Message message = new Message(2, Version.CURRENT);
+    message.setComms(this.socket, this.in, this.out, this.commBuffer, this.stats);
+    return message;
   }
 
   /* refinement of method inherited from Thread */

http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java
index 2a5a3d7..be30061 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java
@@ -22,7 +22,6 @@ import org.apache.geode.internal.logging.LogService;
 
 import java.io.EOFException;
 import java.io.IOException;
-import java.net.SocketTimeoutException;
 import java.nio.ByteBuffer;
 
 import org.apache.logging.log4j.Logger;
@@ -36,7 +35,7 @@ import org.apache.logging.log4j.Logger;
  * 
  * <PRE>
  * 
- * msgType - int - 4 bytes type of message, types enumerated below
+ * messageType - int - 4 bytes type of message, types enumerated below
  * 
  * numberOfParts - int - 4 bytes number of elements (LEN-BYTE* pairs) contained
  * in the payload. Message can be a multi-part message
@@ -153,7 +152,7 @@ public class ChunkedMessage extends Message {
 
   public void setLastChunkAndNumParts(boolean lastChunk, int numParts) {
     setLastChunk(lastChunk);
-    if (this.sc != null && this.sc.getClientVersion().compareTo(Version.GFE_65) >= 0) {
+    if (this.serverConnection != null && this.serverConnection.getClientVersion().compareTo(Version.GFE_65) >= 0) {
       // we us e three bits for number of parts in last chunk byte
       // we us e three bits for number of parts in last chunk byte
       byte localLastChunk = (byte) (numParts << 5);
@@ -162,7 +161,7 @@ public class ChunkedMessage extends Message {
   }
 
   public void setServerConnection(ServerConnection servConn) {
-    if (this.sc != servConn)
+    if (this.serverConnection != servConn)
       throw new IllegalStateException("this.sc was not correctly set");
   }
 
@@ -209,7 +208,7 @@ public class ChunkedMessage extends Message {
         // Set the header and payload fields only after receiving all the
         // socket data, providing better message consistency in the face
         // of exceptional conditions (e.g. IO problems, timeouts etc.)
-        this.msgType = type;
+        this.messageType = type;
         this.numberOfParts = numParts; // Already set in setPayloadFields via setNumberOfParts
         this.transactionId = txid;
       }
@@ -241,14 +240,14 @@ public class ChunkedMessage extends Message {
     int totalBytesRead = 0;
     do {
       int bytesRead = 0;
-      bytesRead = is.read(cb.array(), totalBytesRead, CHUNK_HEADER_LENGTH - totalBytesRead);
+      bytesRead = inputStream.read(cb.array(), totalBytesRead, CHUNK_HEADER_LENGTH - totalBytesRead);
       if (bytesRead == -1) {
         throw new EOFException(
             LocalizedStrings.ChunkedMessage_CHUNK_READ_ERROR_CONNECTION_RESET.toLocalizedString());
       }
       totalBytesRead += bytesRead;
-      if (this.msgStats != null) {
-        this.msgStats.incReceivedBytes(bytesRead);
+      if (this.messageStats != null) {
+        this.messageStats.incReceivedBytes(bytesRead);
       }
     } while (totalBytesRead < CHUNK_HEADER_LENGTH);
 
@@ -315,7 +314,7 @@ public class ChunkedMessage extends Message {
    * Sends a chunk of this message.
    */
   public void sendChunk(ServerConnection servConn) throws IOException {
-    if (this.sc != servConn)
+    if (this.serverConnection != servConn)
       throw new IllegalStateException("this.sc was not correctly set");
     sendChunk();
   }
@@ -355,7 +354,7 @@ public class ChunkedMessage extends Message {
   protected void getHeaderBytesForWrite() {
     final ByteBuffer cb = getCommBuffer();
     cb.clear();
-    cb.putInt(this.msgType);
+    cb.putInt(this.messageType);
     cb.putInt(this.numberOfParts);
 
     cb.putInt(this.transactionId);
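
For orientation, the chunk header that getHeaderBytesForWrite packs above and the fixed Message header described in the next diff follow the same pattern: a run of four-byte ints (plus, for the fixed header, a flags byte) written straight into the communication ByteBuffer. A standalone sketch of the fixed header layout, using only java.nio and placeholder values, would be:

    import java.nio.ByteBuffer;

    // A sketch of the fixed header layout described in the Message Javadoc below;
    // the field values passed in here are placeholders.
    final class HeaderSketch {
      static ByteBuffer packHeader(int messageType, int msgLength, int numberOfParts,
          int transactionId, byte flags) {
        ByteBuffer header = ByteBuffer.allocate(4 + 4 + 4 + 4 + 1); // four ints plus the flags byte
        header.putInt(messageType)   // type of message
            .putInt(msgLength)       // total length of the variable-length payload
            .putInt(numberOfParts)   // number of LEN-BYTE* pairs that follow
            .putInt(transactionId)   // copied back into the response
            .put(flags);             // e.g. 0x02 = has secure part, 0x04 = retry
        header.flip(); // ready to write to the socket
        return header;
      }
    }

The renamed packHeaderInfoForSending in the Message diff writes the same sequence before the message parts are flushed.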

http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
index f102b2d..354ad0f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
@@ -14,6 +14,8 @@
  */
 package org.apache.geode.internal.cache.tier.sockets;
 
+import static org.apache.geode.internal.util.IOUtils.close;
+
 import org.apache.geode.SerializationException;
 import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.internal.Assert;
@@ -34,7 +36,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.Socket;
-import java.net.SocketTimeoutException;
 import java.nio.ByteBuffer;
 import java.nio.channels.SocketChannel;
 import java.util.Map;
@@ -47,7 +48,7 @@ import java.util.concurrent.TimeUnit;
  * and serialize it out to the wire.
  *
  * <PRE>
- * msgType       - int   - 4 bytes type of message, types enumerated below
+ * messageType       - int   - 4 bytes type of message, types enumerated below
  *
  * msgLength     - int - 4 bytes   total length of variable length payload
  *
@@ -55,10 +56,10 @@ import java.util.concurrent.TimeUnit;
  *                     contained in the payload. Message can
  *                       be a multi-part message
  *
- * transId       - int - 4 bytes  filled in by the requestor, copied back into
+ * transId       - int - 4 bytes  filled in by the requester, copied back into
  *                    the response
  *
- * flags         - byte- 1 byte   filled in by the requestor
+ * flags         - byte- 1 byte   filled in by the requester
  * len1
  * part1
  * .
@@ -76,18 +77,16 @@ import java.util.concurrent.TimeUnit;
  *
  * See also <a href="package-summary.html#messages">package description</a>.
  *
- * @see org.apache.geode.internal.cache.tier.MessageType
- *
+ * @see MessageType
  */
 public class Message {
 
-  public static final int DEFAULT_MAX_MESSAGE_SIZE = 1073741824;
-  /**
-   * maximum size of an outgoing message. See GEODE-478
-   */
-  public static int MAX_MESSAGE_SIZE =
-      Integer.getInteger(DistributionConfig.GEMFIRE_PREFIX + "client.max-message-size",
-          DEFAULT_MAX_MESSAGE_SIZE).intValue();
+  // Tentative workaround to avoid OOM stated in #46754.
+  public static final ThreadLocal<Integer> MESSAGE_TYPE = new ThreadLocal<>();
+
+  public static final String MAX_MESSAGE_SIZE_PROPERTY = DistributionConfig.GEMFIRE_PREFIX + "client.max-message-size";
+
+  static final int DEFAULT_MAX_MESSAGE_SIZE = 1073741824;
 
   private static final Logger logger = LogService.getLogger();
 
@@ -97,83 +96,95 @@ public class Message {
 
   private static final ThreadLocal<ByteBuffer> tlCommBuffer = new ThreadLocal<>();
 
-  private static final byte[] TRUE;
-  private static final byte[] FALSE;
+  // These two statics are fields shoved into the flags byte for transmission.
+  // The MESSAGE_IS_RETRY bit is stripped out during deserialization but the other
+  // is left in place
+  private static final byte MESSAGE_HAS_SECURE_PART = (byte) 0x02;
+  private static final byte MESSAGE_IS_RETRY = (byte) 0x04;
 
-  static {
+  private static final byte MESSAGE_IS_RETRY_MASK = (byte) 0xFB;
+
+  private static final int DEFAULT_CHUNK_SIZE = 1024;
+
+  private static final byte[] TRUE = defineTrue();
+  private static final byte[] FALSE = defineFalse();
+
+  private static byte[] defineTrue() {
+    HeapDataOutputStream hdos = new HeapDataOutputStream(10, null);
     try {
-      HeapDataOutputStream hdos = new HeapDataOutputStream(10, null);
       BlobHelper.serializeTo(Boolean.TRUE, hdos);
-      TRUE = hdos.toByteArray();
-    } catch (Exception e) {
+      return hdos.toByteArray();
+    } catch (IOException e) {
       throw new IllegalStateException(e);
+    } finally {
+      close(hdos);
     }
+  }
 
+  private static byte[] defineFalse() {
+    HeapDataOutputStream hdos = new HeapDataOutputStream(10, null);
     try {
-      HeapDataOutputStream hdos = new HeapDataOutputStream(10, null);
       BlobHelper.serializeTo(Boolean.FALSE, hdos);
-      FALSE = hdos.toByteArray();
-    } catch (Exception e) {
+      return hdos.toByteArray();
+    } catch (IOException e) {
       throw new IllegalStateException(e);
+    } finally {
+      close(hdos);
     }
   }
 
-  protected int msgType;
-  protected int payloadLength = 0;
-  protected int numberOfParts = 0;
+  /**
+   * maximum size of an outgoing message. See GEODE-478
+   */
+  private final int maxMessageSize;
+
+  protected int messageType;
+  private int payloadLength = 0;
+  int numberOfParts = 0;
   protected int transactionId = TXManagerImpl.NOTX;
-  protected int currentPart = 0;
-  protected Part[] partsList = null;
-  protected ByteBuffer cachedCommBuffer;
+  int currentPart = 0;
+  private Part[] partsList = null;
+  private ByteBuffer cachedCommBuffer;
   protected Socket socket = null;
-  protected SocketChannel sockCh = null;
-  protected OutputStream os = null;
-  protected InputStream is = null;
-  protected boolean messageModified = true;
+  private SocketChannel socketChannel = null;
+  private OutputStream outputStream = null;
+  protected InputStream inputStream = null;
+  private boolean messageModified = true;
+
   /** is this message a retry of a previously sent message? */
-  protected boolean isRetry;
+  private boolean isRetry;
+
   private byte flags = 0x00;
-  protected MessageStats msgStats = null;
-  protected ServerConnection sc = null;
+  MessageStats messageStats = null;
+  protected ServerConnection serverConnection = null;
   private int maxIncomingMessageLength = -1;
   private Semaphore dataLimiter = null;
-  // private int MAX_MSGS = -1;
-  private Semaphore msgLimiter = null;
-  private boolean hdrRead = false;
-  private int chunkSize = 1024;// Default Chunk Size.
+  private Semaphore messageLimiter = null;
+  private boolean readHeader = false;
+  private int chunkSize = DEFAULT_CHUNK_SIZE;
 
-  protected Part securePart = null;
+  Part securePart = null;
   private boolean isMetaRegion = false;
 
-
-  // These two statics are fields shoved into the flags byte for transmission.
-  // The MESSAGE_IS_RETRY bit is stripped out during deserialization but the other
-  // is left in place
-  public static final byte MESSAGE_HAS_SECURE_PART = (byte) 0x02;
-  public static final byte MESSAGE_IS_RETRY = (byte) 0x04;
-
-  public static final byte MESSAGE_IS_RETRY_MASK = (byte) 0xFB;
-
-  // Tentative workaround to avoid OOM stated in #46754.
-  public static final ThreadLocal<Integer> messageType = new ThreadLocal<Integer>();
-
-  Version version;
+  private Version version;
 
   /**
    * Creates a new message with the given number of parts
    */
   public Message(int numberOfParts, Version destVersion) {
+    this.maxMessageSize = Integer.getInteger(MAX_MESSAGE_SIZE_PROPERTY, DEFAULT_MAX_MESSAGE_SIZE);
     this.version = destVersion;
     Assert.assertTrue(destVersion != null, "Attempt to create an unversioned message");
-    partsList = new Part[numberOfParts];
+    this.partsList = new Part[numberOfParts];
     this.numberOfParts = numberOfParts;
-    for (int i = 0; i < partsList.length; i++) {
-      partsList[i] = new Part();
+    int partsListLength = this.partsList.length;
+    for (int i = 0; i < partsListLength; i++) {
+      this.partsList[i] = new Part();
     }
   }
 
   public boolean isSecureMode() {
-    return securePart != null;
+    return this.securePart != null;
   }
 
   public byte[] getSecureBytes() throws IOException, ClassNotFoundException {
@@ -186,7 +197,7 @@ public class Message {
       throw new IllegalArgumentException(
           LocalizedStrings.Message_INVALID_MESSAGETYPE.toLocalizedString());
     }
-    this.msgType = msgType;
+    this.messageType = msgType;
   }
 
   public void setVersion(Version clientVersion) {
@@ -194,17 +205,15 @@ public class Message {
   }
 
   public void setMessageHasSecurePartFlag() {
-    this.flags = (byte) (this.flags | MESSAGE_HAS_SECURE_PART);
+    this.flags |= MESSAGE_HAS_SECURE_PART;
   }
 
   public void clearMessageHasSecurePartFlag() {
-    this.flags = (byte) (this.flags & MESSAGE_HAS_SECURE_PART);
+    this.flags &= MESSAGE_HAS_SECURE_PART;
   }
 
   /**
    * Sets and builds the {@link Part}s that are sent in the payload of the Message
-   * 
-   * @param numberOfParts
    */
   public void setNumberOfParts(int numberOfParts) {
     // hitesh: need to add security header here from server
@@ -227,9 +236,7 @@ public class Message {
   }
 
   /**
-   * For boundary testing we may need to inject mock parts
-   * 
-   * @param parts
+   * For boundary testing we may need to inject mock parts. For testing only.
    */
   void setParts(Part[] parts) {
     this.partsList = parts;
@@ -260,7 +267,7 @@ public class Message {
   /**
    * When building a Message this will return the number of the next Part to be added to the message
    */
-  public int getNextPartNumber() {
+  int getNextPartNumber() {
     return this.currentPart;
   }
 
@@ -268,32 +275,41 @@ public class Message {
     addStringPart(str, false);
   }
 
-  private static final Map<String, byte[]> CACHED_STRINGS = new ConcurrentHashMap<String, byte[]>();
+  private static final Map<String, byte[]> CACHED_STRINGS = new ConcurrentHashMap<>();
 
   public void addStringPart(String str, boolean enableCaching) {
     if (str == null) {
-      addRawPart((byte[]) null, false);
-    } else {
-      Part part = partsList[this.currentPart];
-      if (enableCaching) {
-        byte[] bytes = CACHED_STRINGS.get(str);
-        if (bytes == null) {
-          HeapDataOutputStream hdos = new HeapDataOutputStream(str);
+      addRawPart(null, false);
+      return;
+    }
+
+    Part part = this.partsList[this.currentPart];
+    if (enableCaching) {
+      byte[] bytes = CACHED_STRINGS.get(str);
+      if (bytes == null) {
+        HeapDataOutputStream hdos = new HeapDataOutputStream(str);
+        try {
           bytes = hdos.toByteArray();
           CACHED_STRINGS.put(str, bytes);
+        } finally {
+          close(hdos);
         }
-        part.setPartState(bytes, false);
-      } else {
-        HeapDataOutputStream hdos = new HeapDataOutputStream(str);
-        this.messageModified = true;
-        part.setPartState(hdos, false);
       }
-      this.currentPart++;
+      part.setPartState(bytes, false);
+    } else {
+      HeapDataOutputStream hdos = new HeapDataOutputStream(str);
+      try {
+      this.messageModified = true;
+      part.setPartState(hdos, false);
+      } finally {
+        close(hdos);
+      }
     }
+    this.currentPart++;
   }
 
   /*
-   * Adds a new part to this message that contains a <code>byte</code> array (as opposed to a
+   * Adds a new part to this message that contains a {@code byte} array (as opposed to a
    * serialized object).
    *
    * @see #addPart(byte[], boolean)
@@ -312,13 +328,6 @@ public class Message {
     }
   }
 
-  public void addDeltaPart(HeapDataOutputStream hdos) {
-    this.messageModified = true;
-    Part part = partsList[this.currentPart];
-    part.setPartState(hdos, false);
-    this.currentPart++;
-  }
-
   public void addObjPart(Object o) {
     addObjPart(o, false);
   }
@@ -345,6 +354,9 @@ public class Message {
     }
   }
 
+  /**
+   * Object o is always null
+   */
   public void addPartInAnyForm(@Unretained Object o, boolean isObject) {
     if (o == null) {
       addRawPart((byte[]) o, false);
@@ -353,7 +365,7 @@ public class Message {
     } else if (o instanceof StoredObject) {
       // It is possible it is an off-heap StoredObject that contains a simple non-object byte[].
       this.messageModified = true;
-      Part part = partsList[this.currentPart];
+      Part part = this.partsList[this.currentPart];
       part.setPartState((StoredObject) o, isObject);
       this.currentPart++;
     } else {
@@ -362,59 +374,61 @@ public class Message {
   }
 
   private void serializeAndAddPartNoCopying(Object o) {
-    HeapDataOutputStream hdos;
-    Version v = version;
-    if (version.equals(Version.CURRENT)) {
+    Version v = this.version;
+    if (this.version.equals(Version.CURRENT)) {
       v = null;
     }
+    
     // create the HDOS with a flag telling it that it can keep any byte[] or ByteBuffers/ByteSources
     // passed to it.
-    hdos = new HeapDataOutputStream(chunkSize, v, true);
+    HeapDataOutputStream hdos = new HeapDataOutputStream(this.chunkSize, v, true);
     try {
       BlobHelper.serializeTo(o, hdos);
+      this.messageModified = true;
+      Part part = this.partsList[this.currentPart];
+      part.setPartState(hdos, true);
+      this.currentPart++;
     } catch (IOException ex) {
       throw new SerializationException("failed serializing object", ex);
+    } finally {
+      close(hdos);
     }
-    this.messageModified = true;
-    Part part = partsList[this.currentPart];
-    part.setPartState(hdos, true);
-    this.currentPart++;
-
   }
 
   private void serializeAndAddPart(Object o, boolean zipValues) {
     if (zipValues) {
       throw new UnsupportedOperationException("zipValues no longer supported");
-
-    } else {
-      HeapDataOutputStream hdos;
-      Version v = version;
-      if (version.equals(Version.CURRENT)) {
-        v = null;
-      }
-      hdos = new HeapDataOutputStream(chunkSize, v);
-      try {
-        BlobHelper.serializeTo(o, hdos);
-      } catch (IOException ex) {
-        throw new SerializationException("failed serializing object", ex);
-      }
+    }
+    
+    Version v = this.version;
+    if (this.version.equals(Version.CURRENT)) {
+      v = null;
+    }
+    
+    HeapDataOutputStream hdos = new HeapDataOutputStream(this.chunkSize, v);
+    try {
+      BlobHelper.serializeTo(o, hdos);
       this.messageModified = true;
-      Part part = partsList[this.currentPart];
+      Part part = this.partsList[this.currentPart];
       part.setPartState(hdos, true);
       this.currentPart++;
+    } catch (IOException ex) {
+      throw new SerializationException("failed serializing object", ex);
+    } finally {
+      close(hdos);
     }
   }
 
   public void addIntPart(int v) {
     this.messageModified = true;
-    Part part = partsList[this.currentPart];
+    Part part = this.partsList[this.currentPart];
     part.setInt(v);
     this.currentPart++;
   }
 
   public void addLongPart(long v) {
     this.messageModified = true;
-    Part part = partsList[this.currentPart];
+    Part part = this.partsList[this.currentPart];
     part.setLong(v);
     this.currentPart++;
   }
@@ -424,13 +438,13 @@ public class Message {
    */
   public void addRawPart(byte[] newPart, boolean isObject) {
     this.messageModified = true;
-    Part part = partsList[this.currentPart];
+    Part part = this.partsList[this.currentPart];
     part.setPartState(newPart, isObject);
     this.currentPart++;
   }
 
   public int getMessageType() {
-    return this.msgType;
+    return this.messageType;
   }
 
   public int getPayloadLength() {
@@ -451,7 +465,7 @@ public class Message {
 
   public Part getPart(int index) {
     if (index < this.numberOfParts) {
-      Part p = partsList[index];
+      Part p = this.partsList[index];
       if (this.version != null) {
         p.setVersion(this.version);
       }
@@ -480,9 +494,9 @@ public class Message {
     if (len != 0) {
       this.payloadLength = 0;
     }
-    if (this.hdrRead) {
-      if (this.msgStats != null) {
-        this.msgStats.decMessagesBeingReceived(len);
+    if (this.readHeader) {
+      if (this.messageStats != null) {
+        this.messageStats.decMessagesBeingReceived(len);
       }
     }
     ByteBuffer buffer = getCommBuffer();
@@ -495,20 +509,18 @@ public class Message {
       this.dataLimiter = null;
       this.maxIncomingMessageLength = 0;
     }
-    if (this.hdrRead) {
-      if (this.msgLimiter != null) {
-        this.msgLimiter.release(1);
-        this.msgLimiter = null;
+    if (this.readHeader) {
+      if (this.messageLimiter != null) {
+        this.messageLimiter.release(1);
+        this.messageLimiter = null;
       }
-      this.hdrRead = false;
+      this.readHeader = false;
     }
     this.flags = 0;
   }
 
   protected void packHeaderInfoForSending(int msgLen, boolean isSecurityHeader) {
-    // hitesh: setting second bit of flags byte for client
-    // this is not require but this makes all changes easily at client side right now
-    // just see this bit and process security header
+    // setting second bit of flags byte for client this is not require but this makes all changes easily at client side right now just see this bit and process security header
     byte flagsByte = this.flags;
     if (isSecurityHeader) {
       flagsByte |= MESSAGE_HAS_SECURE_PART;
@@ -516,14 +528,14 @@ public class Message {
     if (this.isRetry) {
       flagsByte |= MESSAGE_IS_RETRY;
     }
-    getCommBuffer().putInt(this.msgType).putInt(msgLen).putInt(this.numberOfParts)
-        .putInt(this.transactionId).put(flagsByte);
+    getCommBuffer().putInt(this.messageType).putInt(msgLen).putInt(this.numberOfParts)
+                   .putInt(this.transactionId).put(flagsByte);
   }
 
   protected Part getSecurityPart() {
-    if (this.sc != null) {
+    if (this.serverConnection != null) {
       // look types right put get etc
-      return this.sc.updateAndGetSecurityPart();
+      return this.serverConnection.updateAndGetSecurityPart();
     }
     return null;
   }
@@ -537,7 +549,7 @@ public class Message {
     this.isMetaRegion = isMetaRegion;
   }
 
-  public boolean getAndResetIsMetaRegion() {
+  boolean getAndResetIsMetaRegion() {
     boolean isMetaRegion = this.isMetaRegion;
     this.isMetaRegion = false;
     return isMetaRegion;
@@ -546,21 +558,20 @@ public class Message {
   /**
    * Sends this message out on its socket.
    */
-  protected void sendBytes(boolean clearMessage) throws IOException {
-    if (this.sc != null) {
+  void sendBytes(boolean clearMessage) throws IOException {
+    if (this.serverConnection != null) {
       // Keep track of the fact that we are making progress.
-      this.sc.updateProcessingMessage();
+      this.serverConnection.updateProcessingMessage();
     }
     if (this.socket == null) {
       throw new IOException(LocalizedStrings.Message_DEAD_CONNECTION.toLocalizedString());
     }
     try {
-      final ByteBuffer cb = getCommBuffer();
-      if (cb == null) {
+      final ByteBuffer commBuffer = getCommBuffer();
+      if (commBuffer == null) {
         throw new IOException("No buffer");
       }
-      int msgLen = 0;
-      synchronized (cb) {
+      synchronized (commBuffer) {
         long totalPartLen = 0;
         long headerLen = 0;
         int partsToTransmit = this.numberOfParts;
@@ -581,50 +592,50 @@ public class Message {
           partsToTransmit++;
         }
 
-        if ((headerLen + totalPartLen) > Integer.MAX_VALUE) {
+        if (headerLen + totalPartLen > Integer.MAX_VALUE) {
           throw new MessageTooLargeException(
               "Message size (" + (headerLen + totalPartLen) + ") exceeds maximum integer value");
         }
 
-        msgLen = (int) (headerLen + totalPartLen);
+        int msgLen = (int) (headerLen + totalPartLen);
 
-        if (msgLen > MAX_MESSAGE_SIZE) {
+        if (msgLen > this.maxMessageSize) {
           throw new MessageTooLargeException("Message size (" + msgLen
-              + ") exceeds gemfire.client.max-message-size setting (" + MAX_MESSAGE_SIZE + ")");
+                                             + ") exceeds gemfire.client.max-message-size setting (" + this.maxMessageSize + ")");
         }
 
-        cb.clear();
-        packHeaderInfoForSending(msgLen, (securityPart != null));
+        commBuffer.clear();
+        packHeaderInfoForSending(msgLen, securityPart != null);
         for (int i = 0; i < partsToTransmit; i++) {
-          Part part = (i == this.numberOfParts) ? securityPart : partsList[i];
+          Part part = i == this.numberOfParts ? securityPart : this.partsList[i];
 
-          if (cb.remaining() < PART_HEADER_SIZE) {
+          if (commBuffer.remaining() < PART_HEADER_SIZE) {
             flushBuffer();
           }
 
           int partLen = part.getLength();
-          cb.putInt(partLen);
-          cb.put(part.getTypeCode());
-          if (partLen <= cb.remaining()) {
-            part.writeTo(cb);
+          commBuffer.putInt(partLen);
+          commBuffer.put(part.getTypeCode());
+          if (partLen <= commBuffer.remaining()) {
+            part.writeTo(commBuffer);
           } else {
             flushBuffer();
-            if (this.sockCh != null) {
-              part.writeTo(this.sockCh, cb);
+            if (this.socketChannel != null) {
+              part.writeTo(this.socketChannel, commBuffer);
             } else {
-              part.writeTo(this.os, cb);
+              part.writeTo(this.outputStream, commBuffer);
             }
-            if (this.msgStats != null) {
-              this.msgStats.incSentBytes(partLen);
+            if (this.messageStats != null) {
+              this.messageStats.incSentBytes(partLen);
             }
           }
         }
-        if (cb.position() != 0) {
+        if (commBuffer.position() != 0) {
           flushBuffer();
         }
         this.messageModified = false;
-        if (this.sockCh == null) {
-          this.os.flush();
+        if (this.socketChannel == null) {
+          this.outputStream.flush();
         }
       }
     } finally {
@@ -634,69 +645,67 @@ public class Message {
     }
   }
 
-  protected void flushBuffer() throws IOException {
+  void flushBuffer() throws IOException {
     final ByteBuffer cb = getCommBuffer();
-    if (this.sockCh != null) {
+    if (this.socketChannel != null) {
       cb.flip();
       do {
-        this.sockCh.write(cb);
+        this.socketChannel.write(cb);
       } while (cb.remaining() > 0);
     } else {
-      this.os.write(cb.array(), 0, cb.position());
+      this.outputStream.write(cb.array(), 0, cb.position());
     }
-    if (this.msgStats != null) {
-      this.msgStats.incSentBytes(cb.position());
+    if (this.messageStats != null) {
+      this.messageStats.incSentBytes(cb.position());
     }
     cb.clear();
   }
 
   private void read() throws IOException {
     clearParts();
-    // TODO:Hitesh ??? for server changes make sure sc is not null as this class also used by client
-    // :(
+    // TODO: for server changes make sure sc is not null as this class also used by client
     readHeaderAndPayload();
   }
 
   /**
    * Read the actual bytes of the header off the socket
    */
-  protected void fetchHeader() throws IOException {
+  void fetchHeader() throws IOException {
     final ByteBuffer cb = getCommBuffer();
     cb.clear();
-    // msgType is invalidated here and can be used as an indicator
+    
+    // messageType is invalidated here and can be used as an indicator
     // of problems reading the message
-    this.msgType = MessageType.INVALID;
-
-    int hdr = 0;
+    this.messageType = MessageType.INVALID;
 
     final int headerLength = getHeaderLength();
-    if (this.sockCh != null) {
+    if (this.socketChannel != null) {
       cb.limit(headerLength);
       do {
-        int bytesRead = this.sockCh.read(cb);
-        // System.out.println("DEBUG: fetchHeader read " + bytesRead + " bytes commBuffer=" + cb);
+        int bytesRead = this.socketChannel.read(cb);
         if (bytesRead == -1) {
           throw new EOFException(
               LocalizedStrings.Message_THE_CONNECTION_HAS_BEEN_RESET_WHILE_READING_THE_HEADER
                   .toLocalizedString());
         }
-        if (this.msgStats != null) {
-          this.msgStats.incReceivedBytes(bytesRead);
+        if (this.messageStats != null) {
+          this.messageStats.incReceivedBytes(bytesRead);
         }
       } while (cb.remaining() > 0);
       cb.flip();
+      
     } else {
+      int hdr = 0;
       do {
-        int bytesRead = -1;
-        bytesRead = this.is.read(cb.array(), hdr, headerLength - hdr);
+        int bytesRead = this.inputStream.read(cb.array(), hdr, headerLength - hdr);
         if (bytesRead == -1) {
           throw new EOFException(
               LocalizedStrings.Message_THE_CONNECTION_HAS_BEEN_RESET_WHILE_READING_THE_HEADER
                   .toLocalizedString());
         }
         hdr += bytesRead;
-        if (this.msgStats != null) {
-          this.msgStats.incReceivedBytes(bytesRead);
+        if (this.messageStats != null) {
+          this.messageStats.incReceivedBytes(bytesRead);
         }
       } while (hdr < headerLength);
 
@@ -717,34 +726,36 @@ public class Message {
 
     if (!MessageType.validate(type)) {
       throw new IOException(LocalizedStrings.Message_INVALID_MESSAGE_TYPE_0_WHILE_READING_HEADER
-          .toLocalizedString(Integer.valueOf(type)));
+          .toLocalizedString(type));
     }
+    
     int timeToWait = 0;
-    if (this.sc != null) {
+    if (this.serverConnection != null) {
       // Keep track of the fact that a message is being processed.
-      this.sc.setProcessingMessage();
-      timeToWait = sc.getClientReadTimeout();
+      this.serverConnection.setProcessingMessage();
+      timeToWait = this.serverConnection.getClientReadTimeout();
     }
-    this.hdrRead = true;
-    if (this.msgLimiter != null) {
+    this.readHeader = true;
+    
+    if (this.messageLimiter != null) {
       for (;;) {
-        this.sc.getCachedRegionHelper().checkCancelInProgress(null);
+        this.serverConnection.getCachedRegionHelper().checkCancelInProgress(null);
         boolean interrupted = Thread.interrupted();
         try {
           if (timeToWait == 0) {
-            this.msgLimiter.acquire(1);
+            this.messageLimiter.acquire(1);
           } else {
-            if (!this.msgLimiter.tryAcquire(1, timeToWait, TimeUnit.MILLISECONDS)) {
-              if (this.msgStats != null && this.msgStats instanceof CacheServerStats) {
-                ((CacheServerStats) this.msgStats).incConnectionsTimedOut();
+            if (!this.messageLimiter.tryAcquire(1, timeToWait, TimeUnit.MILLISECONDS)) {
+              if (this.messageStats instanceof CacheServerStats) {
+                ((CacheServerStats) this.messageStats).incConnectionsTimedOut();
               }
               throw new IOException(
                   LocalizedStrings.Message_OPERATION_TIMED_OUT_ON_SERVER_WAITING_ON_CONCURRENT_MESSAGE_LIMITER_AFTER_WAITING_0_MILLISECONDS
-                      .toLocalizedString(Integer.valueOf(timeToWait)));
+                      .toLocalizedString(timeToWait));
             }
           }
           break;
-        } catch (InterruptedException e) {
+        } catch (InterruptedException ignore) {
           interrupted = true;
         } finally {
           if (interrupted) {
@@ -753,16 +764,19 @@ public class Message {
         }
       } // for
     }
+    
     if (len > 0) {
       if (this.maxIncomingMessageLength > 0 && len > this.maxIncomingMessageLength) {
         throw new IOException(LocalizedStrings.Message_MESSAGE_SIZE_0_EXCEEDED_MAX_LIMIT_OF_1
-            .toLocalizedString(new Object[] {Integer.valueOf(len),
-                Integer.valueOf(this.maxIncomingMessageLength)}));
+            .toLocalizedString(new Object[] {
+              len, this.maxIncomingMessageLength
+            }));
       }
+      
       if (this.dataLimiter != null) {
         for (;;) {
-          if (sc != null) {
-            this.sc.getCachedRegionHelper().checkCancelInProgress(null);
+          if (this.serverConnection != null) {
+            this.serverConnection.getCachedRegionHelper().checkCancelInProgress(null);
           }
           boolean interrupted = Thread.interrupted();
           try {
@@ -770,21 +784,21 @@ public class Message {
               this.dataLimiter.acquire(len);
             } else {
               int newTimeToWait = timeToWait;
-              if (this.msgLimiter != null) {
+              if (this.messageLimiter != null) {
                 // may have waited for msg limit so recalc time to wait
-                newTimeToWait -= (int) sc.getCurrentMessageProcessingTime();
+                newTimeToWait -= (int) this.serverConnection.getCurrentMessageProcessingTime();
               }
               if (newTimeToWait <= 0
-                  || !this.msgLimiter.tryAcquire(1, newTimeToWait, TimeUnit.MILLISECONDS)) {
+                  || !this.messageLimiter.tryAcquire(1, newTimeToWait, TimeUnit.MILLISECONDS)) {
                 throw new IOException(
                     LocalizedStrings.Message_OPERATION_TIMED_OUT_ON_SERVER_WAITING_ON_CONCURRENT_DATA_LIMITER_AFTER_WAITING_0_MILLISECONDS
                         .toLocalizedString(timeToWait));
               }
             }
-            this.payloadLength = len; // makes sure payloadLength gets set now so we will release
-                                      // the semaphore
+            // makes sure payloadLength gets set now so we will release the semaphore
+            this.payloadLength = len;
             break; // success
-          } catch (InterruptedException e) {
+          } catch (InterruptedException ignore) {
             interrupted = true;
           } finally {
             if (interrupted) {
@@ -794,15 +808,15 @@ public class Message {
         }
       }
     }
-    if (this.msgStats != null) {
-      this.msgStats.incMessagesBeingReceived(len);
+    if (this.messageStats != null) {
+      this.messageStats.incMessagesBeingReceived(len);
       this.payloadLength = len; // makes sure payloadLength gets set now so we will dec on clear
     }
 
     this.isRetry = (bits & MESSAGE_IS_RETRY) != 0;
-    bits = (byte) (bits & MESSAGE_IS_RETRY_MASK);
+    bits &= MESSAGE_IS_RETRY_MASK;
     this.flags = bits;
-    this.msgType = type;
+    this.messageType = type;
 
     readPayloadFields(numParts, len);
 
@@ -813,32 +827,38 @@ public class Message {
     // this.numberOfParts = numParts; Already set in setPayloadFields via setNumberOfParts
     this.transactionId = txid;
     this.flags = bits;
-    if (this.sc != null) {
+    if (this.serverConnection != null) {
       // Keep track of the fact that a message is being processed.
-      this.sc.updateProcessingMessage();
+      this.serverConnection.updateProcessingMessage();
     }
   }
 
-  protected void readPayloadFields(final int numParts, final int len) throws IOException {
+  /**
+   * TODO: refactor overly long method readPayloadFields
+   */
+  void readPayloadFields(final int numParts, final int len) throws IOException {
     if (len > 0 && numParts <= 0 || len <= 0 && numParts > 0) {
       throw new IOException(
           LocalizedStrings.Message_PART_LENGTH_0_AND_NUMBER_OF_PARTS_1_INCONSISTENT
-              .toLocalizedString(new Object[] {Integer.valueOf(len), Integer.valueOf(numParts)}));
+              .toLocalizedString(new Object[] { len, numParts }));
     }
 
-    Integer msgType = messageType.get();
+    Integer msgType = MESSAGE_TYPE.get();
     if (msgType != null && msgType == MessageType.PING) {
-      messageType.set(null); // set it to null right away.
-      int pingParts = 10; // Some number which will not throw OOM but still be acceptable for a ping
-                          // operation.
+      // set it to null right away.
+      MESSAGE_TYPE.set(null);
+      // Some number which will not throw OOM but still be acceptable for a ping operation.
+      int pingParts = 10;
       if (numParts > pingParts) {
         throw new IOException("Part length ( " + numParts + " ) is  inconsistent for "
             + MessageType.getString(msgType) + " operation.");
       }
     }
+    
     setNumberOfParts(numParts);
-    if (numParts <= 0)
+    if (numParts <= 0) {
       return;
+    }
 
     if (len < 0) {
       logger.info(LocalizedMessage.create(LocalizedStrings.Message_RPL_NEG_LEN__0, len));
@@ -849,12 +869,10 @@ public class Message {
     cb.clear();
     cb.flip();
 
-    int readSecurePart = 0;
-    readSecurePart = checkAndSetSecurityPart();
+    int readSecurePart = checkAndSetSecurityPart();
 
     int bytesRemaining = len;
-    for (int i = 0; ((i < numParts + readSecurePart)
-        || ((readSecurePart == 1) && (cb.remaining() > 0))); i++) {
+    for (int i = 0; i < numParts + readSecurePart || readSecurePart == 1 && cb.remaining() > 0; i++) {
       int bytesReadThisTime = readPartChunk(bytesRemaining);
       bytesRemaining -= bytesReadThisTime;
 
@@ -869,6 +887,7 @@ public class Message {
       int partLen = cb.getInt();
       byte partType = cb.get();
       byte[] partBytes = null;
+      
       if (partLen > 0) {
         partBytes = new byte[partLen];
         int alreadyReadBytes = cb.remaining();
@@ -878,26 +897,27 @@ public class Message {
           }
           cb.get(partBytes, 0, alreadyReadBytes);
         }
+        
         // now we need to read partLen - alreadyReadBytes off the wire
         int off = alreadyReadBytes;
         int remaining = partLen - off;
         while (remaining > 0) {
-          if (this.sockCh != null) {
+          if (this.socketChannel != null) {
             int bytesThisTime = remaining;
             cb.clear();
             if (bytesThisTime > cb.capacity()) {
               bytesThisTime = cb.capacity();
             }
             cb.limit(bytesThisTime);
-            int res = this.sockCh.read(cb);
+            int res = this.socketChannel.read(cb);
             if (res != -1) {
               cb.flip();
               bytesRemaining -= res;
               remaining -= res;
               cb.get(partBytes, off, res);
               off += res;
-              if (this.msgStats != null) {
-                this.msgStats.incReceivedBytes(res);
+              if (this.messageStats != null) {
+                this.messageStats.incReceivedBytes(res);
               }
             } else {
               throw new EOFException(
@@ -905,14 +925,13 @@ public class Message {
                       .toLocalizedString());
             }
           } else {
-            int res = 0;
-            res = this.is.read(partBytes, off, remaining);
+            int res = this.inputStream.read(partBytes, off, remaining);
             if (res != -1) {
               bytesRemaining -= res;
               remaining -= res;
               off += res;
-              if (this.msgStats != null) {
-                this.msgStats.incReceivedBytes(res);
+              if (this.messageStats != null) {
+                this.messageStats.incReceivedBytes(res);
               }
             } else {
               throw new EOFException(
@@ -941,35 +960,38 @@ public class Message {
    * @return the number of bytes read into commBuffer
    */
   private int readPartChunk(int bytesRemaining) throws IOException {
-    final ByteBuffer cb = getCommBuffer();
-    if (cb.remaining() >= PART_HEADER_SIZE) {
+    final ByteBuffer commBuffer = getCommBuffer();
+    if (commBuffer.remaining() >= PART_HEADER_SIZE) {
       // we already have the next part header in commBuffer so just return
       return 0;
     }
-    if (cb.position() != 0) {
-      cb.compact();
+    
+    if (commBuffer.position() != 0) {
+      commBuffer.compact();
     } else {
-      cb.position(cb.limit());
-      cb.limit(cb.capacity());
+      commBuffer.position(commBuffer.limit());
+      commBuffer.limit(commBuffer.capacity());
     }
-    int bytesRead = 0;
-    if (this.sc != null) {
+    
+    if (this.serverConnection != null) {
       // Keep track of the fact that we are making progress
-      this.sc.updateProcessingMessage();
+      this.serverConnection.updateProcessingMessage();
     }
-    if (this.sockCh != null) {
-      int remaining = cb.remaining();
+    int bytesRead = 0;
+    
+    if (this.socketChannel != null) {
+      int remaining = commBuffer.remaining();
       if (remaining > bytesRemaining) {
         remaining = bytesRemaining;
-        cb.limit(cb.position() + bytesRemaining);
+        commBuffer.limit(commBuffer.position() + bytesRemaining);
       }
       while (remaining > 0) {
-        int res = this.sockCh.read(cb);
+        int res = this.socketChannel.read(commBuffer);
         if (res != -1) {
           remaining -= res;
           bytesRead += res;
-          if (this.msgStats != null) {
-            this.msgStats.incReceivedBytes(res);
+          if (this.messageStats != null) {
+            this.messageStats.incReceivedBytes(res);
           }
         } else {
           throw new EOFException(
@@ -979,21 +1001,20 @@ public class Message {
       }
 
     } else {
-      int bufSpace = cb.capacity() - cb.position();
-      int bytesToRead = bufSpace;
+      int bytesToRead = commBuffer.capacity() - commBuffer.position();
       if (bytesRemaining < bytesToRead) {
         bytesToRead = bytesRemaining;
       }
-      int pos = cb.position();
+      int pos = commBuffer.position();
+      
       while (bytesToRead > 0) {
-        int res = 0;
-        res = this.is.read(cb.array(), pos, bytesToRead);
+        int res = this.inputStream.read(commBuffer.array(), pos, bytesToRead);
         if (res != -1) {
           bytesToRead -= res;
           pos += res;
           bytesRead += res;
-          if (this.msgStats != null) {
-            this.msgStats.incReceivedBytes(res);
+          if (this.messageStats != null) {
+            this.messageStats.incReceivedBytes(res);
           }
         } else {
           throw new EOFException(
@@ -1001,9 +1022,10 @@ public class Message {
                   .toLocalizedString());
         }
       }
-      cb.position(pos);
+      
+      commBuffer.position(pos);
     }
-    cb.flip();
+    commBuffer.flip();
     return bytesRead;
   }
 
@@ -1011,40 +1033,39 @@ public class Message {
    * Gets rid of all the parts that have been added to this message.
    */
   public void clearParts() {
-    for (int i = 0; i < partsList.length; i++) {
-      partsList[i].clear();
+    for (Part part : this.partsList) {
+      part.clear();
     }
     this.currentPart = 0;
   }
 
   @Override
   public String toString() {
-    StringBuffer sb = new StringBuffer();
-    sb.append("type=").append(MessageType.getString(msgType));
-    sb.append("; payloadLength=").append(payloadLength);
-    sb.append("; numberOfParts=").append(numberOfParts);
-    sb.append("; transactionId=").append(transactionId);
-    sb.append("; currentPart=").append(currentPart);
-    sb.append("; messageModified=").append(messageModified);
-    sb.append("; flags=").append(Integer.toHexString(flags));
-    for (int i = 0; i < numberOfParts; i++) {
+    StringBuilder sb = new StringBuilder();
+    sb.append("type=").append(MessageType.getString(this.messageType));
+    sb.append("; payloadLength=").append(this.payloadLength);
+    sb.append("; numberOfParts=").append(this.numberOfParts);
+    sb.append("; transactionId=").append(this.transactionId);
+    sb.append("; currentPart=").append(this.currentPart);
+    sb.append("; messageModified=").append(this.messageModified);
+    sb.append("; flags=").append(Integer.toHexString(this.flags));
+    for (int i = 0; i < this.numberOfParts; i++) {
       sb.append("; part[").append(i).append("]={");
-      sb.append(this.partsList[i].toString());
+      sb.append(this.partsList[i]);
       sb.append("}");
     }
     return sb.toString();
   }
 
-
-  public void setComms(ServerConnection sc, Socket socket, ByteBuffer bb, MessageStats msgStats)
+  void setComms(ServerConnection sc, Socket socket, ByteBuffer bb, MessageStats msgStats)
       throws IOException {
-    this.sc = sc;
+    this.serverConnection = sc;
     setComms(socket, bb, msgStats);
   }
 
-  public void setComms(Socket socket, ByteBuffer bb, MessageStats msgStats) throws IOException {
-    this.sockCh = socket.getChannel();
-    if (this.sockCh == null) {
+  void setComms(Socket socket, ByteBuffer bb, MessageStats msgStats) throws IOException {
+    this.socketChannel = socket.getChannel();
+    if (this.socketChannel == null) {
       setComms(socket, socket.getInputStream(), socket.getOutputStream(), bb, msgStats);
     } else {
       setComms(socket, null, null, bb, msgStats);
@@ -1052,14 +1073,14 @@ public class Message {
   }
 
   public void setComms(Socket socket, InputStream is, OutputStream os, ByteBuffer bb,
-      MessageStats msgStats) throws IOException {
+      MessageStats msgStats) {
     Assert.assertTrue(socket != null);
     this.socket = socket;
-    this.sockCh = socket.getChannel();
-    this.is = is;
-    this.os = os;
+    this.socketChannel = socket.getChannel();
+    this.inputStream = is;
+    this.outputStream = os;
     this.cachedCommBuffer = bb;
-    this.msgStats = msgStats;
+    this.messageStats = msgStats;
   }
 
   /**
@@ -1069,11 +1090,11 @@ public class Message {
    */
   public void unsetComms() {
     this.socket = null;
-    this.sockCh = null;
-    this.is = null;
-    this.os = null;
+    this.socketChannel = null;
+    this.inputStream = null;
+    this.outputStream = null;
     this.cachedCommBuffer = null;
-    this.msgStats = null;
+    this.messageStats = null;
   }
 
   /**
@@ -1084,7 +1105,7 @@ public class Message {
   }
 
   public void send(ServerConnection servConn) throws IOException {
-    if (this.sc != servConn)
+    if (this.serverConnection != servConn)
       throw new IllegalStateException("this.sc was not correctly set");
     send(true);
   }
@@ -1097,7 +1118,7 @@ public class Message {
   }
 
   /**
-   * Populates the stats of this <code>Message</code> with information received via its socket
+   * Populates the stats of this {@code Message} with information received via its socket
    */
   public void recv() throws IOException {
     if (this.socket != null) {
@@ -1111,10 +1132,10 @@ public class Message {
 
   public void recv(ServerConnection sc, int maxMessageLength, Semaphore dataLimiter,
       Semaphore msgLimiter) throws IOException {
-    this.sc = sc;
+    this.serverConnection = sc;
     this.maxIncomingMessageLength = maxMessageLength;
     this.dataLimiter = dataLimiter;
-    this.msgLimiter = msgLimiter;
+    this.messageLimiter = msgLimiter;
     recv();
   }
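
The readPayloadFields() and readPartChunk() hunks above all revolve around one loop: size a reusable ByteBuffer, read from the blocking SocketChannel, then copy the bytes into the part's array until the declared part length is consumed. The sketch below shows that loop in isolation under invented names (PartReader, readFully); it is an illustration of the pattern, not Geode code.

    import java.io.EOFException;
    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.SocketChannel;

    // Illustrative sketch of the fill-then-copy loop used when reading one
    // length-prefixed message part from a blocking SocketChannel.
    public final class PartReader {

      public static byte[] readFully(SocketChannel channel, ByteBuffer buffer, int length)
          throws IOException {
        byte[] part = new byte[length];
        int offset = 0;
        while (offset < length) {
          buffer.clear();
          // never read past the end of this part
          buffer.limit(Math.min(buffer.capacity(), length - offset));
          int bytesRead = channel.read(buffer);
          if (bytesRead == -1) {
            throw new EOFException("channel closed before the part was fully read");
          }
          buffer.flip();
          buffer.get(part, offset, bytesRead);
          offset += bytesRead;
        }
        return part;
      }
    }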
 

http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java
index 83d0e9d..dfda14f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java
@@ -723,12 +723,7 @@ public class ServerConnection implements Runnable {
     ThreadState threadState = null;
     try {
       if (msg != null) {
-        // this.logger.fine("donormalMsg() msgType " + msg.getMessageType());
-        // Since this thread is not interrupted when the cache server is
-        // shutdown,
-        // test again after a message has been read. This is a bit of a hack. I
-        // think this thread should be interrupted, but currently AcceptorImpl
-        // doesn't keep track of the threads that it launches.
+        // Since this thread is not interrupted when the cache server is shutdown, test again after a message has been read. This is a bit of a hack. I think this thread should be interrupted, but currently AcceptorImpl doesn't keep track of the threads that it launches.
         if (!this.processMessages || (crHelper.isShutdown())) {
           if (logger.isDebugEnabled()) {
             logger.debug("{} ignoring message of type {} from client {} due to shutdown.",
@@ -1078,8 +1073,6 @@ public class ServerConnection implements Runnable {
    */
   public Part updateAndGetSecurityPart() {
     // need to take care all message types here
-    // this.logger.fine("getSecurityPart() msgType = "
-    // + this.requestMsg.msgType);
     if (AcceptorImpl.isAuthenticationRequired()
         && this.handshake.getVersion().compareTo(Version.GFE_65) >= 0
         && (this.communicationMode != Acceptor.GATEWAY_TO_GATEWAY)
@@ -1090,40 +1083,40 @@ public class ServerConnection implements Runnable {
       if (AcceptorImpl.isAuthenticationRequired() && logger.isDebugEnabled()) {
         logger.debug(
             "ServerConnection.updateAndGetSecurityPart() not adding security part for msg type {}",
-            MessageType.getString(this.requestMsg.msgType));
+            MessageType.getString(this.requestMsg.messageType));
       }
     }
     return null;
   }
 
   private boolean isInternalMessage() {
-    return (this.requestMsg.msgType == MessageType.CLIENT_READY
-        || this.requestMsg.msgType == MessageType.CLOSE_CONNECTION
-        || this.requestMsg.msgType == MessageType.GETCQSTATS_MSG_TYPE
-        || this.requestMsg.msgType == MessageType.GET_CLIENT_PARTITION_ATTRIBUTES
-        || this.requestMsg.msgType == MessageType.GET_CLIENT_PR_METADATA
-        || this.requestMsg.msgType == MessageType.INVALID
-        || this.requestMsg.msgType == MessageType.MAKE_PRIMARY
-        || this.requestMsg.msgType == MessageType.MONITORCQ_MSG_TYPE
-        || this.requestMsg.msgType == MessageType.PERIODIC_ACK
-        || this.requestMsg.msgType == MessageType.PING
-        || this.requestMsg.msgType == MessageType.REGISTER_DATASERIALIZERS
-        || this.requestMsg.msgType == MessageType.REGISTER_INSTANTIATORS
-        || this.requestMsg.msgType == MessageType.REQUEST_EVENT_VALUE
-        || this.requestMsg.msgType == MessageType.ADD_PDX_TYPE
-        || this.requestMsg.msgType == MessageType.GET_PDX_ID_FOR_TYPE
-        || this.requestMsg.msgType == MessageType.GET_PDX_TYPE_BY_ID
-        || this.requestMsg.msgType == MessageType.SIZE
-        || this.requestMsg.msgType == MessageType.TX_FAILOVER
-        || this.requestMsg.msgType == MessageType.TX_SYNCHRONIZATION
-        || this.requestMsg.msgType == MessageType.GET_FUNCTION_ATTRIBUTES
-        || this.requestMsg.msgType == MessageType.ADD_PDX_ENUM
-        || this.requestMsg.msgType == MessageType.GET_PDX_ID_FOR_ENUM
-        || this.requestMsg.msgType == MessageType.GET_PDX_ENUM_BY_ID
-        || this.requestMsg.msgType == MessageType.GET_PDX_TYPES
-        || this.requestMsg.msgType == MessageType.GET_PDX_ENUMS
-        || this.requestMsg.msgType == MessageType.COMMIT
-        || this.requestMsg.msgType == MessageType.ROLLBACK);
+    return (this.requestMsg.messageType == MessageType.CLIENT_READY
+        || this.requestMsg.messageType == MessageType.CLOSE_CONNECTION
+        || this.requestMsg.messageType == MessageType.GETCQSTATS_MSG_TYPE
+        || this.requestMsg.messageType == MessageType.GET_CLIENT_PARTITION_ATTRIBUTES
+        || this.requestMsg.messageType == MessageType.GET_CLIENT_PR_METADATA
+        || this.requestMsg.messageType == MessageType.INVALID
+        || this.requestMsg.messageType == MessageType.MAKE_PRIMARY
+        || this.requestMsg.messageType == MessageType.MONITORCQ_MSG_TYPE
+        || this.requestMsg.messageType == MessageType.PERIODIC_ACK
+        || this.requestMsg.messageType == MessageType.PING
+        || this.requestMsg.messageType == MessageType.REGISTER_DATASERIALIZERS
+        || this.requestMsg.messageType == MessageType.REGISTER_INSTANTIATORS
+        || this.requestMsg.messageType == MessageType.REQUEST_EVENT_VALUE
+        || this.requestMsg.messageType == MessageType.ADD_PDX_TYPE
+        || this.requestMsg.messageType == MessageType.GET_PDX_ID_FOR_TYPE
+        || this.requestMsg.messageType == MessageType.GET_PDX_TYPE_BY_ID
+        || this.requestMsg.messageType == MessageType.SIZE
+        || this.requestMsg.messageType == MessageType.TX_FAILOVER
+        || this.requestMsg.messageType == MessageType.TX_SYNCHRONIZATION
+        || this.requestMsg.messageType == MessageType.GET_FUNCTION_ATTRIBUTES
+        || this.requestMsg.messageType == MessageType.ADD_PDX_ENUM
+        || this.requestMsg.messageType == MessageType.GET_PDX_ID_FOR_ENUM
+        || this.requestMsg.messageType == MessageType.GET_PDX_ENUM_BY_ID
+        || this.requestMsg.messageType == MessageType.GET_PDX_TYPES
+        || this.requestMsg.messageType == MessageType.GET_PDX_ENUMS
+        || this.requestMsg.messageType == MessageType.COMMIT
+        || this.requestMsg.messageType == MessageType.ROLLBACK);
   }
 
   public void run() {

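The isInternalMessage() change above is a mechanical rename of msgType to messageType; the method itself is a membership test over a fixed set of message type codes written as a long || chain. The same check can be expressed with a Set, as in the hypothetical sketch below; the numeric codes are placeholders, not the real MessageType constants.

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    // Hypothetical alternative to the long || chain: a set-membership test.
    final class InternalMessageTypes {

      // Placeholder codes standing in for MessageType.CLIENT_READY, PING, SIZE, ...
      private static final Set<Integer> INTERNAL_TYPES = Collections.unmodifiableSet(
          new HashSet<>(Arrays.asList(1, 2, 3, 4, 5, 6)));

      static boolean isInternal(int messageType) {
        return INTERNAL_TYPES.contains(messageType);
      }
    }
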
http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java b/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java
index 4e450c7..1afe6ff 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java
@@ -2149,7 +2149,7 @@ public class Connection implements Runnable {
                 logger.fatal(LocalizedMessage
                     .create(LocalizedStrings.Connection_FAILED_HANDLING_CHUNK_MESSAGE), ex);
               }
-            } else /* (msgType == END_CHUNKED_MSG_TYPE) */ {
+            } else /* (messageType == END_CHUNKED_MSG_TYPE) */ {
               MsgDestreamer md = obtainMsgDestreamer(msgId, remoteVersion);
               this.owner.getConduit().stats.incMessagesBeingReceived(md.size() == 0, len);
               try {

http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-core/src/main/java/org/apache/geode/internal/util/IOUtils.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/util/IOUtils.java b/geode-core/src/main/java/org/apache/geode/internal/util/IOUtils.java
index 031f827..80b16fc 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/util/IOUtils.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/util/IOUtils.java
@@ -30,8 +30,7 @@ import java.io.ObjectStreamClass;
 
 /**
  * Reusable Input/Output operation utility methods.
- * <p/>
- * 
+ *
  * @since GemFire 6.6
  */
 @SuppressWarnings("unused")
@@ -44,8 +43,7 @@ public abstract class IOUtils {
    * File.separator character. If the pathname is unspecified (null, empty or blank) then path
    * elements are considered relative to file system root, beginning with File.separator. If array
    * of path elements are null, then the pathname is returned as is.
-   * </p>
-   * 
+   *
    * @param pathname a String value indicating the base pathname.
    * @param pathElements the path elements to append to pathname.
    * @return the path elements appended to the pathname.

http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/MessageJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/MessageJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/MessageJUnitTest.java
index 86fcbce..b2d903c 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/MessageJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/MessageJUnitTest.java
@@ -32,53 +32,49 @@ import org.apache.geode.test.junit.categories.UnitTest;
 public class MessageJUnitTest {
 
   private Message message;
-  private Socket mockSocket;
-  private MessageStats mockStats;
-  private ByteBuffer msgBuffer;
-  private ServerConnection mockServerConnection;
 
   @Before
   public void setUp() throws Exception {
-    mockSocket = mock(Socket.class);
-    message = new Message(2, Version.CURRENT);
-    assertEquals(2, message.getNumberOfParts());
-    mockStats = mock(MessageStats.class);
-    msgBuffer = ByteBuffer.allocate(1000);
-    mockServerConnection = mock(ServerConnection.class);
-    message.setComms(mockServerConnection, mockSocket, msgBuffer, mockStats);
+    Socket mockSocket = mock(Socket.class);
+    this.message = new Message(2, Version.CURRENT);
+    assertEquals(2, this.message.getNumberOfParts());
+    MessageStats mockStats = mock(MessageStats.class);
+    ByteBuffer msgBuffer = ByteBuffer.allocate(1000);
+    ServerConnection mockServerConnection = mock(ServerConnection.class);
+    this.message.setComms(mockServerConnection, mockSocket, msgBuffer, mockStats);
   }
 
   @Test
   public void clearDoesNotThrowNPE() throws Exception {
     // unsetComms clears the message's ByteBuffer, which was causing an NPE during shutdown
     // when clear() was invoked
-    message.unsetComms();
-    message.clear();
+    this.message.unsetComms();
+    this.message.clear();
   }
 
   @Test
   public void numberOfPartsIsAdjusted() {
-    int numParts = message.getNumberOfParts();
-    message.setNumberOfParts(2 * numParts + 1);
-    assertEquals(2 * numParts + 1, message.getNumberOfParts());
-    message.addBytesPart(new byte[1]);
-    message.addIntPart(2);
-    message.addLongPart(3);
-    message.addObjPart("4");
-    message.addStringPart("5");
-    assertEquals(5, message.getNextPartNumber());
+    int numParts = this.message.getNumberOfParts();
+    this.message.setNumberOfParts(2 * numParts + 1);
+    assertEquals(2 * numParts + 1, this.message.getNumberOfParts());
+    this.message.addBytesPart(new byte[1]);
+    this.message.addIntPart(2);
+    this.message.addLongPart(3);
+    this.message.addObjPart("4");
+    this.message.addStringPart("5");
+    assertEquals(5, this.message.getNextPartNumber());
   }
 
   @Test
   public void messageLongerThanMaxIntIsRejected() throws Exception {
-    Part[] parts = new Part[2];
     Part mockPart1 = mock(Part.class);
     when(mockPart1.getLength()).thenReturn(Integer.MAX_VALUE / 2);
+    Part[] parts = new Part[2];
     parts[0] = mockPart1;
     parts[1] = mockPart1;
-    message.setParts(parts);
+    this.message.setParts(parts);
     try {
-      message.send();
+      this.message.send();
       fail("expected an exception but none was thrown");
     } catch (MessageTooLargeException e) {
       assertTrue(e.getMessage().contains("exceeds maximum integer value"));
@@ -87,14 +83,14 @@ public class MessageJUnitTest {
 
   @Test
   public void maxMessageSizeIsRespected() throws Exception {
-    Part[] parts = new Part[2];
     Part mockPart1 = mock(Part.class);
-    when(mockPart1.getLength()).thenReturn(Message.MAX_MESSAGE_SIZE / 2);
+    when(mockPart1.getLength()).thenReturn(Message.DEFAULT_MAX_MESSAGE_SIZE / 2);
+    Part[] parts = new Part[2];
     parts[0] = mockPart1;
     parts[1] = mockPart1;
-    message.setParts(parts);
+    this.message.setParts(parts);
     try {
-      message.send();
+      this.message.send();
       fail("expected an exception but none was thrown");
     } catch (MessageTooLargeException e) {
       assertFalse(e.getMessage().contains("exceeds maximum integer value"));
@@ -103,21 +99,17 @@ public class MessageJUnitTest {
 
   /**
    * geode-1468: Message should clear the chunks in its Parts when performing cleanup.
-   * 
-   * @throws Exception
    */
   @Test
   public void streamBuffersAreClearedDuringCleanup() throws Exception {
-    Part[] parts = new Part[2];
     Part mockPart1 = mock(Part.class);
     when(mockPart1.getLength()).thenReturn(100);
+    Part[] parts = new Part[2];
     parts[0] = mockPart1;
     parts[1] = mockPart1;
-    message.setParts(parts);
-    message.clearParts();
+    this.message.setParts(parts);
+    this.message.clearParts();
     verify(mockPart1, times(2)).clear();
   }
 
-  // TODO many more tests are needed
-
 }
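
The rewritten MessageJUnitTest keeps the same Mockito pattern throughout: stub a collaborator with mock()/when(), drive the object under test, then check the interaction with verify(). A minimal, self-contained version of that pattern follows; the Buffer interface is a made-up stand-in for Part.

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;
    import static org.mockito.Mockito.when;

    import org.junit.Test;

    public class MockVerifyExampleTest {

      // Made-up collaborator standing in for Part.
      interface Buffer {
        int getLength();
        void clear();
      }

      @Test
      public void clearIsCalledOnceForEachReference() {
        Buffer mockBuffer = mock(Buffer.class);
        when(mockBuffer.getLength()).thenReturn(100);

        // the same mock appears twice, mirroring parts[0] = parts[1] = mockPart1
        Buffer[] buffers = {mockBuffer, mockBuffer};
        for (Buffer buffer : buffers) {
          if (buffer.getLength() > 0) {
            buffer.clear();
          }
        }

        verify(mockBuffer, times(2)).clear();
      }
    }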

http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
index 5a679bb..110d649 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
@@ -600,11 +600,11 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     RegionTestCase.preSnapshotRegion = null;
     SocketCreator.resetHostNameCache();
     SocketCreator.resolve_dns = true;
-    Message.MAX_MESSAGE_SIZE = Message.DEFAULT_MAX_MESSAGE_SIZE;
 
     // clear system properties -- keep alphabetized
     System.clearProperty(DistributionConfig.GEMFIRE_PREFIX + "log-level");
     System.clearProperty("jgroups.resolve_dns");
+    System.clearProperty(Message.MAX_MESSAGE_SIZE_PROPERTY);
 
     if (InternalDistributedSystem.systemAttemptingReconnect != null) {
       InternalDistributedSystem.systemAttemptingReconnect.stopReconnecting();

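Here the test framework no longer resets a mutable Message.MAX_MESSAGE_SIZE field and instead clears Message.MAX_MESSAGE_SIZE_PROPERTY, which implies the limit is now derived from a system property. A hypothetical sketch of that lookup pattern follows; the property name is taken from the log message in the removed setMaximumMessageSize() helper shown in the next diff, and the default value is a placeholder, not the real constant.

    // Hypothetical sketch of resolving a size limit from a system property.
    final class MaxMessageSizeSketch {

      // Property name from the removed test helper's log message; the default is a placeholder.
      static final String MAX_MESSAGE_SIZE_PROPERTY = "gemfire.client.max-message-size";
      static final int DEFAULT_MAX_MESSAGE_SIZE = 8 * 1024 * 1024;

      static int maxMessageSize() {
        return Integer.getInteger(MAX_MESSAGE_SIZE_PROPERTY, DEFAULT_MAX_MESSAGE_SIZE);
      }
    }
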
http://git-wip-us.apache.org/repos/asf/geode/blob/f8786f53/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDUnitTest.java b/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDUnitTest.java
index f403447..8cedbf0 100644
--- a/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDUnitTest.java
+++ b/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDUnitTest.java
@@ -14,8 +14,10 @@
  */
 package org.apache.geode.internal.cache.wan.parallel;
 
+import static org.apache.geode.internal.cache.tier.sockets.Message.MAX_MESSAGE_SIZE_PROPERTY;
 import static org.apache.geode.test.dunit.Assert.*;
 
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -31,8 +33,8 @@ import org.apache.geode.test.dunit.IgnoredException;
 import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.RMIException;
 import org.apache.geode.test.dunit.Wait;
+import org.apache.geode.test.dunit.rules.DistributedRestoreSystemProperties;
 import org.apache.geode.test.junit.categories.DistributedTest;
-import org.apache.geode.test.junit.categories.FlakyTest;
 
 /**
  * DUnit test for operations on ParallelGatewaySender
@@ -40,6 +42,9 @@ import org.apache.geode.test.junit.categories.FlakyTest;
 @Category(DistributedTest.class)
 public class ParallelGatewaySenderOperationsDUnitTest extends WANTestBase {
 
+  @Rule
+  public DistributedRestoreSystemProperties restoreSystemProperties = new DistributedRestoreSystemProperties();
+
   @Override
   protected final void postSetUpWANTestBase() throws Exception {
     IgnoredException.addIgnoredException("Broken pipe||Unexpected IOException");
@@ -582,13 +587,14 @@ public class ParallelGatewaySenderOperationsDUnitTest extends WANTestBase {
 
   @Test
   public void testParallelGatewaySenderMessageTooLargeException() {
+    vm4.invoke(() -> System.setProperty(MAX_MESSAGE_SIZE_PROPERTY, String.valueOf(1024 * 1024)));
+
     Integer[] locatorPorts = createLNAndNYLocators();
     Integer lnPort = locatorPorts[0];
     Integer nyPort = locatorPorts[1];
 
     // Create and start sender with reduced maximum message size and 1 dispatcher thread
     String regionName = getTestMethodName() + "_PR";
-    vm4.invoke(() -> setMaximumMessageSize(1024 * 1024));
     vm4.invoke(() -> createCache(lnPort));
     vm4.invoke(() -> setNumDispatcherThreadsForTheRun(1));
     vm4.invoke(() -> createSender("ln", 2, true, 100, 100, false, false, null, false));
@@ -617,12 +623,6 @@ public class ParallelGatewaySenderOperationsDUnitTest extends WANTestBase {
     ignoredGIOE.remove();
   }
 
-  private void setMaximumMessageSize(int maximumMessageSizeBytes) {
-    Message.MAX_MESSAGE_SIZE = maximumMessageSizeBytes;
-    LogWriterUtils.getLogWriter().info("Set gemfire.client.max-message-size: "
-        + System.getProperty(DistributionConfig.GEMFIRE_PREFIX + "client.max-message-size"));
-  }
-
   private void createSendersReceiversAndPartitionedRegion(Integer lnPort, Integer nyPort,
       boolean createAccessors, boolean startSenders) {
     // Note: This is a test-specific method used by several test to create

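The new DistributedRestoreSystemProperties @Rule is what lets the test set MAX_MESSAGE_SIZE_PROPERTY inside vm4 without leaking it into later tests. The sketch below shows a single-VM, single-property version of the idiom built on JUnit's ExternalResource; the property name is made up, and the rule is a simplification of what the distributed rule does across all test VMs.

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.ExternalResource;

    public class RestorePropertyRuleExampleTest {

      // Made-up property name, used only for this illustration.
      private static final String PROPERTY = "example.max-message-size";

      @Rule
      public final ExternalResource restoreProperty = new ExternalResource() {
        private String previousValue;

        @Override
        protected void before() {
          previousValue = System.getProperty(PROPERTY);
        }

        @Override
        protected void after() {
          if (previousValue == null) {
            System.clearProperty(PROPERTY);
          } else {
            System.setProperty(PROPERTY, previousValue);
          }
        }
      };

      @Test
      public void overrideIsVisibleOnlyDuringThisTest() {
        System.setProperty(PROPERTY, String.valueOf(1024 * 1024));
        // code under test that reads the property would run here
      }
    }
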

[19/43] geode git commit: Cleanup BaseCommand

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RollbackCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RollbackCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RollbackCommand.java
index a579170..cd12ea7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RollbackCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RollbackCommand.java
@@ -39,18 +39,18 @@ public class RollbackCommand extends BaseCommand {
   private RollbackCommand() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException, InterruptedException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    TXManagerImpl txMgr = (TXManagerImpl) servConn.getCache().getCacheTransactionManager();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    TXManagerImpl txMgr = (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
     InternalDistributedMember client =
-        (InternalDistributedMember) servConn.getProxyID().getDistributedMember();
-    int uniqId = msg.getTransactionId();
+        (InternalDistributedMember) serverConnection.getProxyID().getDistributedMember();
+    int uniqId = clientMessage.getTransactionId();
     TXId txId = new TXId(client, uniqId);
     if (txMgr.isHostedTxRecentlyCompleted(txId)) {
       if (logger.isDebugEnabled()) {
         logger.debug("TX: found a recently rolled back tx: {}", txId);
-        sendRollbackReply(msg, servConn);
+        sendRollbackReply(clientMessage, serverConnection);
         txMgr.removeHostedTXState(txId);
         return;
       }
@@ -60,16 +60,16 @@ public class RollbackCommand extends BaseCommand {
       if (txState != null) {
         txId = txState.getTxId();
         txMgr.rollback();
-        sendRollbackReply(msg, servConn);
+        sendRollbackReply(clientMessage, serverConnection);
       } else {
         // could not find TxState in the host server.
         // Protect against a failover command received so late,
         // and it is removed from the failoverMap due to capacity.
-        sendRollbackReply(msg, servConn);
+        sendRollbackReply(clientMessage, serverConnection);
       }
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     } finally {
       if (logger.isDebugEnabled()) {
         logger.debug("TX: removing tx state for {}", txId);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Size.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Size.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Size.java
index c78f4d9..42e14a3 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Size.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Size.java
@@ -56,18 +56,18 @@ public class Size extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     StringBuilder errMessage = new StringBuilder();
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
 
     long oldStart = start;
     start = DistributionStats.getStatTime();
     stats.incReadSizeRequestTime(start - oldStart);
     // Retrieve the data from the message parts
-    Part regionNamePart = msg.getPart(0);
+    Part regionNamePart = clientMessage.getPart(0);
     String regionName = regionNamePart.getString();
 
     if (regionName == null) {
@@ -76,8 +76,8 @@ public class Size extends BaseCommand {
       errMessage
           .append(LocalizedStrings.BaseCommand__THE_INPUT_REGION_NAME_FOR_THE_0_REQUEST_IS_NULL
               .toLocalizedString("size"));
-      writeErrorResponse(msg, MessageType.SIZE_ERROR, errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.SIZE_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -85,38 +85,38 @@ public class Size extends BaseCommand {
     if (region == null) {
       String reason = LocalizedStrings.BaseCommand__0_WAS_NOT_FOUND_DURING_1_REQUEST
           .toLocalizedString(regionName, "size");
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     // Size the entry
     try {
       this.securityService.authorizeRegionRead(regionName);
-      writeSizeResponse(region.size(), msg, servConn);
+      writeSizeResponse(region.size(), clientMessage, serverConnection);
     } catch (RegionDestroyedException rde) {
-      writeException(msg, rde, false, servConn);
+      writeException(clientMessage, rde, false, serverConnection);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
 
       // If an exception occurs during the destroy, preserve the connection
-      writeException(msg, e, false, servConn);
+      writeException(clientMessage, e, false, serverConnection);
       if (e instanceof GemFireSecurityException) {
         // Fine logging for security exceptions since these are already
         // logged by the security logger
         if (logger.isDebugEnabled()) {
-          logger.debug("{}: Unexpected Security exception", servConn.getName(), e);
+          logger.debug("{}: Unexpected Security exception", serverConnection.getName(), e);
         }
       } else {
         logger.warn(LocalizedMessage.create(LocalizedStrings.BaseCommand_0_UNEXPECTED_EXCEPTION,
-            servConn.getName()), e);
+            serverConnection.getName()), e);
       }
     } finally {
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sent size response for region {}", servConn.getName(), regionName);
+        logger.debug("{}: Sent size response for region {}", serverConnection.getName(), regionName);
       }
-      servConn.setAsTrue(RESPONDED);
+      serverConnection.setAsTrue(RESPONDED);
       stats.incWriteSizeResponseTime(DistributionStats.getStatTime() - start);
     }
   }
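
Size.cmdExecute() above shows the flow every command class in this commit follows: flag that a response is required, pull the parts out of the client message, validate and authorize, then write either a reply or an exception and mark the connection as responded on every exit path. The sketch below captures that shape with invented interfaces; it is not the Geode API.

    import java.io.IOException;

    final class CommandFlowSketch {

      // Invented stand-ins for Message and ServerConnection.
      interface Request {
        String regionName();
      }

      interface Connection {
        void requireResponse();
        void markResponded();
        void writeReply(String body) throws IOException;
        void writeError(String message) throws IOException;
      }

      static void execute(Request request, Connection connection) throws IOException {
        connection.requireResponse();

        String regionName = request.regionName();
        if (regionName == null) {
          // invalid input: report the error, but still mark the connection as responded
          connection.writeError("region name is required");
          connection.markResponded();
          return;
        }

        try {
          connection.writeReply("size=" + regionName.length()); // stand-in for the real work
        } finally {
          connection.markResponded();
        }
      }
    }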

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXFailoverCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXFailoverCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXFailoverCommand.java
index 72eab50..9fc3fd1 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXFailoverCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXFailoverCommand.java
@@ -49,23 +49,23 @@ public class TXFailoverCommand extends BaseCommand {
   private TXFailoverCommand() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException, InterruptedException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     // Build the TXId for the transaction
     InternalDistributedMember client =
-        (InternalDistributedMember) servConn.getProxyID().getDistributedMember();
-    int uniqId = msg.getTransactionId();
+        (InternalDistributedMember) serverConnection.getProxyID().getDistributedMember();
+    int uniqId = clientMessage.getTransactionId();
     if (logger.isDebugEnabled()) {
       logger.debug("TX: Transaction {} from {} is failing over to this server", uniqId, client);
     }
     TXId txId = new TXId(client, uniqId);
-    TXManagerImpl mgr = (TXManagerImpl) servConn.getCache().getCacheTransactionManager();
+    TXManagerImpl mgr = (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
     mgr.waitForCompletingTransaction(txId); // in case it's already completing here in another
                                             // thread
     if (mgr.isHostedTxRecentlyCompleted(txId)) {
-      writeReply(msg, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeReply(clientMessage, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       mgr.removeHostedTXState(txId);
       return;
     }
@@ -75,7 +75,7 @@ public class TXFailoverCommand extends BaseCommand {
     if (!tx.isRealDealLocal()) {
       // send message to all peers to find out who hosts the transaction
       FindRemoteTXMessageReplyProcessor processor =
-          FindRemoteTXMessage.send(servConn.getCache(), txId);
+          FindRemoteTXMessage.send(serverConnection.getCache(), txId);
       try {
         processor.waitForRepliesUninterruptibly();
       } catch (ReplyException e) {
@@ -96,7 +96,7 @@ public class TXFailoverCommand extends BaseCommand {
         // bug #42228 and bug #43504 - this cannot return until the current view
         // has been installed by all members, so that dlocks are released and
         // the same keys can be used in a new transaction by the same client thread
-        InternalCache cache = servConn.getCache();
+        InternalCache cache = serverConnection.getCache();
         try {
           WaitForViewInstallation.send((DistributionManager) cache.getDistributionManager());
         } catch (InterruptedException e) {
@@ -110,9 +110,9 @@ public class TXFailoverCommand extends BaseCommand {
           }
           mgr.saveTXCommitMessageForClientFailover(txId, processor.getTxCommitMessage());
         } else {
-          writeException(msg, new TransactionDataNodeHasDepartedException(
-              "Could not find transaction host for " + txId), false, servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeException(clientMessage, new TransactionDataNodeHasDepartedException(
+              "Could not find transaction host for " + txId), false, serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           mgr.removeHostedTXState(txId);
           return;
         }
@@ -121,8 +121,8 @@ public class TXFailoverCommand extends BaseCommand {
     if (!wasInProgress) {
       mgr.setInProgress(false);
     }
-    writeReply(msg, servConn);
-    servConn.setAsTrue(RESPONDED);
+    writeReply(clientMessage, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXSynchronizationCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXSynchronizationCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXSynchronizationCommand.java
index 8cedd2c..c5b9fc5 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXSynchronizationCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXSynchronizationCommand.java
@@ -15,7 +15,6 @@
 
 package org.apache.geode.internal.cache.tier.sockets.command;
 
-import org.apache.geode.cache.SynchronizationCommitConflictException;
 import org.apache.geode.cache.client.internal.TXSynchronizationOp.CompletionType;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.ReplyException;
@@ -54,7 +53,7 @@ public class TXSynchronizationCommand extends BaseCommand {
    * org.apache.geode.internal.cache.tier.sockets.ServerConnection)
    */
   @Override
-  protected boolean shouldMasqueradeForTx(Message msg, ServerConnection servConn) {
+  protected boolean shouldMasqueradeForTx(Message clientMessage, ServerConnection serverConnection) {
     // masquerading is done in the waiting thread pool
     return false;
   }
@@ -68,26 +67,26 @@ public class TXSynchronizationCommand extends BaseCommand {
    * long)
    */
   @Override
-  public void cmdExecute(final Message msg, final ServerConnection servConn, long start)
+  public void cmdExecute(final Message clientMessage, final ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException, InterruptedException {
 
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
 
-    CompletionType type = CompletionType.values()[msg.getPart(0).getInt()];
-    /* int txIdInt = */ msg.getPart(1).getInt(); // [bruce] not sure if we need to transmit this
+    CompletionType type = CompletionType.values()[clientMessage.getPart(0).getInt()];
+    /* int txIdInt = */ clientMessage.getPart(1).getInt(); // [bruce] not sure if we need to transmit this
     final Part statusPart;
     if (type == CompletionType.AFTER_COMPLETION) {
-      statusPart = msg.getPart(2);
+      statusPart = clientMessage.getPart(2);
     } else {
       statusPart = null;
     }
 
-    final TXManagerImpl txMgr = (TXManagerImpl) servConn.getCache().getCacheTransactionManager();
+    final TXManagerImpl txMgr = (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
     final InternalDistributedMember member =
-        (InternalDistributedMember) servConn.getProxyID().getDistributedMember();
+        (InternalDistributedMember) serverConnection.getProxyID().getDistributedMember();
 
     // get the tx state without associating it with this thread. That's done later
-    final TXStateProxy txProxy = txMgr.masqueradeAs(msg, member, true);
+    final TXStateProxy txProxy = txMgr.masqueradeAs(clientMessage, member, true);
 
     // we have to run beforeCompletion and afterCompletion in the same thread
     // because beforeCompletion obtains locks for the thread and afterCompletion
@@ -102,21 +101,21 @@ public class TXSynchronizationCommand extends BaseCommand {
               TXStateProxy txState = null;
               Throwable failureException = null;
               try {
-                txState = txMgr.masqueradeAs(msg, member, false);
+                txState = txMgr.masqueradeAs(clientMessage, member, false);
                 if (isDebugEnabled) {
                   logger.debug("Executing beforeCompletion() notification for transaction {}",
-                      msg.getTransactionId());
+                      clientMessage.getTransactionId());
                 }
                 txState.setIsJTA(true);
                 txState.beforeCompletion();
                 try {
-                  writeReply(msg, servConn);
+                  writeReply(clientMessage, serverConnection);
                 } catch (IOException e) {
                   if (isDebugEnabled) {
                     logger.debug("Problem writing reply to client", e);
                   }
                 }
-                servConn.setAsTrue(RESPONDED);
+                serverConnection.setAsTrue(RESPONDED);
               } catch (ReplyException e) {
                 failureException = e.getCause();
               } catch (InterruptedException e) {
@@ -128,13 +127,13 @@ public class TXSynchronizationCommand extends BaseCommand {
               }
               if (failureException != null) {
                 try {
-                  writeException(msg, failureException, false, servConn);
+                  writeException(clientMessage, failureException, false, serverConnection);
                 } catch (IOException ioe) {
                   if (isDebugEnabled) {
                     logger.debug("Problem writing reply to client", ioe);
                   }
                 }
-                servConn.setAsTrue(RESPONDED);
+                serverConnection.setAsTrue(RESPONDED);
               }
             }
           };
@@ -150,11 +149,11 @@ public class TXSynchronizationCommand extends BaseCommand {
             public void run() {
               TXStateProxy txState = null;
               try {
-                txState = txMgr.masqueradeAs(msg, member, false);
+                txState = txMgr.masqueradeAs(clientMessage, member, false);
                 int status = statusPart.getInt();
                 if (isDebugEnabled) {
                   logger.debug("Executing afterCompletion({}) notification for transaction {}",
-                      status, msg.getTransactionId());
+                      status, clientMessage.getTransactionId());
                 }
                 txState.setIsJTA(true);
                 txState.afterCompletion(status);
@@ -162,7 +161,7 @@ public class TXSynchronizationCommand extends BaseCommand {
                 // where it can be applied to the local cache
                 TXCommitMessage cmsg = txState.getCommitMessage();
                 try {
-                  CommitCommand.writeCommitResponse(cmsg, msg, servConn);
+                  CommitCommand.writeCommitResponse(cmsg, clientMessage, serverConnection);
                   txMgr.removeHostedTXState(txState.getTxId());
                 } catch (IOException e) {
                   // not much can be done here
@@ -170,16 +169,16 @@ public class TXSynchronizationCommand extends BaseCommand {
                     logger.warn("Problem writing reply to client", e);
                   }
                 }
-                servConn.setAsTrue(RESPONDED);
+                serverConnection.setAsTrue(RESPONDED);
               } catch (RuntimeException e) {
                 try {
-                  writeException(msg, e, false, servConn);
+                  writeException(clientMessage, e, false, serverConnection);
                 } catch (IOException ioe) {
                   if (isDebugEnabled) {
                     logger.debug("Problem writing reply to client", ioe);
                   }
                 }
-                servConn.setAsTrue(RESPONDED);
+                serverConnection.setAsTrue(RESPONDED);
               } catch (InterruptedException e) {
                 Thread.currentThread().interrupt();
               } finally {
@@ -195,12 +194,12 @@ public class TXSynchronizationCommand extends BaseCommand {
             sync.runSecondRunnable(afterCompletion);
           } else {
             if (statusPart.getInt() == Status.STATUS_COMMITTED) {
-              TXStateProxy txState = txMgr.masqueradeAs(msg, member, false);
+              TXStateProxy txState = txMgr.masqueradeAs(clientMessage, member, false);
               try {
                 if (isDebugEnabled) {
                   logger.debug(
                       "Executing beforeCompletion() notification for transaction {} after failover",
-                      msg.getTransactionId());
+                      clientMessage.getTransactionId());
                 }
                 txState.setIsJTA(true);
                 txState.beforeCompletion();
@@ -212,8 +211,8 @@ public class TXSynchronizationCommand extends BaseCommand {
           }
         }
       } catch (Exception e) {
-        writeException(msg, MessageType.EXCEPTION, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, MessageType.EXCEPTION, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       }
       if (isDebugEnabled) {
         logger.debug("Sent tx synchronization response");

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterest.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterest.java
index 7dbb78f..597f92b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterest.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterest.java
@@ -45,43 +45,43 @@ public class UnregisterInterest extends BaseCommand {
   UnregisterInterest() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws ClassNotFoundException, IOException {
     Part regionNamePart = null, keyPart = null;
     String regionName = null;
     Object key = null;
     int interestType = 0;
     StringId errMessage = null;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
 
-    regionNamePart = msg.getPart(0);
-    interestType = msg.getPart(1).getInt();
-    keyPart = msg.getPart(2);
-    Part isClosingPart = msg.getPart(3);
+    regionNamePart = clientMessage.getPart(0);
+    interestType = clientMessage.getPart(1).getInt();
+    keyPart = clientMessage.getPart(2);
+    Part isClosingPart = clientMessage.getPart(3);
     byte[] isClosingPartBytes = (byte[]) isClosingPart.getObject();
     boolean isClosing = isClosingPartBytes[0] == 0x01;
     regionName = regionNamePart.getString();
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     boolean keepalive = false;
     try {
-      Part keepalivePart = msg.getPart(4);
+      Part keepalivePart = clientMessage.getPart(4);
       byte[] keepaliveBytes = (byte[]) keepalivePart.getObject();
       keepalive = keepaliveBytes[0] != 0x00;
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received unregister interest request ({} bytes) from {} for region {} key {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
     }
 
     // Process the unregister interest request
@@ -95,9 +95,9 @@ public class UnregisterInterest extends BaseCommand {
       errMessage =
           LocalizedStrings.UnRegisterInterest_THE_INPUT_REGION_NAME_FOR_THE_UNREGISTER_INTEREST_REQUEST_IS_NULL;
       String s = errMessage.toLocalizedString();
-      logger.warn("{}: {}", servConn.getName(), s);
-      writeErrorResponse(msg, MessageType.UNREGISTER_INTEREST_DATA_ERROR, s, servConn);
-      servConn.setAsTrue(RESPONDED);
+      logger.warn("{}: {}", serverConnection.getName(), s);
+      writeErrorResponse(clientMessage, MessageType.UNREGISTER_INTEREST_DATA_ERROR, s, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -108,12 +108,12 @@ public class UnregisterInterest extends BaseCommand {
         this.securityService.authorizeRegionRead(regionName, key.toString());
       }
     } catch (NotAuthorizedException ex) {
-      writeException(msg, ex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, ex, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+    AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
     if (authzRequest != null) {
       if (!DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
         try {
@@ -121,8 +121,8 @@ public class UnregisterInterest extends BaseCommand {
               authzRequest.unregisterInterestAuthorize(regionName, key, interestType);
           key = unregisterContext.getKey();
         } catch (NotAuthorizedException ex) {
-          writeException(msg, ex, false, servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeException(clientMessage, ex, false, serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
       }
@@ -141,17 +141,17 @@ public class UnregisterInterest extends BaseCommand {
      */
     // Unregister interest irrelevent of whether the region is present it or
     // not
-    servConn.getAcceptor().getCacheClientNotifier().unregisterClientInterest(regionName, key,
-        interestType, isClosing, servConn.getProxyID(), keepalive);
+    serverConnection.getAcceptor().getCacheClientNotifier().unregisterClientInterest(regionName, key,
+        interestType, isClosing, serverConnection.getProxyID(), keepalive);
 
     // Update the statistics and write the reply
     // bserverStats.incLong(processDestroyTimeId,
     // DistributionStats.getStatTime() - start);
     // start = DistributionStats.getStatTime();
-    writeReply(msg, servConn);
-    servConn.setAsTrue(RESPONDED);
+    writeReply(clientMessage, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent unregister interest response for region {} key {}", servConn.getName(),
+      logger.debug("{}: Sent unregister interest response for region {} key {}", serverConnection.getName(),
           regionName, key);
     }
     // bserverStats.incLong(writeDestroyResponseTimeId,

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterestList.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterestList.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterestList.java
index 7369587..76cbba2 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterestList.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterestList.java
@@ -46,48 +46,48 @@ public class UnregisterInterestList extends BaseCommand {
   private UnregisterInterestList() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
     Part regionNamePart = null, keyPart = null, numberOfKeysPart = null;
     String regionName = null;
     Object key = null;
     List keys = null;
     int numberOfKeys = 0, partNumber = 0;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
 
     // bserverStats.incLong(readDestroyRequestTimeId,
     // DistributionStats.getStatTime() - start);
     // bserverStats.incInt(destroyRequestsId, 1);
     // start = DistributionStats.getStatTime();
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
+    regionNamePart = clientMessage.getPart(0);
     regionName = regionNamePart.getString();
 
-    Part isClosingListPart = msg.getPart(1);
+    Part isClosingListPart = clientMessage.getPart(1);
     byte[] isClosingListPartBytes = (byte[]) isClosingListPart.getObject();
     boolean isClosingList = isClosingListPartBytes[0] == 0x01;
     boolean keepalive = false;
     try {
-      Part keepalivePart = msg.getPart(2);
+      Part keepalivePart = clientMessage.getPart(2);
       byte[] keepalivePartBytes = (byte[]) keepalivePart.getObject();
       keepalive = keepalivePartBytes[0] == 0x01;
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
-    numberOfKeysPart = msg.getPart(3);
+    numberOfKeysPart = clientMessage.getPart(3);
     numberOfKeys = numberOfKeysPart.getInt();
 
     partNumber = 4;
     keys = new ArrayList();
     for (int i = 0; i < numberOfKeys; i++) {
-      keyPart = msg.getPart(partNumber + i);
+      keyPart = clientMessage.getPart(partNumber + i);
       try {
         key = keyPart.getStringOrObject();
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
       keys.add(key);
@@ -95,7 +95,7 @@ public class UnregisterInterestList extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received unregister interest request ({} bytes) from {} for the following {} keys in region {}: {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), numberOfKeys,
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), numberOfKeys,
           regionName, keys);
     }
 
@@ -113,22 +113,22 @@ public class UnregisterInterestList extends BaseCommand {
             LocalizedStrings.UnRegisterInterest_THE_INPUT_REGION_NAME_FOR_THE_UNREGISTER_INTEREST_REQUEST_IS_NULL;
       }
       String s = errMessage.toLocalizedString();
-      logger.warn("{}: {}", servConn.getName(), s);
-      writeErrorResponse(msg, MessageType.UNREGISTER_INTEREST_DATA_ERROR, s, servConn);
-      servConn.setAsTrue(RESPONDED);
+      logger.warn("{}: {}", serverConnection.getName(), s);
+      writeErrorResponse(clientMessage, MessageType.UNREGISTER_INTEREST_DATA_ERROR, s, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     try {
       this.securityService.authorizeRegionRead(regionName);
     } catch (NotAuthorizedException ex) {
-      writeException(msg, ex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, ex, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
 
-    AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+    AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
     if (authzRequest != null) {
       if (!DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
         try {
@@ -136,8 +136,8 @@ public class UnregisterInterestList extends BaseCommand {
               authzRequest.unregisterInterestListAuthorize(regionName, keys);
           keys = (List) unregisterContext.getKey();
         } catch (NotAuthorizedException ex) {
-          writeException(msg, ex, false, servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeException(clientMessage, ex, false, serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
       }
@@ -155,20 +155,20 @@ public class UnregisterInterestList extends BaseCommand {
      * responded = true; } else {
      */
     // Register interest
-    servConn.getAcceptor().getCacheClientNotifier().unregisterClientInterest(regionName, keys,
-        isClosingList, servConn.getProxyID(), keepalive);
+    serverConnection.getAcceptor().getCacheClientNotifier().unregisterClientInterest(regionName, keys,
+        isClosingList, serverConnection.getProxyID(), keepalive);
 
     // Update the statistics and write the reply
     // bserverStats.incLong(processDestroyTimeId,
     // DistributionStats.getStatTime() - start);
     // start = DistributionStats.getStatTime(); WHY ARE GETTING START AND NOT
     // USING IT?
-    writeReply(msg, servConn);
-    servConn.setAsTrue(RESPONDED);
+    writeReply(clientMessage, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Sent unregister interest response for the following {} keys in region {}: {}",
-          servConn.getName(), numberOfKeys, regionName, keys);
+          serverConnection.getName(), numberOfKeys, regionName, keys);
     }
     // bserverStats.incLong(writeDestroyResponseTimeId,
     // DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UpdateClientNotification.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UpdateClientNotification.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UpdateClientNotification.java
index 57aca22..b870a96 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UpdateClientNotification.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UpdateClientNotification.java
@@ -35,8 +35,8 @@ public class UpdateClientNotification extends BaseCommand {
   private UpdateClientNotification() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
-    CacheServerStats stats = servConn.getCacheServerStats();
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+    CacheServerStats stats = serverConnection.getCacheServerStats();
     {
       long oldStart = start;
       start = DistributionStats.getStatTime();

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseCQ.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseCQ.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseCQ.java
index ac9b5da..72719b2 100644
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseCQ.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseCQ.java
@@ -44,30 +44,30 @@ public class CloseCQ extends BaseCQCommand {
   private CloseCQ() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    ClientProxyMembershipID id = servConn.getProxyID();
-    CacheServerStats stats = servConn.getCacheServerStats();
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    ClientProxyMembershipID id = serverConnection.getProxyID();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
     // Based on MessageType.QUERY
     // Added by Rao 2/1/2007
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
     start = DistributionStats.getStatTime();
     // Retrieve the data from the message parts
-    String cqName = msg.getPart(0).getString();
+    String cqName = clientMessage.getPart(0).getString();
 
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received close CQ request from {} cqName: {}", servConn.getName(),
-          servConn.getSocketString(), cqName);
+      logger.debug("{}: Received close CQ request from {} cqName: {}", serverConnection.getName(),
+          serverConnection.getSocketString(), cqName);
     }
 
     // Process the query request
     if (cqName == null) {
       String err =
           LocalizedStrings.CloseCQ_THE_CQNAME_FOR_THE_CQ_CLOSE_REQUEST_IS_NULL.toLocalizedString();
-      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, msg.getTransactionId(), null, servConn);
+      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
       return;
     }
 
@@ -85,7 +85,7 @@ public class CloseCQ extends BaseCQCommand {
       }
       InternalCqQuery cqQuery = cqService.getCq(serverCqName);
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         String queryStr = null;
         Set cqRegionNames = null;
@@ -102,22 +102,22 @@ public class CloseCQ extends BaseCQCommand {
       // getMembershipID());
       cqService.closeCq(cqName, id);
       if (cqQuery != null)
-        servConn.removeCq(cqName, cqQuery.isDurable());
+        serverConnection.removeCq(cqName, cqQuery.isDurable());
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", msg.getTransactionId(), cqe, servConn);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
       return;
     } catch (Exception e) {
       String err =
           LocalizedStrings.CloseCQ_EXCEPTION_WHILE_CLOSING_CQ_CQNAME_0.toLocalizedString(cqName);
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, msg.getTransactionId(), e, servConn);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e, serverConnection);
       return;
     }
 
     // Send OK to client
     sendCqResponse(MessageType.REPLY,
-        LocalizedStrings.CloseCQ_CQ_CLOSED_SUCCESSFULLY.toLocalizedString(), msg.getTransactionId(),
-        null, servConn);
-    servConn.setAsTrue(RESPONDED);
+        LocalizedStrings.CloseCQ_CQ_CLOSED_SUCCESSFULLY.toLocalizedString(), clientMessage.getTransactionId(),
+        null, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
 
     {
       long oldStart = start;

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ.java
index 9bddbc7..d2a4453 100644
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ.java
@@ -52,27 +52,27 @@ public class ExecuteCQ extends BaseCQCommand {
   private ExecuteCQ() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
-    AcceptorImpl acceptor = servConn.getAcceptor();
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    ClientProxyMembershipID id = servConn.getProxyID();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    AcceptorImpl acceptor = serverConnection.getAcceptor();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    ClientProxyMembershipID id = serverConnection.getProxyID();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
     // Retrieve the data from the message parts
-    String cqName = msg.getPart(0).getString();
-    String cqQueryString = msg.getPart(1).getString();
-    int cqState = msg.getPart(2).getInt();
+    String cqName = clientMessage.getPart(0).getString();
+    String cqQueryString = clientMessage.getPart(1).getString();
+    int cqState = clientMessage.getPart(2).getInt();
 
-    Part isDurablePart = msg.getPart(3);
+    Part isDurablePart = clientMessage.getPart(3);
     byte[] isDurableByte = isDurablePart.getSerializedForm();
     boolean isDurable = (isDurableByte == null || isDurableByte[0] == 0) ? false : true;
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received {} request from {} CqName: {} queryString: {}", servConn.getName(),
-          MessageType.getString(msg.getMessageType()), servConn.getSocketString(), cqName,
+      logger.debug("{}: Received {} request from {} CqName: {} queryString: {}", serverConnection.getName(),
+          MessageType.getString(clientMessage.getMessageType()), serverConnection.getSocketString(), cqName,
           cqQueryString);
     }
 
@@ -87,7 +87,7 @@ public class ExecuteCQ extends BaseCQCommand {
       qService = (DefaultQueryService) crHelper.getCache().getLocalQueryService();
 
       // Authorization check
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         query = qService.newQuery(cqQueryString);
         cqRegionNames = ((DefaultQuery) query).getRegionsInQuery(null);
@@ -108,10 +108,10 @@ public class ExecuteCQ extends BaseCQCommand {
       cqQuery = cqServiceForExec.executeCq(cqName, cqQueryString, cqState, id,
           acceptor.getCacheClientNotifier(), isDurable, false, 0, null);
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", msg.getTransactionId(), cqe, servConn);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
       return;
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
+      writeChunkedException(clientMessage, e, serverConnection);
       return;
     }
 
@@ -119,7 +119,7 @@ public class ExecuteCQ extends BaseCQCommand {
     boolean sendResults = false;
     boolean successQuery = false;
 
-    if (msg.getMessageType() == MessageType.EXECUTECQ_WITH_IR_MSG_TYPE) {
+    if (clientMessage.getMessageType() == MessageType.EXECUTECQ_WITH_IR_MSG_TYPE) {
       sendResults = true;
     }
 
@@ -130,8 +130,8 @@ public class ExecuteCQ extends BaseCQCommand {
         cqRegionNames = ((DefaultQuery) query).getRegionsInQuery(null);
       }
       ((DefaultQuery) query).setIsCqQuery(true);
-      successQuery = processQuery(msg, query, cqQueryString, cqRegionNames, start, cqQuery,
-          executeCQContext, servConn, sendResults);
+      successQuery = processQuery(clientMessage, query, cqQueryString, cqRegionNames, start, cqQuery,
+          executeCQContext, serverConnection, sendResults);
 
       // Update the CQ statistics.
       cqQuery.getVsdStats().setCqInitialResultsTime((DistributionStats.getStatTime()) - oldstart);
@@ -153,12 +153,12 @@ public class ExecuteCQ extends BaseCQCommand {
       // Send OK to client
       sendCqResponse(MessageType.REPLY,
           LocalizedStrings.ExecuteCQ_CQ_CREATED_SUCCESSFULLY.toLocalizedString(),
-          msg.getTransactionId(), null, servConn);
+          clientMessage.getTransactionId(), null, serverConnection);
 
       long start2 = DistributionStats.getStatTime();
       stats.incProcessCreateCqTime(start2 - oldstart);
     }
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ61.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ61.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ61.java
index de61445..805ee48 100755
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ61.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ61.java
@@ -60,30 +60,30 @@ public class ExecuteCQ61 extends BaseCQCommand {
   private ExecuteCQ61() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
-    AcceptorImpl acceptor = servConn.getAcceptor();
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    ClientProxyMembershipID id = servConn.getProxyID();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    AcceptorImpl acceptor = serverConnection.getAcceptor();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    ClientProxyMembershipID id = serverConnection.getProxyID();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
     // Retrieve the data from the message parts
-    String cqName = msg.getPart(0).getString();
-    String cqQueryString = msg.getPart(1).getString();
-    int cqState = msg.getPart(2).getInt();
+    String cqName = clientMessage.getPart(0).getString();
+    String cqQueryString = clientMessage.getPart(1).getString();
+    int cqState = clientMessage.getPart(2).getInt();
 
-    Part isDurablePart = msg.getPart(3);
+    Part isDurablePart = clientMessage.getPart(3);
     byte[] isDurableByte = isDurablePart.getSerializedForm();
     boolean isDurable = (isDurableByte == null || isDurableByte[0] == 0) ? false : true;
     // region data policy
-    Part regionDataPolicyPart = msg.getPart(msg.getNumberOfParts() - 1);
+    Part regionDataPolicyPart = clientMessage.getPart(clientMessage.getNumberOfParts() - 1);
     byte[] regionDataPolicyPartBytes = regionDataPolicyPart.getSerializedForm();
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received {} request from {} CqName: {} queryString: {}", servConn.getName(),
-          MessageType.getString(msg.getMessageType()), servConn.getSocketString(), cqName,
+      logger.debug("{}: Received {} request from {} CqName: {} queryString: {}", serverConnection.getName(),
+          MessageType.getString(clientMessage.getMessageType()), serverConnection.getSocketString(), cqName,
           cqQueryString);
     }
 
@@ -96,8 +96,7 @@ public class ExecuteCQ61 extends BaseCQCommand {
         String err =
             LocalizedStrings.ExecuteCQ_SERVER_NOTIFYBYSUBSCRIPTION_MODE_IS_SET_TO_FALSE_CQ_EXECUTION_IS_NOT_SUPPORTED_IN_THIS_MODE
                 .toLocalizedString();
-        sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, msg.getTransactionId(), null,
-            servConn);
+        sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
         return;
       }
     }
@@ -113,7 +112,7 @@ public class ExecuteCQ61 extends BaseCQCommand {
       qService = (DefaultQueryService) crHelper.getCache().getLocalQueryService();
 
       // Authorization check
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         query = qService.newQuery(cqQueryString);
         cqRegionNames = ((DefaultQuery) query).getRegionsInQuery(null);
@@ -141,16 +140,16 @@ public class ExecuteCQ61 extends BaseCQCommand {
       // registering cq with serverConnection so that when CCP will require auth info it can access
       // that
       // registering cq auth before as possibility that you may get event
-      servConn.setCq(cqName, isDurable);
+      serverConnection.setCq(cqName, isDurable);
       cqQuery = (ServerCQImpl) cqServiceForExec.executeCq(cqName, cqQueryString, cqState, id, ccn,
           isDurable, true, regionDataPolicyPartBytes[0], null);
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", msg.getTransactionId(), cqe, servConn);
-      servConn.removeCq(cqName, isDurable);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
+      serverConnection.removeCq(cqName, isDurable);
       return;
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.removeCq(cqName, isDurable);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.removeCq(cqName, isDurable);
       return;
     }
 
@@ -158,7 +157,7 @@ public class ExecuteCQ61 extends BaseCQCommand {
     boolean sendResults = false;
     boolean successQuery = false;
 
-    if (msg.getMessageType() == MessageType.EXECUTECQ_WITH_IR_MSG_TYPE) {
+    if (clientMessage.getMessageType() == MessageType.EXECUTECQ_WITH_IR_MSG_TYPE) {
       sendResults = true;
     }
 
@@ -173,8 +172,8 @@ public class ExecuteCQ61 extends BaseCQCommand {
           cqRegionNames = ((DefaultQuery) query).getRegionsInQuery(null);
         }
         ((DefaultQuery) query).setIsCqQuery(true);
-        successQuery = processQuery(msg, query, cqQueryString, cqRegionNames, start, cqQuery,
-            executeCQContext, servConn, sendResults);
+        successQuery = processQuery(clientMessage, query, cqQueryString, cqRegionNames, start, cqQuery,
+            executeCQContext, serverConnection, sendResults);
 
 
         // Update the CQ statistics.
@@ -203,12 +202,12 @@ public class ExecuteCQ61 extends BaseCQCommand {
       // Send OK to client
       sendCqResponse(MessageType.REPLY,
           LocalizedStrings.ExecuteCQ_CQ_CREATED_SUCCESSFULLY.toLocalizedString(),
-          msg.getTransactionId(), null, servConn);
+          clientMessage.getTransactionId(), null, serverConnection);
 
       long start2 = DistributionStats.getStatTime();
       stats.incProcessCreateCqTime(start2 - oldstart);
     }
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
 
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetCQStats.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetCQStats.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetCQStats.java
index 69be347..b1faeee 100644
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetCQStats.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetCQStats.java
@@ -36,32 +36,32 @@ public class GetCQStats extends BaseCQCommand {
   private GetCQStats() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
 
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
     final boolean isDebugEnabled = logger.isDebugEnabled();
     if (isDebugEnabled) {
-      logger.debug("{}: Received close all client CQs request from {}", servConn.getName(),
-          servConn.getSocketString());
+      logger.debug("{}: Received close all client CQs request from {}", serverConnection.getName(),
+          serverConnection.getSocketString());
     }
 
     // Retrieve the data from the message parts
-    String cqName = msg.getPart(0).getString();
+    String cqName = clientMessage.getPart(0).getString();
 
     if (isDebugEnabled) {
-      logger.debug("{}: Received close CQ request from {} cqName: {}", servConn.getName(),
-          servConn.getSocketString(), cqName);
+      logger.debug("{}: Received close CQ request from {} cqName: {}", serverConnection.getName(),
+          serverConnection.getSocketString(), cqName);
     }
 
     // Process the query request
     if (cqName == null) {
       String err = "The cqName for the cq stats request is null";
-      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, msg.getTransactionId(), null, servConn);
+      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
       return;
     }
 
@@ -74,13 +74,12 @@ public class GetCQStats extends BaseCQCommand {
       cqService.start();
     } catch (Exception e) {
       String err = "Exception while Getting the CQ Statistics. ";
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, msg.getTransactionId(), e, servConn);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e, serverConnection);
       return;
     }
     // Send OK to client
-    sendCqResponse(MessageType.REPLY, "cq stats sent successfully.", msg.getTransactionId(), null,
-        servConn);
-    servConn.setAsTrue(RESPONDED);
+    sendCqResponse(MessageType.REPLY, "cq stats sent successfully.", clientMessage.getTransactionId(), null, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
 
     {
       long oldStart = start;

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetDurableCQs.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetDurableCQs.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetDurableCQs.java
index a2d201d..e39c8e1 100755
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetDurableCQs.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetDurableCQs.java
@@ -44,19 +44,19 @@ public class GetDurableCQs extends BaseCQCommand {
   private GetDurableCQs() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
-    AcceptorImpl acceptor = servConn.getAcceptor();
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    ClientProxyMembershipID id = servConn.getProxyID();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    AcceptorImpl acceptor = serverConnection.getAcceptor();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    ClientProxyMembershipID id = serverConnection.getProxyID();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received {} request from {}", servConn.getName(),
-          MessageType.getString(msg.getMessageType()), servConn.getSocketString());
+      logger.debug("{}: Received {} request from {}", serverConnection.getName(),
+          MessageType.getString(clientMessage.getMessageType()), serverConnection.getSocketString());
     }
 
     DefaultQueryService qService = null;
@@ -68,7 +68,7 @@ public class GetDurableCQs extends BaseCQCommand {
       this.securityService.authorizeClusterRead();
 
       // Authorization check
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         authzRequest.getDurableCQsAuthorize();
       }
@@ -76,34 +76,34 @@ public class GetDurableCQs extends BaseCQCommand {
       cqServiceForExec = qService.getCqService();
       List<String> durableCqs = cqServiceForExec.getAllDurableClientCqs(id);
 
-      ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
+      ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE);
-      chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+      chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
       chunkedResponseMsg.sendHeader();
 
-      List durableCqList = new ArrayList(maximumChunkSize);
+      List durableCqList = new ArrayList(MAXIMUM_CHUNK_SIZE);
       final boolean isTraceEnabled = logger.isTraceEnabled();
       for (Iterator it = durableCqs.iterator(); it.hasNext();) {
         Object durableCqName = it.next();
         durableCqList.add(durableCqName);
         if (isTraceEnabled) {
-          logger.trace("{}: getDurableCqsResponse <{}>; list size was {}", servConn.getName(),
+          logger.trace("{}: getDurableCqsResponse <{}>; list size was {}", serverConnection.getName(),
               durableCqName, durableCqList.size());
         }
-        if (durableCqList.size() == maximumChunkSize) {
+        if (durableCqList.size() == MAXIMUM_CHUNK_SIZE) {
           // Send the chunk and clear the list
-          sendDurableCqsResponseChunk(durableCqList, false, servConn);
+          sendDurableCqsResponseChunk(durableCqList, false, serverConnection);
           durableCqList.clear();
         }
       }
       // Send the last chunk even if the list is of zero size.
-      sendDurableCqsResponseChunk(durableCqList, true, servConn);
+      sendDurableCqsResponseChunk(durableCqList, true, serverConnection);
 
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", msg.getTransactionId(), cqe, servConn);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
       return;
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
+      writeChunkedException(clientMessage, e, serverConnection);
       return;
     }
   }
@@ -114,7 +114,7 @@ public class GetDurableCQs extends BaseCQCommand {
 
     chunkedResponseMsg.setNumberOfParts(1);
     chunkedResponseMsg.setLastChunk(lastChunk);
-    chunkedResponseMsg.addObjPart(list, zipValues);
+    chunkedResponseMsg.addObjPart(list, false);
 
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Sending {} durableCQs response chunk{}", servConn.getName(),

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MonitorCQ.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MonitorCQ.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MonitorCQ.java
index a8fec9f..5393e81 100644
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MonitorCQ.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MonitorCQ.java
@@ -36,39 +36,38 @@ public class MonitorCQ extends BaseCQCommand {
   private MonitorCQ() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
-    int op = msg.getPart(0).getInt();
+    int op = clientMessage.getPart(0).getInt();
 
     if (op < 1) {
       // This should have been taken care at the client - remove?
       String err = LocalizedStrings.MonitorCQ__0_THE_MONITORCQ_OPERATION_IS_INVALID
-          .toLocalizedString(servConn.getName());
-      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, msg.getTransactionId(), null, servConn);
+          .toLocalizedString(serverConnection.getName());
+      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
       return;
     }
 
     String regionName = null;
-    if (msg.getNumberOfParts() == 2) {
+    if (clientMessage.getNumberOfParts() == 2) {
       // This will be enable/disable on region.
-      regionName = msg.getPart(1).getString();
+      regionName = clientMessage.getPart(1).getString();
       if (regionName == null) {
         // This should have been taken care at the client - remove?
         String err =
             LocalizedStrings.MonitorCQ__0_A_NULL_REGION_NAME_WAS_PASSED_FOR_MONITORCQ_OPERATION
-                .toLocalizedString(servConn.getName());
-        sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, msg.getTransactionId(), null,
-            servConn);
+                .toLocalizedString(serverConnection.getName());
+        sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
         return;
       }
     }
 
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received MonitorCq request from {} op: {}{}", servConn.getName(),
-          servConn.getSocketString(), op, (regionName != null) ? " RegionName: " + regionName : "");
+      logger.debug("{}: Received MonitorCq request from {} op: {}{}", serverConnection.getName(),
+          serverConnection.getSocketString(), op, (regionName != null) ? " RegionName: " + regionName : "");
     }
 
     this.securityService.authorizeClusterRead();
@@ -85,12 +84,12 @@ public class MonitorCQ extends BaseCQCommand {
       throw new CqException(
           LocalizedStrings.CqService_INVALID_CQ_MONITOR_REQUEST_RECEIVED.toLocalizedString());
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", msg.getTransactionId(), cqe, servConn);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
       return;
     } catch (Exception e) {
       String err = LocalizedStrings.MonitorCQ_EXCEPTION_WHILE_HANDLING_THE_MONITOR_REQUEST_OP_IS_0
           .toLocalizedString(Integer.valueOf(op));
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, msg.getTransactionId(), e, servConn);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e, serverConnection);
       return;
     }
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/StopCQ.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/StopCQ.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/StopCQ.java
index 94304d3..070cb04 100644
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/StopCQ.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/StopCQ.java
@@ -44,30 +44,30 @@ public class StopCQ extends BaseCQCommand {
   private StopCQ() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    ClientProxyMembershipID id = servConn.getProxyID();
-    CacheServerStats stats = servConn.getCacheServerStats();
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    ClientProxyMembershipID id = serverConnection.getProxyID();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
     // Based on MessageType.QUERY
     // Added by Rao 2/1/2007
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
     start = DistributionStats.getStatTime();
     // Retrieve the data from the message parts
-    String cqName = msg.getPart(0).getString();
+    String cqName = clientMessage.getPart(0).getString();
 
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received stop CQ request from {} cqName: {}", servConn.getName(),
-          servConn.getSocketString(), cqName);
+      logger.debug("{}: Received stop CQ request from {} cqName: {}", serverConnection.getName(),
+          serverConnection.getSocketString(), cqName);
     }
 
     // Process the query request
     if (cqName == null) {
       String err =
           LocalizedStrings.StopCQ_THE_CQNAME_FOR_THE_CQ_STOP_REQUEST_IS_NULL.toLocalizedString();
-      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, msg.getTransactionId(), null, servConn);
+      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
       return;
     }
 
@@ -86,7 +86,7 @@ public class StopCQ extends BaseCQCommand {
 
       this.securityService.authorizeDataManage();
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         String queryStr = null;
         Set cqRegionNames = null;
@@ -100,23 +100,23 @@ public class StopCQ extends BaseCQCommand {
       }
       cqService.stopCq(cqName, id);
       if (cqQuery != null)
-        servConn.removeCq(cqName, cqQuery.isDurable());
+        serverConnection.removeCq(cqName, cqQuery.isDurable());
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", msg.getTransactionId(), cqe, servConn);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
       return;
     } catch (Exception e) {
       String err =
           LocalizedStrings.StopCQ_EXCEPTION_WHILE_STOPPING_CQ_NAMED_0.toLocalizedString(cqName);
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, msg.getTransactionId(), e, servConn);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e, serverConnection);
       return;
     }
 
     // Send OK to client
     sendCqResponse(MessageType.REPLY,
-        LocalizedStrings.StopCQ_CQ_STOPPED_SUCCESSFULLY.toLocalizedString(), msg.getTransactionId(),
-        null, servConn);
+        LocalizedStrings.StopCQ_CQ_STOPPED_SUCCESSFULLY.toLocalizedString(), clientMessage.getTransactionId(),
+        null, serverConnection);
 
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
 
     {
       long oldStart = start;


[15/43] geode git commit: Cleanup CacheClientNotifier

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/d66e51d0/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientProxy.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientProxy.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientProxy.java
index 75c89ab..8450db9 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientProxy.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientProxy.java
@@ -1951,7 +1951,7 @@ public class CacheClientProxy implements ClientSession {
 
         // Close the proxy
         terminateDispatching(false);
-        _cacheClientNotifier._statistics.incQueueDroppedCount();
+        _cacheClientNotifier.statistics.incQueueDroppedCount();
 
         /**
          * Setting the expiration task to null again and cancelling existing one, if any. See
@@ -2850,7 +2850,7 @@ public class CacheClientProxy implements ClientSession {
       try {
         this._messageQueue.put(clientMessage);
         if (this._proxy.isPaused() && this._proxy.isDurable()) {
-          this._proxy._cacheClientNotifier._statistics.incEventEnqueuedWhileClientAwayCount();
+          this._proxy._cacheClientNotifier.statistics.incEventEnqueuedWhileClientAwayCount();
           if (logger.isDebugEnabled()) {
             logger.debug("{}: Queued message while Durable Client is away {}", this, clientMessage);
           }


[02/43] geode git commit: GEODE-2954 Old client gets null memberID in cache listener

Posted by kl...@apache.org.
GEODE-2954 Old client gets null memberID in cache listener

I've added a new test demonstrating that a new-version server sends
an EventID that an old client cannot deserialize completely. The
client gets an error when deserializing the EventID's member ID,
causing cache listeners to receive null when they request the ID of
the member that effected the change.
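
To make the symptom concrete, here is a minimal client-side listener
along the lines of the MemberIDVerifier added further down in this
commit. The class name is hypothetical; the CacheListenerAdapter base
class and EntryEvent.getDistributedMember() call are the same ones the
new test uses.

    import org.apache.geode.cache.EntryEvent;
    import org.apache.geode.cache.util.CacheListenerAdapter;
    import org.apache.geode.distributed.DistributedMember;

    // Probe listener: records whether an event ever arrives without a member ID.
    public class NullMemberIdProbe extends CacheListenerAdapter {
      volatile boolean sawNullMemberId;

      @Override
      public void afterCreate(EntryEvent event) {
        check(event);
      }

      @Override
      public void afterUpdate(EntryEvent event) {
        check(event);
      }

      private void check(EntryEvent event) {
        // Before the fix, an old (e.g. 1.1.0) client could see null here because
        // it failed to deserialize the member ID carried in the event's EventID.
        DistributedMember source = event.getDistributedMember();
        if (source == null) {
          sawNullMemberId = true;
        }
      }
    }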

The fix is to reserialize the member ID in EventID.toData if the
destination stream is for an older version, such as a 1.1.0 client.
This ensures the proper on-wire format is used for that version of Geode.
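
The shape of that fix, as a rough sketch with hypothetical names only
(the real change is the EventID.toData hunk in the diff below, which
uses Geode's InternalDataSerializer, HeapDataOutputStream, and
writeEssentialData):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Hypothetical stand-ins: VersionedId plays the role of EventID, and
    // MemberId.writeLegacyForm() plays the role of writeEssentialData().
    interface MemberId {
      void writeLegacyForm(DataOutput out) throws IOException;
    }

    class VersionedId {
      private final byte[] currentFormatBytes; // member ID already serialized in the current format
      private final MemberId memberId;

      VersionedId(MemberId memberId, byte[] currentFormatBytes) {
        this.memberId = memberId;
        this.currentFormatBytes = currentFormatBytes;
      }

      // destinationOrdinal identifies the peer's wire version; oldWireOrdinal is the
      // newest ordinal that still expects the legacy member-ID format.
      void toData(DataOutput out, int destinationOrdinal, int oldWireOrdinal) throws IOException {
        byte[] payload;
        if (destinationOrdinal <= oldWireOrdinal) {
          // Re-serialize the member ID so an older peer sees the wire format it understands.
          ByteArrayOutputStream legacy = new ByteArrayOutputStream();
          memberId.writeLegacyForm(new DataOutputStream(legacy));
          payload = legacy.toByteArray();
        } else {
          payload = currentFormatBytes; // newer peers can take the cached bytes as-is
        }
        out.writeInt(payload.length);
        out.write(payload);
      }
    }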

I've also bumped up the version ordinal for 1.2 since version 59 is
marked as unusable in Version.java.

I've changed the Banner to show the version ordinal because the other
version information in the banner isn't completely trustworthy. It
looks for a GemFireVersion.properties file on the classpath to get
that information, so it may not come from the Geode jar file as
expected.


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/e79d27d7
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/e79d27d7
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/e79d27d7

Branch: refs/heads/feature/GEODE-2632-17
Commit: e79d27d7e258d2a5f0d8a3155cc1911825a90493
Parents: 096c22d
Author: Bruce Schuchardt <bs...@pivotal.io>
Authored: Wed May 24 15:13:52 2017 -0700
Committer: Bruce Schuchardt <bs...@pivotal.io>
Committed: Thu May 25 07:45:18 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/geode/internal/Banner.java  |   2 +
 .../java/org/apache/geode/internal/Version.java |   4 +-
 .../apache/geode/internal/cache/EventID.java    |  11 +-
 .../sockets/ClientServerMiscBCDUnitTest.java    |  44 +++
 .../tier/sockets/ClientServerMiscDUnitTest.java | 358 ++++++++-----------
 .../cli/commands/ShowDeadlockDUnitTest.java     |   0
 .../sanctionedDataSerializables.txt             |   8 +-
 7 files changed, 215 insertions(+), 212 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/main/java/org/apache/geode/internal/Banner.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/Banner.java b/geode-core/src/main/java/org/apache/geode/internal/Banner.java
index b6a89bf..a218a5b 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/Banner.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/Banner.java
@@ -104,6 +104,8 @@ public class Banner {
 
     GemFireVersion.print(out);
 
+    out.println("Communications version: " + Version.CURRENT_ORDINAL);
+
     out.println("Process ID: " + processId);
     out.println("User: " + sp.get("user.name"));
     sp.remove("user.name");

http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/main/java/org/apache/geode/internal/Version.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/Version.java b/geode-core/src/main/java/org/apache/geode/internal/Version.java
index 1c131e8..5576971 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/Version.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/Version.java
@@ -59,7 +59,7 @@ public class Version implements Comparable<Version> {
   /** byte used as ordinal to represent this <code>Version</code> */
   private final short ordinal;
 
-  public static final int HIGHEST_VERSION = 60;
+  public static final int HIGHEST_VERSION = 65;
 
   private static final Version[] VALUES = new Version[HIGHEST_VERSION + 1];
 
@@ -190,7 +190,7 @@ public class Version implements Comparable<Version> {
   public static final Version GEODE_111 =
       new Version("GEODE", "1.1.1", (byte) 1, (byte) 1, (byte) 1, (byte) 0, GEODE_111_ORDINAL);
 
-  private static final byte GEODE_120_ORDINAL = 60;
+  private static final byte GEODE_120_ORDINAL = 65;
 
   public static final Version GEODE_120 =
       new Version("GEODE", "1.2.0", (byte) 1, (byte) 2, (byte) 0, (byte) 0, GEODE_120_ORDINAL);

http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java
index 87835ff..71acdc9 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java
@@ -27,6 +27,7 @@ import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.geode.internal.InternalDataSerializer;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.DataSerializer;
@@ -322,7 +323,15 @@ public class EventID implements DataSerializableFixedID, Serializable, Externali
   }
 
   public void toData(DataOutput dop) throws IOException {
-    DataSerializer.writeByteArray(this.membershipID, dop);
+    Version version = InternalDataSerializer.getVersionForDataStream(dop);
+    if (version.compareTo(Version.GFE_90) <= 0) {
+      InternalDistributedMember member = getDistributedMember();
+      HeapDataOutputStream hdos = new HeapDataOutputStream(version);
+      member.writeEssentialData(hdos);
+      DataSerializer.writeByteArray(hdos.toByteArray(), dop);
+    } else {
+      DataSerializer.writeByteArray(this.membershipID, dop);
+    }
     DataSerializer.writeByteArray(getOptimizedByteArrayForEventID(this.threadID, this.sequenceID),
         dop);
     dop.writeInt(this.bucketID);

http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscBCDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscBCDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscBCDUnitTest.java
index be0ac6b..d51c196 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscBCDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscBCDUnitTest.java
@@ -14,17 +14,28 @@
  */
 package org.apache.geode.internal.cache.tier.sockets;
 
+import static org.junit.Assert.assertFalse;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.client.Pool;
+import org.apache.geode.internal.cache.LocalRegion;
+import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.NetworkUtils;
+import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.standalone.VersionManager;
 import org.apache.geode.test.junit.categories.BackwardCompatibilityTest;
 import org.apache.geode.test.junit.categories.ClientServerTest;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
+import org.awaitility.Awaitility;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 @Category({DistributedTest.class, ClientServerTest.class, BackwardCompatibilityTest.class})
 @RunWith(Parameterized.class)
@@ -46,4 +57,37 @@ public class ClientServerMiscBCDUnitTest extends ClientServerMiscDUnitTest {
     testVersion = version;
   }
 
+  @Test
+  public void testSubscriptionWithCurrentServerAndOldClients() throws Exception {
+    // start server first
+    int serverPort = initServerCache(true);
+    VM client1 = Host.getHost(0).getVM(testVersion, 1);
+    VM client2 = Host.getHost(0).getVM(testVersion, 3);
+    String hostname = NetworkUtils.getServerHostName(Host.getHost(0));
+    client1.invoke("create client1 cache", () -> {
+      createClientCache(hostname, serverPort);
+      populateCache();
+      registerInterest();
+    });
+    client2.invoke("create client2 cache", () -> {
+      Pool ignore = createClientCache(hostname, serverPort);
+    });
+
+    client2.invoke("putting data in client2", () -> putForClient());
+
+    // client1 will receive client2's updates asynchronously
+    client1.invoke(() -> {
+      Region r2 = getCache().getRegion(REGION_NAME2);
+      MemberIDVerifier verifier = (MemberIDVerifier) ((LocalRegion) r2).getCacheListener();
+      Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> verifier.eventReceived);
+    });
+
+    // client2's update should have included a memberID - GEODE-2954
+    client1.invoke(() -> {
+      Region r2 = getCache().getRegion(REGION_NAME2);
+      MemberIDVerifier verifier = (MemberIDVerifier) ((LocalRegion) r2).getCacheListener();
+      assertFalse(verifier.memberIDNotReceived);
+    });
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
index b4f3185..9ca5ab9 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
@@ -30,6 +30,7 @@ import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheException;
 import org.apache.geode.cache.CacheWriterException;
 import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.EntryEvent;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionAttributes;
 import org.apache.geode.cache.Scope;
@@ -42,7 +43,9 @@ import org.apache.geode.cache.client.internal.Op;
 import org.apache.geode.cache.client.internal.PoolImpl;
 import org.apache.geode.cache.client.internal.RegisterInterestTracker;
 import org.apache.geode.cache.server.CacheServer;
+import org.apache.geode.cache.util.CacheListenerAdapter;
 import org.apache.geode.cache30.CacheSerializableRunnable;
+import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.distributed.DistributedSystemDisconnectedException;
 import org.apache.geode.internal.AvailablePort;
@@ -63,6 +66,7 @@ import org.apache.geode.test.dunit.standalone.VersionManager;
 import org.apache.geode.test.junit.categories.ClientServerTest;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
+import org.awaitility.Awaitility;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -72,6 +76,7 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.Properties;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Tests client server corner cases between Region and Pool
@@ -95,9 +100,9 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
 
   private static final String server_k2 = "server-k2";
 
-  private static final String REGION_NAME1 = "ClientServerMiscDUnitTest_region1";
+  static final String REGION_NAME1 = "ClientServerMiscDUnitTest_region1";
 
-  private static final String REGION_NAME2 = "ClientServerMiscDUnitTest_region2";
+  static final String REGION_NAME2 = "ClientServerMiscDUnitTest_region2";
 
   private static final String PR_REGION_NAME = "ClientServerMiscDUnitTest_PRregion";
 
@@ -138,13 +143,13 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
     server2 = host.getVM(3);
   }
 
-  private int initServerCache(boolean notifyBySub) {
+  int initServerCache(boolean notifyBySub) {
     Object[] args = new Object[] {notifyBySub, getMaxThreads()};
     return ((Integer) server1.invoke(ClientServerMiscDUnitTest.class, "createServerCache", args))
         .intValue();
   }
 
-  private int initServerCache2(boolean notifyBySub) {
+  int initServerCache2(boolean notifyBySub) {
     Object[] args = new Object[] {notifyBySub, getMaxThreads()};
     return ((Integer) server2.invoke(ClientServerMiscDUnitTest.class, "createServerCache", args))
         .intValue();
@@ -373,19 +378,18 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
   public void testForTwoRegionHavingDifferentInterestList() throws Exception {
     // start server first
     PORT1 = initServerCache(true);
-    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1);
-    populateCache();
-    registerInterest();
-    server1.invoke(() -> ClientServerMiscDUnitTest.put());
+    int serverPort = PORT1;
+    VM client1 = Host.getHost(0).getVM(testVersion, 1);
+    String hostname = NetworkUtils.getServerHostName(Host.getHost(0));
+    client1.invoke("create client1 cache", () -> {
+      createClientCache(hostname, serverPort);
+      populateCache();
+      registerInterest();
+    });
 
-    // pause(5000 + 5000 + 10000);
-    /*
-     * final int maxWaitTime = Integer.getInteger(WAIT_PROPERTY, WAIT_DEFAULT).intValue(); try {
-     * Thread.yield(); Thread.sleep(maxWaitTime); } catch (InterruptedException e) {
-     * fail("interrupted"); }
-     */
-    verifyUpdates();
+    server1.invoke("putting entries in server1", () -> put());
 
+    client1.invoke(() -> verifyUpdates());
   }
 
   /**
@@ -590,65 +594,27 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
 
     populateCache();
     server1.invoke(() -> ClientServerMiscDUnitTest.put());
-    // pause(5000);
-    WaitCriterion wc = new WaitCriterion() {
-      String excuse;
-
-      public boolean done() {
-        Object val = region1.getEntry(k1).getValue();
-        return k1.equals(val);
-      }
 
-      public String description() {
-        return excuse;
-      }
-    };
-    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
-
-    // assertIndexDetailsEquals(region1.getEntry(k1).getValue(), k1);
-    wc = new WaitCriterion() {
-      String excuse;
-
-      public boolean done() {
-        Object val = region1.getEntry(k2).getValue();
-        return k2.equals(val);
-      }
-
-      public String description() {
-        return excuse;
-      }
-    };
-    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
-
-    wc = new WaitCriterion() {
-      String excuse;
-
-      public boolean done() {
-        Object val = region2.getEntry(k1).getValue();
-        return k1.equals(val);
-      }
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = region1.getEntry(k1).getValue();
+      return k1.equals(val);
+    });
 
-      public String description() {
-        return excuse;
-      }
-    };
-    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = region1.getEntry(k2).getValue();
+      return k2.equals(val);
+    });
 
-    // assertIndexDetailsEquals(region1.getEntry(k2).getValue(), k2);
-    // assertIndexDetailsEquals(region2.getEntry(k1).getValue(), k1);
-    wc = new WaitCriterion() {
-      String excuse;
 
-      public boolean done() {
-        Object val = region2.getEntry(k2).getValue();
-        return k2.equals(val);
-      }
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = region2.getEntry(k1).getValue();
+      return k1.equals(val);
+    });
 
-      public String description() {
-        return excuse;
-      }
-    };
-    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = region2.getEntry(k2).getValue();
+      return k2.equals(val);
+    });
 
     // assertIndexDetailsEquals(region2.getEntry(k2).getValue(), k2);
   }
@@ -857,6 +823,32 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
     return p;
   }
 
+  static class MemberIDVerifier extends CacheListenerAdapter {
+    boolean memberIDNotReceived = true;
+    boolean eventReceived = false;
+
+    @Override
+    public void afterCreate(EntryEvent event) {
+      eventReceived(event);
+    }
+
+    @Override
+    public void afterUpdate(EntryEvent event) {
+      eventReceived(event);
+    }
+
+    private void eventReceived(EntryEvent event) {
+      eventReceived = true;
+      DistributedMember memberID = event.getDistributedMember();
+      memberIDNotReceived = (memberID == null);
+    }
+
+    public void reset() {
+      memberIDNotReceived = true;
+      eventReceived = false;
+    }
+  }
+
   public static Integer createServerCache(Boolean notifyBySubscription, Integer maxThreads)
       throws Exception {
     Cache cache = new ClientServerMiscDUnitTest().createCacheV(new Properties());
@@ -893,17 +885,12 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
     return 0;
   }
 
-  public static void registerInterest() {
-    try {
-      Cache cache = new ClientServerMiscDUnitTest().getCache();
-      Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
-      assertNotNull(r);
-      // r.registerInterestRegex(CacheClientProxy.ALL_KEYS);
-      r.registerInterest("ALL_KEYS");
-    } catch (CacheWriterException e) {
-      e.printStackTrace();
-      Assert.fail("Test failed due to CacheWriterException during registerInterest", e);
-    }
+  public static void registerInterest() throws Exception {
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
+    assertNotNull(r);
+    r.registerInterest("ALL_KEYS");
+    r.getAttributesMutator().addCacheListener(new MemberIDVerifier());
   }
 
   public static void registerInterestForInvalidatesInBothTheRegions() {
@@ -1070,153 +1057,114 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
   }
 
   public static void verifyCacheClientProxyOnServer() {
-    try {
-      Cache cache = new ClientServerMiscDUnitTest().getCache();
-      assertEquals("More than one BridgeServer", 1, cache.getCacheServers().size());
-      CacheServerImpl bs = (CacheServerImpl) cache.getCacheServers().iterator().next();
-      assertNotNull(bs);
-      assertNotNull(bs.getAcceptor());
-      final CacheClientNotifier ccn = bs.getAcceptor().getCacheClientNotifier();
-
-      assertNotNull(ccn);
-      WaitCriterion wc = new WaitCriterion() {
-        String excuse;
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    assertEquals("More than one BridgeServer", 1, cache.getCacheServers().size());
+    CacheServerImpl bs = (CacheServerImpl) cache.getCacheServers().iterator().next();
+    assertNotNull(bs);
+    assertNotNull(bs.getAcceptor());
+    final CacheClientNotifier ccn = bs.getAcceptor().getCacheClientNotifier();
+
+    assertNotNull(ccn);
+    WaitCriterion wc = new WaitCriterion() {
+      String excuse;
 
-        public boolean done() {
-          return ccn.getClientProxies().size() == 1;
-        }
+      public boolean done() {
+        return ccn.getClientProxies().size() == 1;
+      }
 
-        public String description() {
-          return excuse;
-        }
-      };
-      Wait.waitForCriterion(wc, 40 * 1000, 1000, true);
-    } catch (Exception ex) {
-      ex.printStackTrace();
-      fail("while setting verifyNoCacheClientProxyOnServer  " + ex);
-    }
+      public String description() {
+        return excuse;
+      }
+    };
+    Wait.waitForCriterion(wc, 40 * 1000, 1000, true);
   }
 
   public static void populateCache() {
-    try {
-      Cache cache = new ClientServerMiscDUnitTest().getCache();
-      Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
-      Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
-      assertNotNull(r1);
-      assertNotNull(r2);
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
+    Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
+    assertNotNull(r1);
+    assertNotNull(r2);
 
-      if (!r1.containsKey(k1))
-        r1.create(k1, k1);
-      if (!r1.containsKey(k2))
-        r1.create(k2, k2);
-      if (!r2.containsKey(k1))
-        r2.create(k1, k1);
-      if (!r2.containsKey(k2))
-        r2.create(k2, k2);
-
-      assertEquals(r1.getEntry(k1).getValue(), k1);
-      assertEquals(r1.getEntry(k2).getValue(), k2);
-      assertEquals(r2.getEntry(k1).getValue(), k1);
-      assertEquals(r2.getEntry(k2).getValue(), k2);
-    } catch (Exception ex) {
-      Assert.fail("failed while createEntries()", ex);
-    }
+    if (!r1.containsKey(k1))
+      r1.create(k1, k1);
+    if (!r1.containsKey(k2))
+      r1.create(k2, k2);
+    if (!r2.containsKey(k1))
+      r2.create(k1, k1);
+    if (!r2.containsKey(k2))
+      r2.create(k2, k2);
+
+    assertEquals(r1.getEntry(k1).getValue(), k1);
+    assertEquals(r1.getEntry(k2).getValue(), k2);
+    assertEquals(r2.getEntry(k1).getValue(), k1);
+    assertEquals(r2.getEntry(k2).getValue(), k2);
   }
 
   public static void put() {
-    try {
-      Cache cache = new ClientServerMiscDUnitTest().getCache();
-      Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
-      Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
-      assertNotNull(r1);
-      assertNotNull(r2);
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
+    Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
+    assertNotNull(r1);
+    assertNotNull(r2);
 
-      r1.put(k1, server_k1);
-      r1.put(k2, server_k2);
+    r1.put(k1, server_k1);
+    r1.put(k2, server_k2);
 
-      r2.put(k1, server_k1);
-      r2.put(k2, server_k2);
+    r2.put(k1, server_k1);
+    r2.put(k2, server_k2);
 
-      assertEquals(r1.getEntry(k1).getValue(), server_k1);
-      assertEquals(r1.getEntry(k2).getValue(), server_k2);
-      assertEquals(r2.getEntry(k1).getValue(), server_k1);
-      assertEquals(r2.getEntry(k2).getValue(), server_k2);
-    } catch (Exception ex) {
-      Assert.fail("failed while put()", ex);
-    }
+    assertEquals(r1.getEntry(k1).getValue(), server_k1);
+    assertEquals(r1.getEntry(k2).getValue(), server_k2);
+    assertEquals(r2.getEntry(k1).getValue(), server_k1);
+    assertEquals(r2.getEntry(k2).getValue(), server_k2);
   }
 
-  public static void verifyUpdates() {
-    try {
-      Cache cache = new ClientServerMiscDUnitTest().getCache();
-      final Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
-      final Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
-      assertNotNull(r1);
-      assertNotNull(r2);
-      // verify updates
-      WaitCriterion wc = new WaitCriterion() {
-        String excuse;
-
-        public boolean done() {
-          Object val = r1.getEntry(k1).getValue();
-          return k1.equals(val);
-        }
+  public static void putForClient() {
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
 
-        public String description() {
-          return excuse;
-        }
-      };
-      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
-
-      // assertIndexDetailsEquals(k1, r1.getEntry(k1).getValue());
-      wc = new WaitCriterion() {
-        String excuse;
-
-        public boolean done() {
-          Object val = r1.getEntry(k2).getValue();
-          return k2.equals(val);
-        }
-
-        public String description() {
-          return excuse;
-        }
-      };
-      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
+    r2.put(k1, "client2_k1");
+    r2.put(k2, "client2_k2");
+  }
 
-      // assertIndexDetailsEquals(k2, r1.getEntry(k2).getValue());
-      wc = new WaitCriterion() {
-        String excuse;
+  public static void verifyUpdates() {
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    final Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
+    final Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
+    assertNotNull(r1);
+    assertNotNull(r2);
 
-        public boolean done() {
-          Object val = r2.getEntry(k1).getValue();
-          return server_k1.equals(val);
-        }
+    // no interest registered in region1 - it should hold client values, which are
+    // the same as the keys
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = r1.getEntry(k1).getValue();
+      return k1.equals(val);
+    });
 
-        public String description() {
-          return excuse;
-        }
-      };
-      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = r1.getEntry(k2).getValue();
+      return k2.equals(val);
+    });
 
-      // assertIndexDetailsEquals(server_k1, r2.getEntry(k1).getValue());
-      wc = new WaitCriterion() {
-        String excuse;
+    // interest was registered in region2 - it should contain server values
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = r2.getEntry(k1).getValue();
+      return server_k1.equals(val);
+    });
 
-        public boolean done() {
-          Object val = r2.getEntry(k2).getValue();
-          return server_k2.equals(val);
-        }
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = r2.getEntry(k2).getValue();
+      return server_k2.equals(val);
+    });
 
-        public String description() {
-          return excuse;
-        }
-      };
-      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
+    // events should have contained a memberID
+    MemberIDVerifier verifier = (MemberIDVerifier) ((LocalRegion) r2).getCacheListener();
+    assertTrue("client should have received a listener event", verifier.eventReceived);
+    assertFalse("client received an update but the event had no member id",
+        verifier.memberIDNotReceived);
+    verifier.reset();
 
-      // assertIndexDetailsEquals(server_k2, r2.getEntry(k2).getValue());
-    } catch (Exception ex) {
-      Assert.fail("failed while verifyUpdates()", ex);
-    }
   }
 
   public static void verifyInvalidatesOnBothRegions() {

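The hunks above replace the hand-rolled WaitCriterion / Wait.waitForCriterion polling in ClientServerMiscDUnitTest with Awaitility's fluent API. For orientation only (this is not part of the commit), here is a minimal, self-contained sketch of that same idiom: a ConcurrentHashMap stands in for the Geode Region, and the class name, key, and value are invented for illustration; only the Awaitility.await().atMost(...).until(...) pattern is taken from the diff.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

import org.awaitility.Awaitility;

/**
 * Minimal sketch of the Awaitility polling idiom used in the rewritten test.
 * A plain ConcurrentHashMap stands in for the Geode Region; names and values
 * are hypothetical.
 */
public class AwaitilityPollingSketch {

  public static void main(String[] args) {
    Map<String, String> region = new ConcurrentHashMap<>();

    // Simulate an update that arrives asynchronously, as a server-side put would.
    new Thread(() -> {
      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {
        Thread.currentThread().interrupt();
      }
      region.put("k1", "server-k1");
    }).start();

    // Poll until the expected value becomes visible, giving up after 60 seconds,
    // the same bound the diff uses in place of the removed WaitCriterion blocks.
    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
      return "server-k1".equals(region.get("k1"));
    });

    System.out.println("observed: " + region.get("k1"));
  }
}

Awaitility re-evaluates the lambda until it returns true or the timeout elapses, which is the behaviour the removed WaitCriterion/Wait.waitForCriterion code implemented by hand.
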
http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ShowDeadlockDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ShowDeadlockDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ShowDeadlockDUnitTest.java
old mode 100644
new mode 100755

http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
index f2baa50..88df942 100644
--- a/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
@@ -997,10 +997,10 @@ fromData,50,2a03b500052bb9004201003d1c9900112abb000759b70043b50004a7000e2abb0003
 toData,22,2b2ab40004c10007b9004002002ab400042bb60041b1
 
 org/apache/geode/internal/cache/EventID,4
-fromData,53,2a2bb80038b500042bb80038b800394d2a2cb8003ab500092a2cb8003ab5000b2a2bb9003b0100b5000c2a2bb9003c0100b50001b1
-fromDataPre_GFE_8_0_0_0,33,2a2bb80038b500042bb80038b800394d2a2cb8003ab500092a2cb8003ab5000bb1
-toData,44,2ab400042bb800352ab400092ab4000bb800332bb800352b2ab4000cb9003602002b2ab40001b900370200b1
-toDataPre_GFE_8_0_0_0,24,2ab400042bb800352ab400092ab4000bb800332bb80035b1
+fromData,53,2a2bb8003db500042bb8003db8003e4d2a2cb8003fb500092a2cb8003fb5000b2a2bb900400100b5000c2a2bb900410100b50001b1
+fromDataPre_GFE_8_0_0_0,33,2a2bb8003db500042bb8003db8003e4d2a2cb8003fb500092a2cb8003fb5000bb1
+toData,92,2bb800354d2cb20036b600379d00242ab600384ebb0010592cb700393a042d1904b600151904b600162bb8003aa7000b2ab400042bb8003a2ab400092ab4000bb800332bb8003a2b2ab4000cb9003b02002b2ab40001b9003c0200b1
+toDataPre_GFE_8_0_0_0,24,2ab400042bb8003a2ab400092ab4000bb800332bb8003ab1
 
 org/apache/geode/internal/cache/EventTracker$EventSeqnoHolder,2
 fromData,22,2a2bb9000e0100b500042a2bb8000fc00010b50005b1

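Each row in sanctionedDataSerializables.txt appears to pair a serialization method name with the length and hex dump of its compiled body, so the EventID rows above change because the commit changed EventID's toData/fromData implementations. As background only (not part of the commit), the sketch below shows the toData/fromData contract that this file fingerprints; the ExampleId class and its fields are hypothetical, and only the org.apache.geode.DataSerializable interface is taken from Geode.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.geode.DataSerializable;

/**
 * Hedged sketch of a DataSerializable class of the kind tracked by
 * sanctionedDataSerializables.txt. ExampleId and its fields are hypothetical;
 * only the interface contract is real.
 */
public class ExampleId implements DataSerializable {

  private long sequenceId;
  private String memberId = "";

  @Override
  public void toData(DataOutput out) throws IOException {
    // Write fields in a fixed order; the compiled body of this method is what
    // the sanctioned-serializables entry fingerprints.
    out.writeLong(sequenceId);
    out.writeUTF(memberId);
  }

  @Override
  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
    // Read fields back in the same order they were written.
    this.sequenceId = in.readLong();
    this.memberId = in.readUTF();
  }
}

Any change to either method body changes its compiled bytecode, which is presumably why the recorded EventID hashes had to be regenerated alongside the code change.
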

[26/43] geode git commit: Cleanup BaseCommand

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java
index 9ed00be..f09c854 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java
@@ -23,21 +23,20 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.concurrent.Semaphore;
 import java.util.regex.Pattern;
 
+import edu.umd.cs.findbugs.annotations.SuppressWarnings;
 import org.apache.logging.log4j.Logger;
 
-import org.apache.geode.CancelException;
 import org.apache.geode.CopyException;
 import org.apache.geode.InternalGemFireError;
 import org.apache.geode.SerializationException;
 import org.apache.geode.SystemFailure;
-import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheLoaderException;
 import org.apache.geode.cache.CacheWriterException;
 import org.apache.geode.cache.InterestResultPolicy;
@@ -86,24 +85,12 @@ import org.apache.geode.security.GemFireSecurityException;
 public abstract class BaseCommand implements Command {
   protected static final Logger logger = LogService.getLogger();
 
-  /**
-   * Whether zipped values are being passed to/from the client. Can be modified using the system
-   * property Message.ZIP_VALUES ? This does not appear to happen anywhere
-   */
-  protected static final boolean zipValues = false;
-
-  protected static final boolean APPLY_RETRIES =
-      Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "gateway.ApplyRetries");
-
-  public static final byte[] OK_BYTES = new byte[] {0};
-
-  public static final int maximumChunkSize =
-      Integer.getInteger("BridgeServer.MAXIMUM_CHUNK_SIZE", 100).intValue();
+  private static final byte[] OK_BYTES = new byte[] {0};
 
-  /** Maximum number of entries in each chunked response chunk */
+  public static final int MAXIMUM_CHUNK_SIZE = Integer.getInteger("BridgeServer.MAXIMUM_CHUNK_SIZE", 100);
 
   /** Whether to suppress logging of IOExceptions */
-  private static boolean suppressIOExceptionLogging =
+  private static final boolean SUPPRESS_IO_EXCEPTION_LOGGING =
       Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "bridge.suppressIOExceptionLogging");
 
   /**
@@ -112,86 +99,88 @@ public abstract class BaseCommand implements Command {
    * of them completes or fails. The bytes are computed based in the size sent in the incoming msg
    * header.
    */
-  private static final int MAX_INCOMING_DATA =
-      Integer.getInteger("BridgeServer.MAX_INCOMING_DATA", -1).intValue();
+  private static final int MAX_INCOMING_DATA = Integer.getInteger("BridgeServer.MAX_INCOMING_DATA", -1);
 
   /**
    * Maximum number of concurrent incoming client messages that a bridge server will allow. Once a
    * server is working on this number additional incoming client messages will wait until one of
    * them completes or fails.
    */
-  private static final int MAX_INCOMING_MSGS =
-      Integer.getInteger("BridgeServer.MAX_INCOMING_MSGS", -1).intValue();
+  private static final int MAX_INCOMING_MESSAGES = Integer.getInteger("BridgeServer.MAX_INCOMING_MSGS", -1);
 
-  private static final Semaphore incomingDataLimiter;
+  private static final Semaphore INCOMING_DATA_LIMITER;
+
+  private static final Semaphore INCOMING_MSG_LIMITER;
+
+  protected SecurityService securityService = IntegratedSecurityService.getSecurityService();
 
-  private static final Semaphore incomingMsgLimiter;
   static {
-    Semaphore tmp;
+    Semaphore semaphore;
     if (MAX_INCOMING_DATA > 0) {
       // backport requires that this is fair since we inc by values > 1
-      tmp = new Semaphore(MAX_INCOMING_DATA, true);
+      semaphore = new Semaphore(MAX_INCOMING_DATA, true);
     } else {
-      tmp = null;
+      semaphore = null;
     }
-    incomingDataLimiter = tmp;
-    if (MAX_INCOMING_MSGS > 0) {
-      tmp = new Semaphore(MAX_INCOMING_MSGS, false); // unfair for best
-      // performance
+    INCOMING_DATA_LIMITER = semaphore;
+    if (MAX_INCOMING_MESSAGES > 0) {
+      // unfair for best performance
+      semaphore = new Semaphore(MAX_INCOMING_MESSAGES, false);
     } else {
-      tmp = null;
+      semaphore = null;
     }
-    incomingMsgLimiter = tmp;
-
+    INCOMING_MSG_LIMITER = semaphore;
   }
 
-  protected SecurityService securityService = IntegratedSecurityService.getSecurityService();
+  protected static byte[] okBytes() {
+    return OK_BYTES;
+  }
 
-  public void execute(Message msg, ServerConnection servConn) {
+  @Override
+  public void execute(Message clientMessage, ServerConnection serverConnection) {
     // Read the request and update the statistics
     long start = DistributionStats.getStatTime();
-    // servConn.resetTransientData();
-    if (EntryLogger.isEnabled() && servConn != null) {
-      EntryLogger.setSource(servConn.getMembershipID(), "c2s");
+    if (EntryLogger.isEnabled() && serverConnection != null) {
+      EntryLogger.setSource(serverConnection.getMembershipID(), "c2s");
     }
-    boolean shouldMasquerade = shouldMasqueradeForTx(msg, servConn);
+    boolean shouldMasquerade = shouldMasqueradeForTx(clientMessage, serverConnection);
     try {
       if (shouldMasquerade) {
-        InternalCache cache = servConn.getCache();
+        InternalCache cache = serverConnection.getCache();
         InternalDistributedMember member =
-            (InternalDistributedMember) servConn.getProxyID().getDistributedMember();
+            (InternalDistributedMember) serverConnection.getProxyID().getDistributedMember();
         TXManagerImpl txMgr = cache.getTxManager();
         TXStateProxy tx = null;
         try {
-          tx = txMgr.masqueradeAs(msg, member, false);
-          cmdExecute(msg, servConn, start);
+          tx = txMgr.masqueradeAs(clientMessage, member, false);
+          cmdExecute(clientMessage, serverConnection, start);
           tx.updateProxyServer(txMgr.getMemberId());
         } finally {
           txMgr.unmasquerade(tx);
         }
       } else {
-        cmdExecute(msg, servConn, start);
+        cmdExecute(clientMessage, serverConnection, start);
       }
 
     } catch (TransactionException | CopyException | SerializationException | CacheWriterException
         | CacheLoaderException | GemFireSecurityException | PartitionOfflineException
         | MessageTooLargeException e) {
-      handleExceptionNoDisconnect(msg, servConn, e);
+      handleExceptionNoDisconnect(clientMessage, serverConnection, e);
     } catch (EOFException eof) {
-      BaseCommand.handleEOFException(msg, servConn, eof);
+      BaseCommand.handleEOFException(clientMessage, serverConnection, eof);
     } catch (InterruptedIOException e) { // Solaris only
-      BaseCommand.handleInterruptedIOException(msg, servConn, e);
+      BaseCommand.handleInterruptedIOException(serverConnection, e);
     } catch (IOException e) {
-      BaseCommand.handleIOException(msg, servConn, e);
+      BaseCommand.handleIOException(clientMessage, serverConnection, e);
     } catch (DistributedSystemDisconnectedException e) {
-      BaseCommand.handleShutdownException(msg, servConn, e);
+      BaseCommand.handleShutdownException(clientMessage, serverConnection, e);
     } catch (VirtualMachineError err) {
       SystemFailure.initiateFailure(err);
       // If this ever returns, rethrow the error. We're poisoned
       // now, so don't let this thread continue.
       throw err;
     } catch (Throwable e) {
-      BaseCommand.handleThrowable(msg, servConn, e);
+      BaseCommand.handleThrowable(clientMessage, serverConnection, e);
     } finally {
       EntryLogger.clearSource();
     }
@@ -201,16 +190,10 @@ public abstract class BaseCommand implements Command {
    * checks to see if this thread needs to masquerade as a transactional thread. clients after
    * GFE_66 should be able to start a transaction.
    * 
-   * @param msg
-   * @param servConn
    * @return true if thread should masquerade as a transactional thread.
    */
-  protected boolean shouldMasqueradeForTx(Message msg, ServerConnection servConn) {
-    if (servConn.getClientVersion().compareTo(Version.GFE_66) >= 0
-        && msg.getTransactionId() > TXManagerImpl.NOTX) {
-      return true;
-    }
-    return false;
+  protected boolean shouldMasqueradeForTx(Message clientMessage, ServerConnection serverConnection) {
+    return serverConnection.getClientVersion().compareTo(Version.GFE_66) >= 0 && clientMessage.getTransactionId() > TXManagerImpl.NOTX;
   }
 
   /**
@@ -221,13 +204,11 @@ public abstract class BaseCommand implements Command {
    * <p>
    * The client event should have the event identifier from the client and the region affected by
    * the operation.
-   * 
-   * @param clientEvent
    */
   public boolean recoverVersionTagForRetriedOperation(EntryEventImpl clientEvent) {
     LocalRegion r = clientEvent.getRegion();
-    VersionTag tag = null;
-    if ((clientEvent.getVersionTag() != null) && (clientEvent.getVersionTag().isGatewayTag())) {
+    VersionTag tag;
+    if (clientEvent.getVersionTag() != null && clientEvent.getVersionTag().isGatewayTag()) {
       tag = r.findVersionTagForGatewayEvent(clientEvent.getEventId());
     } else {
       tag = r.findVersionTagForClientEvent(clientEvent.getEventId());
@@ -246,7 +227,7 @@ public abstract class BaseCommand implements Command {
       }
       clientEvent.setVersionTag(tag);
     }
-    return (tag != null);
+    return tag != null;
   }
 
   /**
@@ -258,18 +239,18 @@ public abstract class BaseCommand implements Command {
    * The client event should have the event identifier from the client and the region affected by
    * the operation.
    */
-  protected VersionTag findVersionTagsForRetriedBulkOp(LocalRegion r, EventID eventID) {
-    VersionTag tag = r.findVersionTagForClientBulkOp(eventID);
+  protected VersionTag findVersionTagsForRetriedBulkOp(LocalRegion region, EventID eventID) {
+    VersionTag tag = region.findVersionTagForClientBulkOp(eventID);
     if (tag != null) {
       if (logger.isDebugEnabled()) {
         logger.debug("recovered version tag {} for replayed bulk operation {}", tag, eventID);
       }
       return tag;
     }
-    if (r instanceof DistributedRegion || r instanceof PartitionedRegion) {
+    if (region instanceof DistributedRegion || region instanceof PartitionedRegion) {
       // TODO this could be optimized for partitioned regions by sending the key
       // so that the PR could look at an individual bucket for the event
-      tag = FindVersionTagOperation.findVersionTag(r, eventID, true);
+      tag = FindVersionTagOperation.findVersionTag(region, eventID, true);
     }
     if (tag != null) {
       if (logger.isDebugEnabled()) {
@@ -279,285 +260,231 @@ public abstract class BaseCommand implements Command {
     return tag;
   }
 
-  abstract public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public abstract void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException, InterruptedException;
 
-  protected void writeReply(Message origMsg, ServerConnection servConn) throws IOException {
-    Message replyMsg = servConn.getReplyMessage();
-    servConn.getCache().getCancelCriterion().checkCancelInProgress(null);
+  protected void writeReply(Message origMsg, ServerConnection serverConnection) throws IOException {
+    Message replyMsg = serverConnection.getReplyMessage();
+    serverConnection.getCache().getCancelCriterion().checkCancelInProgress(null);
     replyMsg.setMessageType(MessageType.REPLY);
     replyMsg.setNumberOfParts(1);
     replyMsg.setTransactionId(origMsg.getTransactionId());
-    replyMsg.addBytesPart(OK_BYTES);
-    replyMsg.send(servConn);
+    replyMsg.addBytesPart(okBytes());
+    replyMsg.send(serverConnection);
     if (logger.isTraceEnabled()) {
-      logger.trace("{}: rpl tx: {}", servConn.getName(), origMsg.getTransactionId());
+      logger.trace("{}: rpl tx: {}", serverConnection.getName(), origMsg.getTransactionId());
     }
   }
 
-  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection servConn,
+  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection serverConnection,
       PartitionedRegion pr, byte nwHop) throws IOException {
-    Message replyMsg = servConn.getReplyMessage();
-    servConn.getCache().getCancelCriterion().checkCancelInProgress(null);
+    Message replyMsg = serverConnection.getReplyMessage();
+    serverConnection.getCache().getCancelCriterion().checkCancelInProgress(null);
     replyMsg.setMessageType(MessageType.REPLY);
     replyMsg.setNumberOfParts(1);
     replyMsg.setTransactionId(origMsg.getTransactionId());
     replyMsg.addBytesPart(new byte[] {pr.getMetadataVersion(), nwHop});
-    replyMsg.send(servConn);
+    replyMsg.send(serverConnection);
     pr.getPrStats().incPRMetaDataSentCount();
     if (logger.isTraceEnabled()) {
-      logger.trace("{}: rpl with REFRESH_METADAT tx: {}", servConn.getName(),
+      logger.trace("{}: rpl with REFRESH_METADATA tx: {}", serverConnection.getName(),
           origMsg.getTransactionId());
     }
   }
 
-  private static void handleEOFException(Message msg, ServerConnection servConn, Exception eof) {
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
-    boolean potentialModification = servConn.getPotentialModification();
+  private static void handleEOFException(Message msg, ServerConnection serverConnection, Exception eof) {
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
+    boolean potentialModification = serverConnection.getPotentialModification();
     if (!crHelper.isShutdown()) {
       if (potentialModification) {
         stats.incAbandonedWriteRequests();
       } else {
         stats.incAbandonedReadRequests();
       }
-      if (!suppressIOExceptionLogging) {
+      if (!SUPPRESS_IO_EXCEPTION_LOGGING) {
         if (potentialModification) {
-          int transId = (msg != null) ? msg.getTransactionId() : Integer.MIN_VALUE;
+          int transId = msg != null ? msg.getTransactionId() : Integer.MIN_VALUE;
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.BaseCommand_0_EOFEXCEPTION_DURING_A_WRITE_OPERATION_ON_REGION__1_KEY_2_MESSAGEID_3,
-              new Object[] {servConn.getName(), servConn.getModRegion(), servConn.getModKey(),
-                  Integer.valueOf(transId)}));
+              new Object[] {serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId }));
         } else {
           logger.debug("EOF exception", eof);
           logger.info(LocalizedMessage.create(
               LocalizedStrings.BaseCommand_0_CONNECTION_DISCONNECT_DETECTED_BY_EOF,
-              servConn.getName()));
+              serverConnection.getName()));
         }
       }
     }
-    servConn.setFlagProcessMessagesAsFalse();
-    servConn.setClientDisconnectedException(eof);
+    serverConnection.setFlagProcessMessagesAsFalse();
+    serverConnection.setClientDisconnectedException(eof);
   }
 
-  private static void handleInterruptedIOException(Message msg, ServerConnection servConn,
-      Exception e) {
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    if (!crHelper.isShutdown() && servConn.isOpen()) {
-      if (!suppressIOExceptionLogging) {
+  private static void handleInterruptedIOException(ServerConnection serverConnection, Exception e) {
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    if (!crHelper.isShutdown() && serverConnection.isOpen()) {
+      if (!SUPPRESS_IO_EXCEPTION_LOGGING) {
         if (logger.isDebugEnabled())
           logger.debug("Aborted message due to interrupt: {}", e.getMessage(), e);
       }
     }
-    servConn.setFlagProcessMessagesAsFalse();
-    servConn.setClientDisconnectedException(e);
+    serverConnection.setFlagProcessMessagesAsFalse();
+    serverConnection.setClientDisconnectedException(e);
   }
 
-  private static void handleIOException(Message msg, ServerConnection servConn, Exception e) {
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    boolean potentialModification = servConn.getPotentialModification();
+  private static void handleIOException(Message msg, ServerConnection serverConnection, Exception e) {
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    boolean potentialModification = serverConnection.getPotentialModification();
 
-    if (!crHelper.isShutdown() && servConn.isOpen()) {
-      if (!suppressIOExceptionLogging) {
+    if (!crHelper.isShutdown() && serverConnection.isOpen()) {
+      if (!SUPPRESS_IO_EXCEPTION_LOGGING) {
         if (potentialModification) {
-          int transId = (msg != null) ? msg.getTransactionId() : Integer.MIN_VALUE;
+          int transId = msg != null ? msg.getTransactionId() : Integer.MIN_VALUE;
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.BaseCommand_0_UNEXPECTED_IOEXCEPTION_DURING_OPERATION_FOR_REGION_1_KEY_2_MESSID_3,
-              new Object[] {servConn.getName(), servConn.getModRegion(), servConn.getModKey(),
-                  Integer.valueOf(transId)}),
+              new Object[] {serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId }),
               e);
         } else {
           logger.warn(LocalizedMessage.create(LocalizedStrings.BaseCommand_0_UNEXPECTED_IOEXCEPTION,
-              servConn.getName()), e);
+              serverConnection.getName()), e);
         }
       }
     }
-    servConn.setFlagProcessMessagesAsFalse();
-    servConn.setClientDisconnectedException(e);
+    serverConnection.setFlagProcessMessagesAsFalse();
+    serverConnection.setClientDisconnectedException(e);
   }
 
-  private static void handleShutdownException(Message msg, ServerConnection servConn, Exception e) {
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    boolean potentialModification = servConn.getPotentialModification();
+  private static void handleShutdownException(Message msg, ServerConnection serverConnection, Exception e) {
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    boolean potentialModification = serverConnection.getPotentialModification();
 
     if (!crHelper.isShutdown()) {
       if (potentialModification) {
-        int transId = (msg != null) ? msg.getTransactionId() : Integer.MIN_VALUE;
+        int transId = msg != null ? msg.getTransactionId() : Integer.MIN_VALUE;
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.BaseCommand_0_UNEXPECTED_SHUTDOWNEXCEPTION_DURING_OPERATION_ON_REGION_1_KEY_2_MESSAGEID_3,
-            new Object[] {servConn.getName(), servConn.getModRegion(), servConn.getModKey(),
-                Integer.valueOf(transId)}),
+            new Object[] {serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId }),
             e);
       } else {
         logger.warn(LocalizedMessage.create(
-            LocalizedStrings.BaseCommand_0_UNEXPECTED_SHUTDOWNEXCEPTION, servConn.getName()), e);
+            LocalizedStrings.BaseCommand_0_UNEXPECTED_SHUTDOWNEXCEPTION, serverConnection.getName()), e);
       }
     }
-    servConn.setFlagProcessMessagesAsFalse();
-    servConn.setClientDisconnectedException(e);
+    serverConnection.setFlagProcessMessagesAsFalse();
+    serverConnection.setClientDisconnectedException(e);
   }
 
-  // Handle GemfireSecurityExceptions separately since the connection should not
-  // be terminated (by setting processMessages to false) unlike in
-  // handleThrowable. Fixes bugs #38384 and #39392.
-  // private static void handleGemfireSecurityException(Message msg,
-  // ServerConnection servConn, GemFireSecurityException e) {
-  //
-  // boolean requiresResponse = servConn.getTransientFlag(REQUIRES_RESPONSE);
-  // boolean responded = servConn.getTransientFlag(RESPONDED);
-  // boolean requiresChunkedResponse = servConn
-  // .getTransientFlag(REQUIRES_CHUNKED_RESPONSE);
-  // boolean potentialModification = servConn.getPotentialModification();
-  //
-  // try {
-  // try {
-  // if (requiresResponse && !responded) {
-  // if (requiresChunkedResponse) {
-  // writeChunkedException(msg, e, false, servConn);
-  // }
-  // else {
-  // writeException(msg, e, false, servConn);
-  // }
-  // servConn.setAsTrue(RESPONDED);
-  // }
-  // }
-  // finally { // inner try-finally to ensure proper ordering of logging
-  // if (potentialModification) {
-  // int transId = (msg != null) ? msg.getTransactionId()
-  // : Integer.MIN_VALUE;
-  // }
-  // }
-  // }
-  // catch (IOException ioe) {
-  // if (logger.isDebugEnabled()) {
-  // logger.fine(servConn.getName()
-  // + ": Unexpected IOException writing security exception: ", ioe);
-  // }
-  // }
-  // }
-
-  private static void handleExceptionNoDisconnect(Message msg, ServerConnection servConn,
+  private static void handleExceptionNoDisconnect(Message msg, ServerConnection serverConnection,
       Exception e) {
-    boolean requiresResponse = servConn.getTransientFlag(REQUIRES_RESPONSE);
-    boolean responded = servConn.getTransientFlag(RESPONDED);
-    boolean requiresChunkedResponse = servConn.getTransientFlag(REQUIRES_CHUNKED_RESPONSE);
-    boolean potentialModification = servConn.getPotentialModification();
-    boolean wroteExceptionResponse = false;
+    boolean requiresResponse = serverConnection.getTransientFlag(REQUIRES_RESPONSE);
+    boolean responded = serverConnection.getTransientFlag(RESPONDED);
+    boolean requiresChunkedResponse = serverConnection.getTransientFlag(REQUIRES_CHUNKED_RESPONSE);
+    boolean potentialModification = serverConnection.getPotentialModification();
 
     try {
+      boolean wroteExceptionResponse = false;
       try {
         if (requiresResponse && !responded) {
           if (requiresChunkedResponse) {
-            writeChunkedException(msg, e, false, servConn);
+            writeChunkedException(msg, e, serverConnection);
           } else {
-            writeException(msg, e, false, servConn);
+            writeException(msg, e, false, serverConnection);
           }
           wroteExceptionResponse = true;
-          servConn.setAsTrue(RESPONDED);
+          serverConnection.setAsTrue(RESPONDED);
         }
       } finally { // inner try-finally to ensure proper ordering of logging
         if (potentialModification) {
-          int transId = (msg != null) ? msg.getTransactionId() : Integer.MIN_VALUE;
+          int transId = msg != null ? msg.getTransactionId() : Integer.MIN_VALUE;
           if (!wroteExceptionResponse) {
             logger.warn(LocalizedMessage.create(
                 LocalizedStrings.BaseCommand_0_UNEXPECTED_EXCEPTION_DURING_OPERATION_ON_REGION_1_KEY_2_MESSAGEID_3,
-                new Object[] {servConn.getName(), servConn.getModRegion(), servConn.getModKey(),
-                    Integer.valueOf(transId)}),
+                new Object[] {serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId }),
                 e);
           } else {
             if (logger.isDebugEnabled()) {
               logger.debug("{}: Exception during operation on region: {} key: {} messageId: {}",
-                  servConn.getName(), servConn.getModRegion(), servConn.getModKey(), transId, e);
+                  serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId, e);
             }
           }
         } else {
           if (!wroteExceptionResponse) {
             logger.warn(LocalizedMessage.create(LocalizedStrings.BaseCommand_0_UNEXPECTED_EXCEPTION,
-                servConn.getName()), e);
+                serverConnection.getName()), e);
           } else {
             if (logger.isDebugEnabled()) {
-              logger.debug("{}: Exception: {}", servConn.getName(), e.getMessage(), e);
+              logger.debug("{}: Exception: {}", serverConnection.getName(), e.getMessage(), e);
             }
           }
         }
       }
     } catch (IOException ioe) {
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Unexpected IOException writing exception: {}", servConn.getName(),
+        logger.debug("{}: Unexpected IOException writing exception: {}", serverConnection.getName(),
             ioe.getMessage(), ioe);
       }
     }
   }
 
-  private static void handleThrowable(Message msg, ServerConnection servConn, Throwable th) {
-    boolean requiresResponse = servConn.getTransientFlag(REQUIRES_RESPONSE);
-    boolean responded = servConn.getTransientFlag(RESPONDED);
-    boolean requiresChunkedResponse = servConn.getTransientFlag(REQUIRES_CHUNKED_RESPONSE);
-    boolean potentialModification = servConn.getPotentialModification();
+  private static void handleThrowable(Message msg, ServerConnection serverConnection, Throwable th) {
+    boolean requiresResponse = serverConnection.getTransientFlag(REQUIRES_RESPONSE);
+    boolean responded = serverConnection.getTransientFlag(RESPONDED);
+    boolean requiresChunkedResponse = serverConnection.getTransientFlag(REQUIRES_CHUNKED_RESPONSE);
+    boolean potentialModification = serverConnection.getPotentialModification();
 
     try {
       try {
         if (th instanceof Error) {
           logger.fatal(LocalizedMessage.create(
-              LocalizedStrings.BaseCommand_0_UNEXPECTED_ERROR_ON_SERVER, servConn.getName()), th);
+              LocalizedStrings.BaseCommand_0_UNEXPECTED_ERROR_ON_SERVER, serverConnection.getName()), th);
         }
         if (requiresResponse && !responded) {
           if (requiresChunkedResponse) {
-            writeChunkedException(msg, th, false, servConn);
+            writeChunkedException(msg, th, serverConnection);
           } else {
-            writeException(msg, th, false, servConn);
+            writeException(msg, th, false, serverConnection);
           }
-          servConn.setAsTrue(RESPONDED);
+          serverConnection.setAsTrue(RESPONDED);
         }
       } finally { // inner try-finally to ensure proper ordering of logging
-        if (th instanceof Error) {
-          // log nothing
-        } else if (th instanceof CancelException) {
-          // log nothing
-        } else {
+        if (!(th instanceof Error || th instanceof CacheLoaderException)) {
           if (potentialModification) {
-            int transId = (msg != null) ? msg.getTransactionId() : Integer.MIN_VALUE;
+            int transId = msg != null ? msg.getTransactionId() : Integer.MIN_VALUE;
             logger.warn(LocalizedMessage.create(
                 LocalizedStrings.BaseCommand_0_UNEXPECTED_EXCEPTION_DURING_OPERATION_ON_REGION_1_KEY_2_MESSAGEID_3,
-                new Object[] {servConn.getName(), servConn.getModRegion(), servConn.getModKey(),
-                    Integer.valueOf(transId)}),
+                new Object[] {serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId }),
                 th);
           } else {
             logger.warn(LocalizedMessage.create(LocalizedStrings.BaseCommand_0_UNEXPECTED_EXCEPTION,
-                servConn.getName()), th);
+                serverConnection.getName()), th);
           }
         }
       }
     } catch (IOException ioe) {
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Unexpected IOException writing exception: {}", servConn.getName(),
+        logger.debug("{}: Unexpected IOException writing exception: {}", serverConnection.getName(),
             ioe.getMessage(), ioe);
       }
     } finally {
-      servConn.setFlagProcessMessagesAsFalse();
-      servConn.setClientDisconnectedException(th);
+      serverConnection.setFlagProcessMessagesAsFalse();
+      serverConnection.setClientDisconnectedException(th);
     }
   }
 
-  protected static void writeChunkedException(Message origMsg, Throwable e, boolean isSevere,
-      ServerConnection servConn) throws IOException {
-    writeChunkedException(origMsg, e, isSevere, servConn, servConn.getChunkedResponseMessage());
+  protected static void writeChunkedException(Message origMsg, Throwable e, ServerConnection serverConnection) throws IOException {
+    writeChunkedException(origMsg, e, serverConnection, serverConnection.getChunkedResponseMessage());
   }
 
-  protected static void writeChunkedException(Message origMsg, Throwable e, boolean isSevere,
-      ServerConnection servConn, ChunkedMessage originalReponse) throws IOException {
-    writeChunkedException(origMsg, e, isSevere, servConn, originalReponse, 2);
+  protected static void writeChunkedException(Message origMsg, Throwable e, ServerConnection serverConnection, ChunkedMessage originalResponse) throws IOException {
+    writeChunkedException(origMsg, e, serverConnection, originalResponse, 2);
   }
 
-  protected static void writeChunkedException(Message origMsg, Throwable exception,
-      boolean isSevere, ServerConnection servConn, ChunkedMessage originalReponse, int numOfParts)
+  private static void writeChunkedException(Message origMsg, Throwable exception, ServerConnection serverConnection, ChunkedMessage originalResponse, int numOfParts)
       throws IOException {
-    Throwable e = getClientException(servConn, exception);
-    ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
-    chunkedResponseMsg.setServerConnection(servConn);
-    if (originalReponse.headerHasBeenSent()) {
-      // chunkedResponseMsg = originalReponse;
-      // fix for bug 35442
+    Throwable e = getClientException(serverConnection, exception);
+    ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
+    chunkedResponseMsg.setServerConnection(serverConnection);
+    if (originalResponse.headerHasBeenSent()) {
       chunkedResponseMsg.setNumberOfParts(numOfParts);
       chunkedResponseMsg.setLastChunkAndNumParts(true, numOfParts);
       chunkedResponseMsg.addObjPart(e);
@@ -565,7 +492,7 @@ public abstract class BaseCommand implements Command {
         chunkedResponseMsg.addStringPart(getExceptionTrace(e));
       }
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk while reply in progress: {}", servConn.getName(),
+        logger.debug("{}: Sending exception chunk while reply in progress: {}", serverConnection.getName(),
             e.getMessage(), e);
       }
     } else {
@@ -579,10 +506,10 @@ public abstract class BaseCommand implements Command {
         chunkedResponseMsg.addStringPart(getExceptionTrace(e));
       }
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk: {}", servConn.getName(), e.getMessage(), e);
+        logger.debug("{}: Sending exception chunk: {}", serverConnection.getName(), e.getMessage(), e);
       }
     }
-    chunkedResponseMsg.sendChunk(servConn);
+    chunkedResponseMsg.sendChunk(serverConnection);
   }
 
   // Get the exception stacktrace for native clients
@@ -595,26 +522,25 @@ public abstract class BaseCommand implements Command {
   }
 
   protected static void writeException(Message origMsg, Throwable e, boolean isSevere,
-      ServerConnection servConn) throws IOException {
-    writeException(origMsg, MessageType.EXCEPTION, e, isSevere, servConn);
+      ServerConnection serverConnection) throws IOException {
+    writeException(origMsg, MessageType.EXCEPTION, e, isSevere, serverConnection);
   }
 
-  private static Throwable getClientException(ServerConnection servConn, Throwable e) {
-    Cache cache = servConn.getCache();
-    if (cache instanceof InternalCache) {
-      InternalCache icache = (InternalCache) servConn.getCache();
-      OldClientSupportService svc = icache.getService(OldClientSupportService.class);
+  private static Throwable getClientException(ServerConnection serverConnection, Throwable e) {
+    InternalCache cache = serverConnection.getCache();
+    if (cache != null) {
+      OldClientSupportService svc = cache.getService(OldClientSupportService.class);
       if (svc != null) {
-        return svc.getThrowable(e, servConn.getClientVersion());
+        return svc.getThrowable(e, serverConnection.getClientVersion());
       }
     }
     return e;
   }
 
   protected static void writeException(Message origMsg, int msgType, Throwable e, boolean isSevere,
-      ServerConnection servConn) throws IOException {
-    Throwable theException = getClientException(servConn, e);
-    Message errorMsg = servConn.getErrorResponseMessage();
+      ServerConnection serverConnection) throws IOException {
+    Throwable theException = getClientException(serverConnection, e);
+    Message errorMsg = serverConnection.getErrorResponseMessage();
     errorMsg.setMessageType(msgType);
     errorMsg.setNumberOfParts(2);
     errorMsg.setTransactionId(origMsg.getTransactionId());
@@ -628,9 +554,9 @@ public abstract class BaseCommand implements Command {
     }
     errorMsg.addObjPart(theException);
     errorMsg.addStringPart(getExceptionTrace(theException));
-    errorMsg.send(servConn);
+    errorMsg.send(serverConnection);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Wrote exception: {}", servConn.getName(), e.getMessage(), e);
+      logger.debug("{}: Wrote exception: {}", serverConnection.getName(), e.getMessage(), e);
     }
     if (e instanceof MessageTooLargeException) {
       throw (IOException) e;
@@ -638,41 +564,41 @@ public abstract class BaseCommand implements Command {
   }
 
   protected static void writeErrorResponse(Message origMsg, int messageType,
-      ServerConnection servConn) throws IOException {
-    Message errorMsg = servConn.getErrorResponseMessage();
+      ServerConnection serverConnection) throws IOException {
+    Message errorMsg = serverConnection.getErrorResponseMessage();
     errorMsg.setMessageType(messageType);
     errorMsg.setNumberOfParts(1);
     errorMsg.setTransactionId(origMsg.getTransactionId());
     errorMsg.addStringPart(
         LocalizedStrings.BaseCommand_INVALID_DATA_RECEIVED_PLEASE_SEE_THE_CACHE_SERVER_LOG_FILE_FOR_ADDITIONAL_DETAILS
             .toLocalizedString());
-    errorMsg.send(servConn);
+    errorMsg.send(serverConnection);
   }
 
   protected static void writeErrorResponse(Message origMsg, int messageType, String msg,
-      ServerConnection servConn) throws IOException {
-    Message errorMsg = servConn.getErrorResponseMessage();
+      ServerConnection serverConnection) throws IOException {
+    Message errorMsg = serverConnection.getErrorResponseMessage();
     errorMsg.setMessageType(messageType);
     errorMsg.setNumberOfParts(1);
     errorMsg.setTransactionId(origMsg.getTransactionId());
     errorMsg.addStringPart(msg);
-    errorMsg.send(servConn);
+    errorMsg.send(serverConnection);
   }
 
   protected static void writeRegionDestroyedEx(Message msg, String regionName, String title,
-      ServerConnection servConn) throws IOException {
-    String reason = servConn.getName() + ": Region named " + regionName + title;
+      ServerConnection serverConnection) throws IOException {
+    String reason = serverConnection.getName() + ": Region named " + regionName + title;
     RegionDestroyedException ex = new RegionDestroyedException(reason, regionName);
-    if (servConn.getTransientFlag(REQUIRES_CHUNKED_RESPONSE)) {
-      writeChunkedException(msg, ex, false, servConn);
+    if (serverConnection.getTransientFlag(REQUIRES_CHUNKED_RESPONSE)) {
+      writeChunkedException(msg, ex, serverConnection);
     } else {
-      writeException(msg, ex, false, servConn);
+      writeException(msg, ex, false, serverConnection);
     }
   }
 
   protected static void writeResponse(Object data, Object callbackArg, Message origMsg,
-      boolean isObject, ServerConnection servConn) throws IOException {
-    Message responseMsg = servConn.getResponseMessage();
+      boolean isObject, ServerConnection serverConnection) throws IOException {
+    Message responseMsg = serverConnection.getResponseMessage();
     responseMsg.setMessageType(MessageType.RESPONSE);
     responseMsg.setTransactionId(origMsg.getTransactionId());
 
@@ -686,20 +612,20 @@ public abstract class BaseCommand implements Command {
       responseMsg.addRawPart((byte[]) data, isObject);
     } else {
       Assert.assertTrue(isObject, "isObject should be true when value is not a byte[]");
-      responseMsg.addObjPart(data, zipValues);
+      responseMsg.addObjPart(data, false);
     }
     if (callbackArg != null) {
       responseMsg.addObjPart(callbackArg);
     }
-    servConn.getCache().getCancelCriterion().checkCancelInProgress(null);
-    responseMsg.send(servConn);
+    serverConnection.getCache().getCancelCriterion().checkCancelInProgress(null);
+    responseMsg.send(serverConnection);
     origMsg.clearParts();
   }
 
   protected static void writeResponseWithRefreshMetadata(Object data, Object callbackArg,
-      Message origMsg, boolean isObject, ServerConnection servConn, PartitionedRegion pr,
+      Message origMsg, boolean isObject, ServerConnection serverConnection, PartitionedRegion pr,
       byte nwHop) throws IOException {
-    Message responseMsg = servConn.getResponseMessage();
+    Message responseMsg = serverConnection.getResponseMessage();
     responseMsg.setMessageType(MessageType.RESPONSE);
     responseMsg.setTransactionId(origMsg.getTransactionId());
 
@@ -713,32 +639,32 @@ public abstract class BaseCommand implements Command {
       responseMsg.addRawPart((byte[]) data, isObject);
     } else {
       Assert.assertTrue(isObject, "isObject should be true when value is not a byte[]");
-      responseMsg.addObjPart(data, zipValues);
+      responseMsg.addObjPart(data, false);
     }
     if (callbackArg != null) {
       responseMsg.addObjPart(callbackArg);
     }
     responseMsg.addBytesPart(new byte[] {pr.getMetadataVersion(), nwHop});
-    servConn.getCache().getCancelCriterion().checkCancelInProgress(null);
-    responseMsg.send(servConn);
+    serverConnection.getCache().getCancelCriterion().checkCancelInProgress(null);
+    responseMsg.send(serverConnection);
     origMsg.clearParts();
   }
 
   protected static void writeResponseWithFunctionAttribute(byte[] data, Message origMsg,
-      ServerConnection servConn) throws IOException {
-    Message responseMsg = servConn.getResponseMessage();
+      ServerConnection serverConnection) throws IOException {
+    Message responseMsg = serverConnection.getResponseMessage();
     responseMsg.setMessageType(MessageType.RESPONSE);
     responseMsg.setTransactionId(origMsg.getTransactionId());
     responseMsg.setNumberOfParts(1);
     responseMsg.addBytesPart(data);
-    servConn.getCache().getCancelCriterion().checkCancelInProgress(null);
-    responseMsg.send(servConn);
+    serverConnection.getCache().getCancelCriterion().checkCancelInProgress(null);
+    responseMsg.send(serverConnection);
     origMsg.clearParts();
   }
 
-  static protected void checkForInterrupt(ServerConnection servConn, Exception e)
+  protected static void checkForInterrupt(ServerConnection serverConnection, Exception e)
       throws InterruptedException, InterruptedIOException {
-    servConn.getCachedRegionHelper().checkCancelInProgress(e);
+    serverConnection.getCachedRegionHelper().checkCancelInProgress(e);
     if (e instanceof InterruptedException) {
       throw (InterruptedException) e;
     }
@@ -747,37 +673,35 @@ public abstract class BaseCommand implements Command {
     }
   }
 
-  protected static void writeQueryResponseChunk(Object queryResponseChunk,
-      CollectionType collectionType, boolean lastChunk, ServerConnection servConn)
+  static void writeQueryResponseChunk(Object queryResponseChunk, CollectionType collectionType, boolean lastChunk, ServerConnection serverConnection)
       throws IOException {
-    ChunkedMessage queryResponseMsg = servConn.getQueryResponseMessage();
+    ChunkedMessage queryResponseMsg = serverConnection.getQueryResponseMessage();
     queryResponseMsg.setNumberOfParts(2);
     queryResponseMsg.setLastChunk(lastChunk);
-    queryResponseMsg.addObjPart(collectionType, zipValues);
-    queryResponseMsg.addObjPart(queryResponseChunk, zipValues);
-    queryResponseMsg.sendChunk(servConn);
+    queryResponseMsg.addObjPart(collectionType, false);
+    queryResponseMsg.addObjPart(queryResponseChunk, false);
+    queryResponseMsg.sendChunk(serverConnection);
   }
 
-  protected static void writeQueryResponseException(Message origMsg, Throwable exception,
-      boolean isSevere, ServerConnection servConn) throws IOException {
-    Throwable e = getClientException(servConn, exception);
-    ChunkedMessage queryResponseMsg = servConn.getQueryResponseMessage();
-    ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
+  protected static void writeQueryResponseException(Message origMsg, Throwable exception, ServerConnection serverConnection) throws IOException {
+    Throwable e = getClientException(serverConnection, exception);
+    ChunkedMessage queryResponseMsg = serverConnection.getQueryResponseMessage();
+    ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
     if (queryResponseMsg.headerHasBeenSent()) {
       // fix for bug 35442
       // This client is expecting 2 parts in this message so send 2 parts
-      queryResponseMsg.setServerConnection(servConn);
+      queryResponseMsg.setServerConnection(serverConnection);
       queryResponseMsg.setNumberOfParts(2);
       queryResponseMsg.setLastChunkAndNumParts(true, 2);
       queryResponseMsg.addObjPart(e);
       queryResponseMsg.addStringPart(getExceptionTrace(e));
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk while reply in progress: {}", servConn.getName(),
+        logger.debug("{}: Sending exception chunk while reply in progress: {}", serverConnection.getName(),
             e.getMessage(), e);
       }
-      queryResponseMsg.sendChunk(servConn);
+      queryResponseMsg.sendChunk(serverConnection);
     } else {
-      chunkedResponseMsg.setServerConnection(servConn);
+      chunkedResponseMsg.setServerConnection(serverConnection);
       chunkedResponseMsg.setMessageType(MessageType.EXCEPTION);
       chunkedResponseMsg.setNumberOfParts(2);
       chunkedResponseMsg.setLastChunkAndNumParts(true, 2);
@@ -786,19 +710,18 @@ public abstract class BaseCommand implements Command {
       chunkedResponseMsg.addObjPart(e);
       chunkedResponseMsg.addStringPart(getExceptionTrace(e));
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk: {}", servConn.getName(), e.getMessage(), e);
+        logger.debug("{}: Sending exception chunk: {}", serverConnection.getName(), e.getMessage(), e);
       }
-      chunkedResponseMsg.sendChunk(servConn);
+      chunkedResponseMsg.sendChunk(serverConnection);
     }
   }
 
   protected static void writeChunkedErrorResponse(Message origMsg, int messageType, String message,
-      ServerConnection servConn) throws IOException {
+      ServerConnection serverConnection) throws IOException {
     // Send chunked response header identifying error message
-    ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
+    ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
     if (logger.isDebugEnabled()) {
-      logger.debug(servConn.getName() + ": Sending error message header type: " + messageType
-          + " transaction: " + origMsg.getTransactionId());
+      logger.debug("{}: Sending error message header type: {} transaction: {}", serverConnection.getName(), messageType, origMsg.getTransactionId());
     }
     chunkedResponseMsg.setMessageType(messageType);
     chunkedResponseMsg.setTransactionId(origMsg.getTransactionId());
@@ -806,32 +729,31 @@ public abstract class BaseCommand implements Command {
 
     // Send actual error
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sending error message chunk: {}", servConn.getName(), message);
+      logger.debug("{}: Sending error message chunk: {}", serverConnection.getName(), message);
     }
     chunkedResponseMsg.setNumberOfParts(1);
     chunkedResponseMsg.setLastChunk(true);
     chunkedResponseMsg.addStringPart(message);
-    chunkedResponseMsg.sendChunk(servConn);
+    chunkedResponseMsg.sendChunk(serverConnection);
   }
 
-  protected static void writeFunctionResponseException(Message origMsg, int messageType,
-      String message, ServerConnection servConn, Throwable exception) throws IOException {
-    Throwable e = getClientException(servConn, exception);
-    ChunkedMessage functionResponseMsg = servConn.getFunctionResponseMessage();
-    ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
+  protected static void writeFunctionResponseException(Message origMsg, int messageType, ServerConnection serverConnection, Throwable exception) throws IOException {
+    Throwable e = getClientException(serverConnection, exception);
+    ChunkedMessage functionResponseMsg = serverConnection.getFunctionResponseMessage();
+    ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
     if (functionResponseMsg.headerHasBeenSent()) {
-      functionResponseMsg.setServerConnection(servConn);
+      functionResponseMsg.setServerConnection(serverConnection);
       functionResponseMsg.setNumberOfParts(2);
       functionResponseMsg.setLastChunkAndNumParts(true, 2);
       functionResponseMsg.addObjPart(e);
       functionResponseMsg.addStringPart(getExceptionTrace(e));
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk while reply in progress: {}", servConn.getName(),
+        logger.debug("{}: Sending exception chunk while reply in progress: {}", serverConnection.getName(),
             e.getMessage(), e);
       }
-      functionResponseMsg.sendChunk(servConn);
+      functionResponseMsg.sendChunk(serverConnection);
     } else {
-      chunkedResponseMsg.setServerConnection(servConn);
+      chunkedResponseMsg.setServerConnection(serverConnection);
       chunkedResponseMsg.setMessageType(messageType);
       chunkedResponseMsg.setNumberOfParts(2);
       chunkedResponseMsg.setLastChunkAndNumParts(true, 2);
@@ -840,9 +762,9 @@ public abstract class BaseCommand implements Command {
       chunkedResponseMsg.addObjPart(e);
       chunkedResponseMsg.addStringPart(getExceptionTrace(e));
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk: {}", servConn.getName(), e.getMessage(), e);
+        logger.debug("{}: Sending exception chunk: {}", serverConnection.getName(), e.getMessage(), e);
       }
-      chunkedResponseMsg.sendChunk(servConn);
+      chunkedResponseMsg.sendChunk(serverConnection);
     }
   }
 
@@ -898,14 +820,14 @@ public abstract class BaseCommand implements Command {
     Message requestMsg = null;
     try {
       requestMsg = servConn.getRequestMessage();
-      requestMsg.recv(servConn, MAX_INCOMING_DATA, incomingDataLimiter, incomingMsgLimiter);
+      requestMsg.recv(servConn, MAX_INCOMING_DATA, INCOMING_DATA_LIMITER, INCOMING_MSG_LIMITER);
       return requestMsg;
     } catch (EOFException eof) {
       handleEOFException(null, servConn, eof);
-      // TODO:Asif: Check if there is any need for explicitly returning
+      // TODO: Check if there is any need for explicitly returning
 
     } catch (InterruptedIOException e) { // Solaris only
-      handleInterruptedIOException(null, servConn, e);
+      handleInterruptedIOException(servConn, e);
 
     } catch (IOException e) {
       handleIOException(null, servConn, e);
@@ -930,7 +852,7 @@ public abstract class BaseCommand implements Command {
     fillAndSendRegisterInterestResponseChunks(region, riKey, interestType, false, policy, servConn);
   }
 
-  /*
+  /**
    * serializeValues is unused for clients < GFE_80
    */
   protected static void fillAndSendRegisterInterestResponseChunks(LocalRegion region, Object riKey,
@@ -959,20 +881,20 @@ public abstract class BaseCommand implements Command {
         // Not supported yet
         throw new InternalGemFireError(
             LocalizedStrings.BaseCommand_NOT_YET_SUPPORTED.toLocalizedString());
+
       case InterestType.FILTER_CLASS:
         throw new InternalGemFireError(
             LocalizedStrings.BaseCommand_NOT_YET_SUPPORTED.toLocalizedString());
-        // handleFilter(region, (String)riKey, policy);
-        // break;
-      case InterestType.REGULAR_EXPRESSION: {
+
+      case InterestType.REGULAR_EXPRESSION:
         String regEx = (String) riKey;
         if (regEx.equals(".*")) {
           handleAllKeys(region, policy, servConn);
         } else {
           handleRegEx(region, regEx, policy, servConn);
         }
-      }
         break;
+
       case InterestType.KEY:
         if (riKey.equals("ALL_KEYS")) {
           handleAllKeys(region, policy, servConn);
@@ -980,13 +902,13 @@ public abstract class BaseCommand implements Command {
           handleSingleton(region, riKey, policy, servConn);
         }
         break;
+
       default:
         throw new InternalGemFireError(
             LocalizedStrings.BaseCommand_UNKNOWN_INTEREST_TYPE.toLocalizedString());
     }
   }
 
-  @SuppressWarnings("rawtypes")
   private static void handleKeysValuesPolicy(LocalRegion region, Object riKey, int interestType,
       boolean serializeValues, ServerConnection servConn) throws IOException {
     if (riKey instanceof List) {
@@ -1002,9 +924,11 @@ public abstract class BaseCommand implements Command {
       case InterestType.OQL_QUERY:
         throw new InternalGemFireError(
             LocalizedStrings.BaseCommand_NOT_YET_SUPPORTED.toLocalizedString());
+
       case InterestType.FILTER_CLASS:
         throw new InternalGemFireError(
             LocalizedStrings.BaseCommand_NOT_YET_SUPPORTED.toLocalizedString());
+
       case InterestType.REGULAR_EXPRESSION:
         String regEx = (String) riKey;
         if (regEx.equals(".*")) {
@@ -1013,6 +937,7 @@ public abstract class BaseCommand implements Command {
           handleKVAllKeys(region, regEx, serializeValues, servConn);
         }
         break;
+
       case InterestType.KEY:
         if (riKey.equals("ALL_KEYS")) {
           handleKVAllKeys(region, null, serializeValues, servConn);
@@ -1020,6 +945,7 @@ public abstract class BaseCommand implements Command {
           handleKVSingleton(region, riKey, serializeValues, servConn);
         }
         break;
+
       default:
         throw new InternalGemFireError(
             LocalizedStrings.BaseCommand_UNKNOWN_INTEREST_TYPE.toLocalizedString());
@@ -1029,18 +955,14 @@ public abstract class BaseCommand implements Command {
   /**
    * @param list is a List of entry keys
    */
-  protected static void sendRegisterInterestResponseChunk(Region region, Object riKey,
-      ArrayList list, boolean lastChunk, ServerConnection servConn) throws IOException {
+  private static void sendRegisterInterestResponseChunk(Region region, Object riKey, List list, boolean lastChunk, ServerConnection servConn) throws IOException {
     ChunkedMessage chunkedResponseMsg = servConn.getRegisterInterestResponseMessage();
     chunkedResponseMsg.setNumberOfParts(1);
     chunkedResponseMsg.setLastChunk(lastChunk);
-    chunkedResponseMsg.addObjPart(list, zipValues);
-    String regionName = (region == null) ? " null " : region.getFullPath();
+    chunkedResponseMsg.addObjPart(list, false);
+    String regionName = region == null ? " null " : region.getFullPath();
     if (logger.isDebugEnabled()) {
-      String str = servConn.getName() + ": Sending" + (lastChunk ? " last " : " ")
-          + "register interest response chunk for region: " + regionName + " for keys: " + riKey
-          + " chunk=<" + chunkedResponseMsg + ">";
-      logger.debug(str);
+      logger.debug("{}: Sending{}register interest response chunk for region: {} for keys: {} chunk=<{}>", servConn.getName(), lastChunk ? " last " : " ", regionName, riKey, chunkedResponseMsg);
     }
 
     chunkedResponseMsg.sendChunk(servConn);
@@ -1050,14 +972,12 @@ public abstract class BaseCommand implements Command {
    * Determines whether keys for destroyed entries (tombstones) should be sent to clients in
    * register-interest results.
    * 
-   * @param servConn
-   * @param policy
    * @return true if tombstones should be sent to the client
    */
   private static boolean sendTombstonesInRIResults(ServerConnection servConn,
       InterestResultPolicy policy) {
-    return (policy == InterestResultPolicy.KEYS_VALUES)
-        && (servConn.getClientVersion().compareTo(Version.GFE_80) >= 0);
+    return policy == InterestResultPolicy.KEYS_VALUES
+           && servConn.getClientVersion().compareTo(Version.GFE_80) >= 0;
   }
 
   /**
@@ -1066,7 +986,6 @@ public abstract class BaseCommand implements Command {
    * @param region the region
    * @param keyList the list of keys
    * @param policy the policy
-   * @throws IOException
    */
   private static void handleList(LocalRegion region, List keyList, InterestResultPolicy policy,
       ServerConnection servConn) throws IOException {
@@ -1075,15 +994,13 @@ public abstract class BaseCommand implements Command {
       handleListPR((PartitionedRegion) region, keyList, policy, servConn);
       return;
     }
-    ArrayList newKeyList = new ArrayList(maximumChunkSize);
+    List newKeyList = new ArrayList(MAXIMUM_CHUNK_SIZE);
     // Handle list of keys
     if (region != null) {
-      for (Iterator it = keyList.iterator(); it.hasNext();) {
-        Object entryKey = it.next();
-        if (region.containsKey(entryKey) || (sendTombstonesInRIResults(servConn, policy)
-            && region.containsTombstone(entryKey))) {
+      for (Object entryKey : keyList) {
+        if (region.containsKey(entryKey) || sendTombstonesInRIResults(servConn, policy) && region.containsTombstone(entryKey)) {
 
-          appendInterestResponseKey(region, keyList, entryKey, newKeyList, "list", servConn);
+          appendInterestResponseKey(region, keyList, entryKey, newKeyList, servConn);
         }
       }
     }
@@ -1095,13 +1012,11 @@ public abstract class BaseCommand implements Command {
   /**
    * Handles both RR and PR cases
    */
-  @SuppressWarnings("rawtypes")
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_PARAM_DEREF",
+  @SuppressWarnings(value = "NP_NULL_PARAM_DEREF",
       justification = "Null value handled in sendNewRegisterInterestResponseChunk()")
   private static void handleKVSingleton(LocalRegion region, Object entryKey,
       boolean serializeValues, ServerConnection servConn) throws IOException {
-    VersionedObjectList values = new VersionedObjectList(maximumChunkSize, true,
-        region == null ? true : region.getAttributes().getConcurrencyChecksEnabled(),
+    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, true, region == null || region.getAttributes().getConcurrencyChecksEnabled(),
         serializeValues);
 
     if (region != null) {
@@ -1126,15 +1041,14 @@ public abstract class BaseCommand implements Command {
    * @param region the region
    * @param entryKey the key
    * @param policy the policy
-   * @throws IOException
    */
   private static void handleSingleton(LocalRegion region, Object entryKey,
       InterestResultPolicy policy, ServerConnection servConn) throws IOException {
-    ArrayList keyList = new ArrayList(1);
+    List keyList = new ArrayList(1);
     if (region != null) {
       if (region.containsKey(entryKey)
-          || (sendTombstonesInRIResults(servConn, policy) && region.containsTombstone(entryKey))) {
-        appendInterestResponseKey(region, entryKey, entryKey, keyList, "individual", servConn);
+          || sendTombstonesInRIResults(servConn, policy) && region.containsTombstone(entryKey)) {
+        appendInterestResponseKey(region, entryKey, entryKey, keyList, servConn);
       }
     }
     // Send the last chunk (the only chunk for individual and list keys)
@@ -1147,15 +1061,13 @@ public abstract class BaseCommand implements Command {
    *
    * @param region the region
    * @param policy the policy
-   * @throws IOException
    */
   private static void handleAllKeys(LocalRegion region, InterestResultPolicy policy,
       ServerConnection servConn) throws IOException {
-    ArrayList keyList = new ArrayList(maximumChunkSize);
+    List keyList = new ArrayList(MAXIMUM_CHUNK_SIZE);
     if (region != null) {
-      for (Iterator it = region.keySet(sendTombstonesInRIResults(servConn, policy)).iterator(); it
-          .hasNext();) {
-        appendInterestResponseKey(region, "ALL_KEYS", it.next(), keyList, "ALL_KEYS", servConn);
+      for (Object entryKey : region.keySet(sendTombstonesInRIResults(servConn, policy))) {
+        appendInterestResponseKey(region, "ALL_KEYS", entryKey, keyList, servConn);
       }
     }
     // Send the last chunk (the only chunk for individual and list keys)
@@ -1163,30 +1075,19 @@ public abstract class BaseCommand implements Command {
     sendRegisterInterestResponseChunk(region, "ALL_KEYS", keyList, true, servConn);
   }
 
-  /**
-   * @param region
-   * @param regex
-   * @param serializeValues
-   * @param servConn
-   * @throws IOException
-   */
   private static void handleKVAllKeys(LocalRegion region, String regex, boolean serializeValues,
       ServerConnection servConn) throws IOException {
 
-    if (region != null && region instanceof PartitionedRegion) {
+    if (region instanceof PartitionedRegion) {
       handleKVKeysPR((PartitionedRegion) region, regex, serializeValues, servConn);
       return;
     }
 
-    VersionedObjectList values = new VersionedObjectList(maximumChunkSize, true,
-        region == null ? true : region.getAttributes().getConcurrencyChecksEnabled(),
+    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, true, region == null || region.getAttributes().getConcurrencyChecksEnabled(),
         serializeValues);
 
     if (region != null) {
 
-      VersionTag versionTag = null;
-      Object data = null;
-
       Pattern keyPattern = null;
       if (regex != null) {
         keyPattern = Pattern.compile(regex);
@@ -1207,11 +1108,11 @@ public abstract class BaseCommand implements Command {
         }
 
         ClientProxyMembershipID id = servConn == null ? null : servConn.getProxyID();
-        data = region.get(key, null, true, true, true, id, versionHolder, true);
-        versionTag = versionHolder.getVersionTag();
+        Object data = region.get(key, null, true, true, true, id, versionHolder, true);
+        VersionTag versionTag = versionHolder.getVersionTag();
         updateValues(values, key, data, versionTag);
 
-        if (values.size() == maximumChunkSize) {
+        if (values.size() == MAXIMUM_CHUNK_SIZE) {
           sendNewRegisterInterestResponseChunk(region, regex != null ? regex : "ALL_KEYS", values,
               false, servConn);
           values.clear();
@@ -1227,20 +1128,18 @@ public abstract class BaseCommand implements Command {
 
   private static void handleKVKeysPR(PartitionedRegion region, Object keyInfo,
       boolean serializeValues, ServerConnection servConn) throws IOException {
-    int id = 0;
-    HashMap<Integer, HashSet> bucketKeys = null;
 
-    VersionedObjectList values = new VersionedObjectList(maximumChunkSize, true,
+    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, true,
         region.getConcurrencyChecksEnabled(), serializeValues);
 
-    if (keyInfo != null && keyInfo instanceof List) {
-      bucketKeys = new HashMap<Integer, HashSet>();
+    if (keyInfo instanceof List) {
+      HashMap<Integer, HashSet> bucketKeys = new HashMap<>();
       for (Object key : (List) keyInfo) {
-        id = PartitionedRegionHelper.getHashKey(region, null, key, null, null);
+        int id = PartitionedRegionHelper.getHashKey(region, null, key, null, null);
         if (bucketKeys.containsKey(id)) {
           bucketKeys.get(id).add(key);
         } else {
-          HashSet<Object> keys = new HashSet<Object>();
+          HashSet<Object> keys = new HashSet<>();
           keys.add(key);
           bucketKeys.put(id, keys);
         }
@@ -1259,8 +1158,6 @@ public abstract class BaseCommand implements Command {
   /**
    * Copied from Get70.getValueAndIsObject(), except a minor change. (Make the method static instead
    * of copying it here?)
-   * 
-   * @param value
    */
   private static void updateValues(VersionedObjectList values, Object key, Object value,
       VersionTag versionTag) {
@@ -1274,8 +1171,7 @@ public abstract class BaseCommand implements Command {
     boolean wasInvalid = false;
     if (value instanceof CachedDeserializable) {
       value = ((CachedDeserializable) value).getValue();
-    } else if (value == Token.REMOVED_PHASE1 || value == Token.REMOVED_PHASE2
-        || value == Token.DESTROYED || value == Token.TOMBSTONE) {
+    } else if (isRemovalToken(value)) {
       value = null;
     } else if (value == Token.INVALID || value == Token.LOCAL_INVALID) {
       value = null; // fix for bug 35884
@@ -1292,46 +1188,39 @@ public abstract class BaseCommand implements Command {
     }
   }
 
+  private static boolean isRemovalToken(final Object value) {
+    return value == Token.REMOVED_PHASE1 || value == Token.REMOVED_PHASE2
+           || value == Token.DESTROYED || value == Token.TOMBSTONE;
+  }
+
   public static void appendNewRegisterInterestResponseChunkFromLocal(LocalRegion region,
       VersionedObjectList values, Object riKeys, Set keySet, ServerConnection servConn)
       throws IOException {
     ClientProxyMembershipID requestingClient = servConn == null ? null : servConn.getProxyID();
-    for (Iterator it = keySet.iterator(); it.hasNext();) {
-      Object key = it.next();
+    for (Object key : keySet) {
       VersionTagHolder versionHolder = createVersionTagHolder();
 
       Object value = region.get(key, null, true, true, true, requestingClient, versionHolder, true);
 
       updateValues(values, key, value, versionHolder.getVersionTag());
 
-      if (values.size() == maximumChunkSize) {
+      if (values.size() == MAXIMUM_CHUNK_SIZE) {
         // Send the chunk and clear the list
         // values.setKeys(null); // Now we need to send keys too.
-        sendNewRegisterInterestResponseChunk(region, riKeys != null ? riKeys : "ALL_KEYS", values,
-            false, servConn);
+        sendNewRegisterInterestResponseChunk(region, riKeys != null ? riKeys : "ALL_KEYS", values, false, servConn);
         values.clear();
       }
     } // for
   }
 
-  /**
-   * 
-   * @param region
-   * @param values {@link VersionedObjectList}
-   * @param riKeys
-   * @param set set of entries
-   * @param servConn
-   * @throws IOException
-   */
   public static void appendNewRegisterInterestResponseChunk(LocalRegion region,
-      VersionedObjectList values, Object riKeys, Set set, ServerConnection servConn)
+      VersionedObjectList values, Object riKeys, Set<Map.Entry> set, ServerConnection servConn)
       throws IOException {
-    for (Iterator<Map.Entry> it = set.iterator(); it.hasNext();) {
-      Map.Entry entry = it.next(); // Region.Entry or Map.Entry
+    for (Entry entry : set) {
       if (entry instanceof Region.Entry) { // local entries
-        VersionTag vt = null;
-        Object key = null;
-        Object value = null;
+        VersionTag vt;
+        Object key;
+        Object value;
         if (entry instanceof EntrySnapshot) {
           vt = ((EntrySnapshot) entry).getVersionTag();
           key = ((EntrySnapshot) entry).getRegionEntry().getKey();
@@ -1349,16 +1238,14 @@ public abstract class BaseCommand implements Command {
           }
         }
       } else { // Map.Entry (remote entries)
-        ArrayList list = (ArrayList) entry.getValue();
+        List list = (List) entry.getValue();
         Object value = list.get(0);
         VersionTag tag = (VersionTag) list.get(1);
         updateValues(values, entry.getKey(), value, tag);
       }
-      if (values.size() == maximumChunkSize) {
+      if (values.size() == MAXIMUM_CHUNK_SIZE) {
         // Send the chunk and clear the list
-        // values.setKeys(null); // Now we need to send keys too.
-        sendNewRegisterInterestResponseChunk(region, riKeys != null ? riKeys : "ALL_KEYS", values,
-            false, servConn);
+        sendNewRegisterInterestResponseChunk(region, riKeys != null ? riKeys : "ALL_KEYS", values, false, servConn);
         values.clear();
       }
     } // for
@@ -1369,25 +1256,18 @@ public abstract class BaseCommand implements Command {
     ChunkedMessage chunkedResponseMsg = servConn.getRegisterInterestResponseMessage();
     chunkedResponseMsg.setNumberOfParts(1);
     chunkedResponseMsg.setLastChunk(lastChunk);
-    chunkedResponseMsg.addObjPart(list, zipValues);
-    String regionName = (region == null) ? " null " : region.getFullPath();
+    chunkedResponseMsg.addObjPart(list, false);
+    String regionName = region == null ? " null " : region.getFullPath();
     if (logger.isDebugEnabled()) {
-      String str = servConn.getName() + ": Sending" + (lastChunk ? " last " : " ")
-          + "register interest response chunk for region: " + regionName + " for keys: " + riKey
-          + " chunk=<" + chunkedResponseMsg + ">";
-      logger.debug(str);
+      logger.debug("{}: Sending{}register interest response chunk for region: {} for keys: {} chunk=<{}>",
+        servConn.getName(), lastChunk ? " last " : " ", regionName, riKey, chunkedResponseMsg
+        );
     }
-
     chunkedResponseMsg.sendChunk(servConn);
   }
 
   /**
    * Process an interest request of type {@link InterestType#REGULAR_EXPRESSION}
-   *
-   * @param region the region
-   * @param regex the regex
-   * @param policy the policy
-   * @throws IOException
    */
   private static void handleRegEx(LocalRegion region, String regex, InterestResultPolicy policy,
       ServerConnection servConn) throws IOException {
@@ -1396,13 +1276,11 @@ public abstract class BaseCommand implements Command {
       handleRegExPR((PartitionedRegion) region, regex, policy, servConn);
       return;
     }
-    ArrayList keyList = new ArrayList(maximumChunkSize);
+    List keyList = new ArrayList(MAXIMUM_CHUNK_SIZE);
     // Handle the regex pattern
-    Pattern keyPattern = Pattern.compile(regex);
     if (region != null) {
-      for (Iterator it = region.keySet(sendTombstonesInRIResults(servConn, policy)).iterator(); it
-          .hasNext();) {
-        Object entryKey = it.next();
+      Pattern keyPattern = Pattern.compile(regex);
+      for (Object entryKey : region.keySet(sendTombstonesInRIResults(servConn, policy))) {
         if (!(entryKey instanceof String)) {
           // key is not a String, cannot apply regex to this entry
           continue;
@@ -1412,7 +1290,7 @@ public abstract class BaseCommand implements Command {
           continue;
         }
 
-        appendInterestResponseKey(region, regex, entryKey, keyList, "regex", servConn);
+        appendInterestResponseKey(region, regex, entryKey, keyList, servConn);
       }
     }
     // Send the last chunk (the only chunk for individual and list keys)
@@ -1422,19 +1300,15 @@ public abstract class BaseCommand implements Command {
 
   /**
    * Process an interest request of type {@link InterestType#REGULAR_EXPRESSION}
-   *
-   * @param region the region
-   * @param regex the regex
-   * @param policy the policy
-   * @throws IOException
    */
   private static void handleRegExPR(final PartitionedRegion region, final String regex,
       final InterestResultPolicy policy, final ServerConnection servConn) throws IOException {
-    final ArrayList keyList = new ArrayList(maximumChunkSize);
+    final List keyList = new ArrayList(MAXIMUM_CHUNK_SIZE);
     region.getKeysWithRegEx(regex, sendTombstonesInRIResults(servConn, policy),
         new PartitionedRegion.SetCollector() {
+          @Override
           public void receiveSet(Set theSet) throws IOException {
-            appendInterestResponseKeys(region, regex, theSet, keyList, "regex", servConn);
+            appendInterestResponseKeys(region, regex, theSet, keyList, servConn);
           }
         });
     // Send the last chunk (the only chunk for individual and list keys)
@@ -1444,19 +1318,15 @@ public abstract class BaseCommand implements Command {
 
   /**
    * Process an interest request involving a list of keys
-   *
-   * @param region the region
-   * @param keyList the list of keys
-   * @param policy the policy
-   * @throws IOException
    */
   private static void handleListPR(final PartitionedRegion region, final List keyList,
       final InterestResultPolicy policy, final ServerConnection servConn) throws IOException {
-    final ArrayList newKeyList = new ArrayList(maximumChunkSize);
+    final List newKeyList = new ArrayList(MAXIMUM_CHUNK_SIZE);
     region.getKeysWithList(keyList, sendTombstonesInRIResults(servConn, policy),
         new PartitionedRegion.SetCollector() {
+          @Override
           public void receiveSet(Set theSet) throws IOException {
-            appendInterestResponseKeys(region, keyList, theSet, newKeyList, "list", servConn);
+            appendInterestResponseKeys(region, keyList, theSet, newKeyList, servConn);
           }
         });
     // Send the last chunk (the only chunk for individual and list keys)
@@ -1464,34 +1334,29 @@ public abstract class BaseCommand implements Command {
     sendRegisterInterestResponseChunk(region, keyList, newKeyList, true, servConn);
   }
 
-  @SuppressWarnings("rawtypes")
   private static void handleKVList(final LocalRegion region, final List keyList,
       boolean serializeValues, final ServerConnection servConn) throws IOException {
 
-    if (region != null && region instanceof PartitionedRegion) {
+    if (region instanceof PartitionedRegion) {
       handleKVKeysPR((PartitionedRegion) region, keyList, serializeValues, servConn);
       return;
     }
-    VersionedObjectList values = new VersionedObjectList(maximumChunkSize, true,
-        region == null ? true : region.getAttributes().getConcurrencyChecksEnabled(),
+    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, true, region == null || region.getAttributes().getConcurrencyChecksEnabled(),
         serializeValues);
 
     // Handle list of keys
     if (region != null) {
-      VersionTag versionTag = null;
-      Object data = null;
 
-      for (Iterator it = keyList.iterator(); it.hasNext();) {
-        Object key = it.next();
+      for (Object key : keyList) {
         if (region.containsKey(key) || region.containsTombstone(key)) {
           VersionTagHolder versionHolder = createVersionTagHolder();
 
           ClientProxyMembershipID id = servConn == null ? null : servConn.getProxyID();
-          data = region.get(key, null, true, true, true, id, versionHolder, true);
-          versionTag = versionHolder.getVersionTag();
+          Object data = region.get(key, null, true, true, true, id, versionHolder, true);
+          VersionTag versionTag = versionHolder.getVersionTag();
           updateValues(values, key, data, versionTag);
 
-          if (values.size() == maximumChunkSize) {
+          if (values.size() == MAXIMUM_CHUNK_SIZE) {
             // Send the chunk and clear the list
             // values.setKeys(null); // Now we need to send keys too.
             sendNewRegisterInterestResponseChunk(region, keyList, values, false, servConn);
@@ -1518,27 +1383,24 @@ public abstract class BaseCommand implements Command {
    * @param riKey the registerInterest "key" (what the client is interested in)
    * @param entryKey key we're responding to
    * @param list list to append to
-   * @param kind for debugging
    */
-  private static void appendInterestResponseKey(LocalRegion region, Object riKey, Object entryKey,
-      ArrayList list, String kind, ServerConnection servConn) throws IOException {
+  private static void appendInterestResponseKey(LocalRegion region, Object riKey, Object entryKey, List list, ServerConnection servConn) throws IOException {
     list.add(entryKey);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: appendInterestResponseKey <{}>; list size was {}; region: {}",
           servConn.getName(), entryKey, list.size(), region.getFullPath());
     }
-    if (list.size() == maximumChunkSize) {
+    if (list.size() == MAXIMUM_CHUNK_SIZE) {
       // Send the chunk and clear the list
       sendRegisterInterestResponseChunk(region, riKey, list, false, servConn);
       list.clear();
     }
   }
 
-  protected static void appendInterestResponseKeys(LocalRegion region, Object riKey,
-      Collection entryKeys, ArrayList collector, String riDescr, ServerConnection servConn)
+  private static void appendInterestResponseKeys(LocalRegion region, Object riKey, Collection entryKeys, List collector, ServerConnection servConn)
       throws IOException {
-    for (Iterator it = entryKeys.iterator(); it.hasNext();) {
-      appendInterestResponseKey(region, riKey, it.next(), collector, riDescr, servConn);
+    for (final Object entryKey : entryKeys) {
+      appendInterestResponseKey(region, riKey, entryKey, collector, servConn);
     }
   }
 }
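
The register-interest helpers above all share one accumulate-and-flush pattern: keys are appended to a list and a chunk is sent whenever the list reaches MAXIMUM_CHUNK_SIZE, with a final (possibly empty) chunk always marked as last. A minimal standalone sketch of that pattern follows; the class, the ChunkSender interface, and the chunk-size value are illustrative assumptions, not Geode API:

import java.util.ArrayList;
import java.util.List;

// Sketch of the accumulate-and-flush chunking used by the methods above.
public class ChunkingSketch {
  private static final int MAXIMUM_CHUNK_SIZE = 100; // assumed value for illustration

  interface ChunkSender {
    void sendChunk(List<Object> chunk, boolean lastChunk);
  }

  static void sendInChunks(Iterable<Object> keys, ChunkSender sender) {
    List<Object> buffer = new ArrayList<>(MAXIMUM_CHUNK_SIZE);
    for (Object key : keys) {
      buffer.add(key);
      if (buffer.size() == MAXIMUM_CHUNK_SIZE) {
        sender.sendChunk(new ArrayList<>(buffer), false); // flush a full chunk
        buffer.clear();
      }
    }
    sender.sendChunk(buffer, true); // last chunk, may be smaller or empty
  }
}
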

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommandQuery.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommandQuery.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommandQuery.java
index 5f7a8ef..adf702a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommandQuery.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommandQuery.java
@@ -193,11 +193,11 @@ public abstract class BaseCommandQuery extends BaseCommand {
           }
         }
 
-        int numberOfChunks = (int) Math.ceil(selectResults.size() * 1.0 / maximumChunkSize);
+        int numberOfChunks = (int) Math.ceil(selectResults.size() * 1.0 / MAXIMUM_CHUNK_SIZE);
 
         if (logger.isTraceEnabled()) {
           logger.trace("{}: Query results size: {}: Entries in chunk: {}: Number of chunks: {}",
-              servConn.getName(), selectResults.size(), maximumChunkSize, numberOfChunks);
+              servConn.getName(), selectResults.size(), MAXIMUM_CHUNK_SIZE, numberOfChunks);
         }
 
         long oldStart = start;
@@ -262,7 +262,7 @@ public abstract class BaseCommandQuery extends BaseCommand {
       QueryInvalidException qie =
           new QueryInvalidException(LocalizedStrings.BaseCommand_0_QUERYSTRING_IS_1
               .toLocalizedString(new Object[] {e.getLocalizedMessage(), queryString}));
-      writeQueryResponseException(msg, qie, false, servConn);
+      writeQueryResponseException(msg, qie, servConn);
       return false;
     } catch (DistributedSystemDisconnectedException se) {
       if (msg != null && logger.isDebugEnabled()) {
@@ -282,7 +282,7 @@ public abstract class BaseCommandQuery extends BaseCommand {
       if ((defaultQuery).isCanceled()) {
         e = new QueryException(defaultQuery.getQueryCanceledException().getMessage(), e.getCause());
       }
-      writeQueryResponseException(msg, e, false, servConn);
+      writeQueryResponseException(msg, e, servConn);
       return false;
     } finally {
       // Since the query object is being shared in case of bind queries,
@@ -375,8 +375,8 @@ public abstract class BaseCommandQuery extends BaseCommand {
       if (logger.isTraceEnabled()) {
         logger.trace("{}: Creating chunk: {}", servConn.getName(), j);
       }
-      Object[] results = new Object[maximumChunkSize];
-      for (int i = 0; i < maximumChunkSize; i++) {
+      Object[] results = new Object[MAXIMUM_CHUNK_SIZE];
+      for (int i = 0; i < MAXIMUM_CHUNK_SIZE; i++) {
         if ((resultIndex) == selectResults.size()) {
           incompleteArray = true;
           break;
@@ -427,9 +427,9 @@ public abstract class BaseCommandQuery extends BaseCommand {
       if (incompleteArray) {
         Object[] newResults;
         if (cqQuery != null) {
-          newResults = new Object[cqResultIndex % maximumChunkSize];
+          newResults = new Object[cqResultIndex % MAXIMUM_CHUNK_SIZE];
         } else {
-          newResults = new Object[resultIndex % maximumChunkSize];
+          newResults = new Object[resultIndex % MAXIMUM_CHUNK_SIZE];
         }
         for (int i = 0; i < newResults.length; i++) {
           newResults[i] = results[i];
@@ -463,8 +463,8 @@ public abstract class BaseCommandQuery extends BaseCommand {
       if (logger.isTraceEnabled()) {
         logger.trace("{}: Creating chunk: {}", servConn.getName(), j);
       }
-      ObjectPartList serializedObjs = new ObjectPartList(maximumChunkSize, false);
-      for (int i = 0; i < maximumChunkSize; i++) {
+      ObjectPartList serializedObjs = new ObjectPartList(MAXIMUM_CHUNK_SIZE, false);
+      for (int i = 0; i < MAXIMUM_CHUNK_SIZE; i++) {
         if ((resultIndex) == objs.size()) {
           break;
         }
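
For the chunk arithmetic in BaseCommandQuery above, the number of chunks is ceil(resultCount / MAXIMUM_CHUNK_SIZE), and a partially filled final array is trimmed to resultIndex % MAXIMUM_CHUNK_SIZE entries. A tiny self-contained check of that arithmetic, with an assumed chunk size of 100:

// Sketch of the chunk-count and last-chunk-size arithmetic used above.
public class QueryChunkMath {
  private static final int MAXIMUM_CHUNK_SIZE = 100; // assumed value for illustration

  public static void main(String[] args) {
    int resultCount = 1005;
    int numberOfChunks = (int) Math.ceil(resultCount * 1.0 / MAXIMUM_CHUNK_SIZE);
    int lastChunkSize = resultCount % MAXIMUM_CHUNK_SIZE;
    if (lastChunkSize == 0) {
      lastChunkSize = MAXIMUM_CHUNK_SIZE; // exact multiple: the last chunk is full
    }
    // prints: 11 chunks, last chunk holds 5 entries
    System.out.println(numberOfChunks + " chunks, last chunk holds " + lastChunkSize + " entries");
  }
}
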


[18/43] geode git commit: Add ServerLauncherUtils and CacheServerUtils

Posted by kl...@apache.org.
Add ServerLauncherUtils and CacheServerUtils


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/cbfc667b
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/cbfc667b
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/cbfc667b

Branch: refs/heads/feature/GEODE-2632-17
Commit: cbfc667bfb171e37fe664d0cd20d96350715139f
Parents: c5031d1
Author: Kirk Lund <kl...@apache.org>
Authored: Fri May 19 14:57:44 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:08 2017 -0700

----------------------------------------------------------------------
 .../geode/distributed/ServerLauncherUtils.java  | 30 +++++++++++
 .../cache/tier/sockets/CacheServerUtils.java    | 55 ++++++++++++++++++++
 2 files changed, 85 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/cbfc667b/geode-core/src/test/java/org/apache/geode/distributed/ServerLauncherUtils.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/distributed/ServerLauncherUtils.java b/geode-core/src/test/java/org/apache/geode/distributed/ServerLauncherUtils.java
new file mode 100644
index 0000000..017e0f5
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/distributed/ServerLauncherUtils.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.distributed;
+
+import org.apache.geode.cache.Cache;
+
+/**
+ * Provides tests a way to access non-public state in ServerLauncher
+ */
+public class ServerLauncherUtils {
+
+  /**
+   * Returns the Cache from an online in-process ServerLauncher instance
+   */
+  public static Cache getCache(final ServerLauncher serverLauncher) {
+    return serverLauncher.getCache();
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/cbfc667b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/CacheServerUtils.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/CacheServerUtils.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/CacheServerUtils.java
new file mode 100644
index 0000000..8cd7622
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/CacheServerUtils.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.tier.sockets;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.server.CacheServer;
+import org.apache.geode.internal.cache.CacheServerImpl;
+
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Provides tests a way to access CacheServer, AcceptorImpl and ServerConnection
+ */
+public class CacheServerUtils {
+
+  /**
+   * Returns single CacheServer for the specified Cache instance
+   */
+  public static CacheServer getCacheServer(final Cache cache) {
+    List<CacheServer> cacheServers = cache.getCacheServers();
+    CacheServer cacheServer = cacheServers.get(0);
+    return cacheServer;
+  }
+
+  /**
+   * Returns AcceptorImpl for the specified CacheServer instance
+   */
+  public static AcceptorImpl getAcceptorImpl(final CacheServer cacheServer) {
+    AcceptorImpl acceptor = ((CacheServerImpl) cacheServer).getAcceptor();
+    return acceptor;
+  }
+
+  /**
+   * Returns single ServerConnection for the specified CacheServer instance
+   */
+  public static ServerConnection getServerConnection(final CacheServer cacheServer) {
+    AcceptorImpl acceptor = ((CacheServerImpl) cacheServer).getAcceptor();
+    Set<ServerConnection> serverConnections = acceptor.getAllServerConnections();
+    ServerConnection serverConnection = serverConnections.iterator().next(); // assumes at least one client connection exists; throws NoSuchElementException on an empty set
+    return serverConnection;
+  }
+}
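
Taken together, the two new utilities give a test a path from an in-process ServerLauncher down to a single ServerConnection. A hedged usage sketch follows; the surrounding class and the already-started launcher are assumptions, and only the utility calls come from the classes added above:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.server.CacheServer;
import org.apache.geode.distributed.ServerLauncher;
import org.apache.geode.distributed.ServerLauncherUtils;
import org.apache.geode.internal.cache.tier.sockets.AcceptorImpl;
import org.apache.geode.internal.cache.tier.sockets.CacheServerUtils;
import org.apache.geode.internal.cache.tier.sockets.ServerConnection;

// Sketch of how a test might use the new utilities; assumes 'launcher' is an
// online in-process ServerLauncher and at least one client is connected.
public class ServerConnectionAccessSketch {

  void inspect(ServerLauncher launcher) {
    Cache cache = ServerLauncherUtils.getCache(launcher);
    CacheServer cacheServer = CacheServerUtils.getCacheServer(cache);
    AcceptorImpl acceptor = CacheServerUtils.getAcceptorImpl(cacheServer);
    ServerConnection connection = CacheServerUtils.getServerConnection(cacheServer);
    System.out.println("acceptor port " + acceptor.getPort()
        + ", first connection " + connection.getName());
  }
}
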


[36/43] geode git commit: Do NOT close HeapDataOutputStream that is passed to Part

Posted by kl...@apache.org.
Do NOT close HeapDataOutputStream that is passed to Part


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/b8e41f7f
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/b8e41f7f
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/b8e41f7f

Branch: refs/heads/feature/GEODE-2632-17
Commit: b8e41f7fd24cfccc769a5769ea383ffdce8e8975
Parents: 659b9d4
Author: Kirk Lund <kl...@apache.org>
Authored: Tue May 23 14:46:34 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:10 2017 -0700

----------------------------------------------------------------------
 .../internal/cache/tier/sockets/Message.java    | 53 +++++++-------------
 1 file changed, 18 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/b8e41f7f/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
index 2ac6fea..1f9ef91 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
@@ -14,8 +14,6 @@
  */
 package org.apache.geode.internal.cache.tier.sockets;
 
-import static org.apache.geode.internal.util.IOUtils.close;
-
 import org.apache.geode.SerializationException;
 import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.internal.Assert;
@@ -111,26 +109,20 @@ public class Message {
   private static final byte[] FALSE = defineFalse();
 
   private static byte[] defineTrue() {
-    HeapDataOutputStream hdos = new HeapDataOutputStream(10, null);
-    try {
+    try (HeapDataOutputStream hdos = new HeapDataOutputStream(10, null)) {
       BlobHelper.serializeTo(Boolean.TRUE, hdos);
       return hdos.toByteArray();
     } catch (IOException e) {
       throw new IllegalStateException(e);
-    } finally {
-      close(hdos);
     }
   }
 
   private static byte[] defineFalse() {
-    HeapDataOutputStream hdos = new HeapDataOutputStream(10, null);
-    try {
+    try (HeapDataOutputStream hdos = new HeapDataOutputStream(10, null)) {
       BlobHelper.serializeTo(Boolean.FALSE, hdos);
       return hdos.toByteArray();
     } catch (IOException e) {
       throw new IllegalStateException(e);
-    } finally {
-      close(hdos);
     }
   }
 
@@ -288,23 +280,17 @@ public class Message {
     if (enableCaching) {
       byte[] bytes = CACHED_STRINGS.get(str);
       if (bytes == null) {
-        HeapDataOutputStream hdos = new HeapDataOutputStream(str);
-        try {
+        try (HeapDataOutputStream hdos = new HeapDataOutputStream(str)) {
           bytes = hdos.toByteArray();
           CACHED_STRINGS.put(str, bytes);
-        } finally {
-          close(hdos);
         }
       }
       part.setPartState(bytes, false);
+
     } else {
-      HeapDataOutputStream hdos = new HeapDataOutputStream(str);
-      try {
-        this.messageModified = true;
-        part.setPartState(hdos, false);
-      } finally {
-        close(hdos);
-      }
+      // do NOT close the HeapDataOutputStream
+      this.messageModified = true;
+      part.setPartState(new HeapDataOutputStream(str), false);
     }
     this.currentPart++;
   }
@@ -380,20 +366,18 @@ public class Message {
       v = null;
     }
 
-    // create the HDOS with a flag telling it that it can keep any byte[] or ByteBuffers/ByteSources
-    // passed to it.
+    // Create the HDOS with a flag telling it that it can keep any byte[] or ByteBuffers/ByteSources
+    // passed to it. Do NOT close the HeapDataOutputStream!
     HeapDataOutputStream hdos = new HeapDataOutputStream(this.chunkSize, v, true);
     try {
       BlobHelper.serializeTo(o, hdos);
-      this.messageModified = true;
-      Part part = this.partsList[this.currentPart];
-      part.setPartState(hdos, true);
-      this.currentPart++;
     } catch (IOException ex) {
       throw new SerializationException("failed serializing object", ex);
-    } finally {
-      close(hdos);
     }
+    this.messageModified = true;
+    Part part = this.partsList[this.currentPart];
+    part.setPartState(hdos, true);
+    this.currentPart++;
   }
 
   private void serializeAndAddPart(Object o, boolean zipValues) {
@@ -406,18 +390,17 @@ public class Message {
       v = null;
     }
 
+    // do NOT close the HeapDataOutputStream
     HeapDataOutputStream hdos = new HeapDataOutputStream(this.chunkSize, v);
     try {
       BlobHelper.serializeTo(o, hdos);
-      this.messageModified = true;
-      Part part = this.partsList[this.currentPart];
-      part.setPartState(hdos, true);
-      this.currentPart++;
     } catch (IOException ex) {
       throw new SerializationException("failed serializing object", ex);
-    } finally {
-      close(hdos);
     }
+    this.messageModified = true;
+    Part part = this.partsList[this.currentPart];
+    part.setPartState(hdos, true);
+    this.currentPart++;
   }
 
   public void addIntPart(int v) {
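
The reason behind the change above appears to be an ownership hand-off: once the HeapDataOutputStream is passed to Part.setPartState, the Part keeps reading from the stream's underlying buffers, so closing the stream in a finally block could release data the Part still needs. A generic sketch of that rule with made-up names (none of these types are the Geode API):

import java.io.ByteArrayOutputStream;
import java.io.IOException;

// Generic sketch: a buffer handed to a consumer that keeps a reference
// must not be released by the producer. All names here are illustrative.
public class BufferHandOffSketch {

  static class ReusableBuffer extends ByteArrayOutputStream {
    boolean released;

    @Override
    public void close() {
      released = true; // stands in for returning the buffer to a pool
    }
  }

  static class PartLikeConsumer {
    private ReusableBuffer held;

    void setPartState(ReusableBuffer buffer) {
      this.held = buffer; // keeps the reference; reads from it later
    }

    byte[] readLater() {
      if (held.released) {
        throw new IllegalStateException("buffer was released before it was read");
      }
      return held.toByteArray();
    }
  }

  public static void main(String[] args) throws IOException {
    PartLikeConsumer part = new PartLikeConsumer();
    ReusableBuffer buffer = new ReusableBuffer();
    buffer.write(new byte[] {1, 2, 3});
    part.setPartState(buffer);
    // Do NOT close 'buffer' here: the consumer still holds it.
    System.out.println(part.readLater().length); // prints 3
  }
}
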


[21/43] geode git commit: Cleanup BaseCommand

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put65.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put65.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put65.java
index d53c89e..581aec6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put65.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put65.java
@@ -63,7 +63,7 @@ public class Put65 extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long p_start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long p_start)
       throws IOException, InterruptedException {
     long start = p_start;
     Part regionNamePart = null, keyPart = null, valuePart = null, callbackArgPart = null;
@@ -72,11 +72,11 @@ public class Put65 extends BaseCommand {
     Part eventPart = null;
     StringBuffer errMessage = new StringBuffer();
     boolean isDelta = false;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
     // requiresResponse = true;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     {
       long oldStart = start;
       start = DistributionStats.getStatTime();
@@ -84,50 +84,50 @@ public class Put65 extends BaseCommand {
     }
     // Retrieve the data from the message parts
     int idx = 0;
-    regionNamePart = msg.getPart(idx++);
+    regionNamePart = clientMessage.getPart(idx++);
     Operation operation;
     try {
-      operation = (Operation) msg.getPart(idx++).getObject();
+      operation = (Operation) clientMessage.getPart(idx++).getObject();
       if (operation == null) { // native clients send a null since the op is java-serialized
         operation = Operation.UPDATE;
       }
     } catch (ClassNotFoundException e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
-    int flags = msg.getPart(idx++).getInt();
+    int flags = clientMessage.getPart(idx++).getInt();
     boolean requireOldValue = ((flags & 0x01) == 0x01);
     boolean haveExpectedOldValue = ((flags & 0x02) == 0x02);
     Object expectedOldValue = null;
     if (haveExpectedOldValue) {
       try {
-        expectedOldValue = msg.getPart(idx++).getObject();
+        expectedOldValue = clientMessage.getPart(idx++).getObject();
       } catch (ClassNotFoundException e) {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
-    keyPart = msg.getPart(idx++);
+    keyPart = clientMessage.getPart(idx++);
     try {
-      isDelta = ((Boolean) msg.getPart(idx).getObject()).booleanValue();
+      isDelta = ((Boolean) clientMessage.getPart(idx).getObject()).booleanValue();
       idx += 1;
     } catch (Exception e) {
-      writeException(msg, MessageType.PUT_DELTA_ERROR, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, MessageType.PUT_DELTA_ERROR, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       // CachePerfStats not available here.
       return;
     }
-    valuePart = msg.getPart(idx++);
-    eventPart = msg.getPart(idx++);
-    if (msg.getNumberOfParts() > idx) {
-      callbackArgPart = msg.getPart(idx++);
+    valuePart = clientMessage.getPart(idx++);
+    eventPart = clientMessage.getPart(idx++);
+    if (clientMessage.getNumberOfParts() > idx) {
+      callbackArgPart = clientMessage.getPart(idx++);
       try {
         callbackArg = callbackArgPart.getObject();
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -136,8 +136,8 @@ public class Put65 extends BaseCommand {
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -145,8 +145,8 @@ public class Put65 extends BaseCommand {
     if (isDebugEnabled) {
       logger.debug(
           "{}: Received {}put request ({} bytes) from {} for region {} key {} txId {} posdup: {}",
-          servConn.getName(), (isDelta ? " delta " : " "), msg.getPayloadLength(),
-          servConn.getSocketString(), regionName, key, msg.getTransactionId(), msg.isRetry());
+          serverConnection.getName(), (isDelta ? " delta " : " "), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName, key, clientMessage.getTransactionId(), clientMessage.isRetry());
     }
 
     // Process the put request
@@ -154,27 +154,27 @@ public class Put65 extends BaseCommand {
       if (key == null) {
         String putMsg = " The input key for the put request is null";
         if (isDebugEnabled) {
-          logger.debug("{}:{}", servConn.getName(), putMsg);
+          logger.debug("{}:{}", serverConnection.getName(), putMsg);
         }
         errMessage.append(putMsg);
       }
       if (regionName == null) {
         String putMsg = " The input region name for the put request is null";
         if (isDebugEnabled) {
-          logger.debug("{}:{}", servConn.getName(), putMsg);
+          logger.debug("{}:{}", serverConnection.getName(), putMsg);
         }
         errMessage.append(putMsg);
       }
-      writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason = " was not found during put request";
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -182,11 +182,11 @@ public class Put65 extends BaseCommand {
       // Invalid to 'put' a null value in an existing key
       String putMsg = " Attempted to put a null value for existing key " + key;
       if (isDebugEnabled) {
-        logger.debug("{}:{}", servConn.getName(), putMsg);
+        logger.debug("{}:{}", serverConnection.getName(), putMsg);
       }
       errMessage.append(putMsg);
-      writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -195,12 +195,12 @@ public class Put65 extends BaseCommand {
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
 
     EventIDHolder clientEvent =
-        new EventIDHolder(new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId));
+        new EventIDHolder(new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId));
 
     Breadcrumbs.setEventId(clientEvent.getEventId());
 
     // msg.isRetry might be set by v7.0 and later clients
-    if (msg.isRetry()) {
+    if (clientMessage.isRetry()) {
       // if (logger.isDebugEnabled()) {
       // logger.debug("DEBUG: encountered isRetry in Put65");
       // }
@@ -226,13 +226,13 @@ public class Put65 extends BaseCommand {
       }
       boolean isObject = valuePart.isObject();
       boolean isMetaRegion = region.isUsedForMetaRegion();
-      msg.setMetaRegion(isMetaRegion);
+      clientMessage.setMetaRegion(isMetaRegion);
 
       this.securityService.authorizeRegionWrite(regionName, key.toString());
 
       AuthorizeRequest authzRequest = null;
       if (!isMetaRegion) {
-        authzRequest = servConn.getAuthzRequest();
+        authzRequest = serverConnection.getAuthzRequest();
       }
       if (authzRequest != null) {
         if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
@@ -257,7 +257,7 @@ public class Put65 extends BaseCommand {
       // to be publicly accessible.
       if (operation == Operation.PUT_IF_ABSENT) {
         // try {
-        if (msg.isRetry() && clientEvent.getVersionTag() != null) {
+        if (clientMessage.isRetry() && clientEvent.getVersionTag() != null) {
           // bug #46590 the operation was successful the last time since it
           // was applied to the cache, so return success and the recovered
           // version tag
@@ -267,16 +267,16 @@ public class Put65 extends BaseCommand {
           }
           // invoke basicBridgePutIfAbsent anyway to ensure that the event is distributed to all
           // servers - bug #51664
-          region.basicBridgePutIfAbsent(key, value, isObject, callbackArg, servConn.getProxyID(),
+          region.basicBridgePutIfAbsent(key, value, isObject, callbackArg, serverConnection.getProxyID(),
               true, clientEvent);
           oldValue = null;
         } else {
           oldValue = region.basicBridgePutIfAbsent(key, value, isObject, callbackArg,
-              servConn.getProxyID(), true, clientEvent);
+              serverConnection.getProxyID(), true, clientEvent);
         }
         sendOldValue = true;
         oldValueIsObject = true;
-        Version clientVersion = servConn.getClientVersion();
+        Version clientVersion = serverConnection.getClientVersion();
         if (oldValue instanceof CachedDeserializable) {
           oldValue = ((CachedDeserializable) oldValue).getSerializedValue();
         } else if (oldValue instanceof byte[]) {
@@ -299,7 +299,7 @@ public class Put65 extends BaseCommand {
       } else if (operation == Operation.REPLACE) {
         // try {
         if (requireOldValue) { // <V> replace(<K>, <V>)
-          if (msg.isRetry() && clientEvent.isConcurrencyConflict()
+          if (clientMessage.isRetry() && clientEvent.isConcurrencyConflict()
               && clientEvent.getVersionTag() != null) {
             if (isDebugEnabled) {
               logger.debug("replace(k,v) operation was successful last time with version {}",
@@ -307,10 +307,10 @@ public class Put65 extends BaseCommand {
             }
           }
           oldValue = region.basicBridgeReplace(key, value, isObject, callbackArg,
-              servConn.getProxyID(), true, clientEvent);
+              serverConnection.getProxyID(), true, clientEvent);
           sendOldValue = !clientEvent.isConcurrencyConflict();
           oldValueIsObject = true;
-          Version clientVersion = servConn.getClientVersion();
+          Version clientVersion = serverConnection.getClientVersion();
           if (oldValue instanceof CachedDeserializable) {
             oldValue = ((CachedDeserializable) oldValue).getSerializedValue();
           } else if (oldValue instanceof byte[]) {
@@ -330,8 +330,8 @@ public class Put65 extends BaseCommand {
         } else { // boolean replace(<K>, <V>, <V>) {
           boolean didPut;
           didPut = region.basicBridgeReplace(key, expectedOldValue, value, isObject, callbackArg,
-              servConn.getProxyID(), true, clientEvent);
-          if (msg.isRetry() && clientEvent.getVersionTag() != null) {
+              serverConnection.getProxyID(), true, clientEvent);
+          if (clientMessage.isRetry() && clientEvent.getVersionTag() != null) {
             if (isDebugEnabled) {
               logger.debug("replace(k,v,v) operation was successful last time with version {}",
                   clientEvent.getVersionTag());
@@ -356,9 +356,9 @@ public class Put65 extends BaseCommand {
         // Create the null entry. Since the value is null, the value of the
         // isObject
         // the true after null doesn't matter and is not used.
-        result = region.basicBridgeCreate(key, null, true, callbackArg, servConn.getProxyID(), true,
+        result = region.basicBridgeCreate(key, null, true, callbackArg, serverConnection.getProxyID(), true,
             clientEvent, false);
-        if (msg.isRetry() && clientEvent.isConcurrencyConflict()
+        if (clientMessage.isRetry() && clientEvent.isConcurrencyConflict()
             && clientEvent.getVersionTag() != null) {
           result = true;
           if (isDebugEnabled) {
@@ -372,16 +372,16 @@ public class Put65 extends BaseCommand {
         if (isDelta) {
           delta = valuePart.getSerializedForm();
         }
-        TXManagerImpl txMgr = (TXManagerImpl) servConn.getCache().getCacheTransactionManager();
+        TXManagerImpl txMgr = (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
         // bug 43068 - use create() if in a transaction and op is CREATE
         if (txMgr.getTXState() != null && operation.isCreate()) {
           result = region.basicBridgeCreate(key, (byte[]) value, isObject, callbackArg,
-              servConn.getProxyID(), true, clientEvent, true);
+              serverConnection.getProxyID(), true, clientEvent, true);
         } else {
           result = region.basicBridgePut(key, value, delta, isObject, callbackArg,
-              servConn.getProxyID(), true, clientEvent);
+              serverConnection.getProxyID(), true, clientEvent);
         }
-        if (msg.isRetry() && clientEvent.isConcurrencyConflict()
+        if (clientMessage.isRetry() && clientEvent.isConcurrencyConflict()
             && clientEvent.getVersionTag() != null) {
           if (isDebugEnabled) {
             logger.debug("put(k,v) operation was successful last time with version {}",
@@ -391,46 +391,46 @@ public class Put65 extends BaseCommand {
         }
       }
       if (result) {
-        servConn.setModificationInfo(true, regionName, key);
+        serverConnection.setModificationInfo(true, regionName, key);
       } else {
-        String message = servConn.getName() + ": Failed to put entry for region " + regionName
-            + " key " + key + " value " + valuePart;
+        String message = serverConnection.getName() + ": Failed to put entry for region " + regionName
+                         + " key " + key + " value " + valuePart;
         if (isDebugEnabled) {
           logger.debug(message);
         }
         throw new Exception(message);
       }
     } catch (RegionDestroyedException rde) {
-      writeException(msg, rde, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, rde, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (ResourceException re) {
-      writeException(msg, re, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, re, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (InvalidDeltaException ide) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.UpdateOperation_ERROR_APPLYING_DELTA_FOR_KEY_0_OF_REGION_1,
           new Object[] {key, regionName}));
-      writeException(msg, MessageType.PUT_DELTA_ERROR, ide, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, MessageType.PUT_DELTA_ERROR, ide, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       region.getCachePerfStats().incDeltaFullValuesRequested();
       return;
     } catch (Exception ce) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, ce);
+      checkForInterrupt(serverConnection, ce);
 
       // If an exception occurs during the put, preserve the connection
-      writeException(msg, ce, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, ce, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       if (ce instanceof GemFireSecurityException) {
         // Fine logging for security exceptions since these are already
         // logged by the security logger
         if (isDebugEnabled) {
-          logger.debug("{}: Unexpected Security exception", servConn.getName(), ce);
+          logger.debug("{}: Unexpected Security exception", serverConnection.getName(), ce);
         }
       } else if (isDebugEnabled) {
-        logger.debug("{}: Unexpected Exception", servConn.getName(), ce);
+        logger.debug("{}: Unexpected Exception", serverConnection.getName(), ce);
       }
       return;
     } finally {
@@ -443,21 +443,21 @@ public class Put65 extends BaseCommand {
     if (region instanceof PartitionedRegion) {
       PartitionedRegion pr = (PartitionedRegion) region;
       if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-        writeReplyWithRefreshMetadata(msg, servConn, pr, sendOldValue, oldValueIsObject, oldValue,
+        writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, sendOldValue, oldValueIsObject, oldValue,
             pr.getNetworkHopType(), clientEvent.getVersionTag());
         pr.clearNetworkHopData();
       } else {
-        writeReply(msg, servConn, sendOldValue, oldValueIsObject, oldValue,
+        writeReply(clientMessage, serverConnection, sendOldValue, oldValueIsObject, oldValue,
             clientEvent.getVersionTag());
       }
     } else {
-      writeReply(msg, servConn, sendOldValue, oldValueIsObject, oldValue,
+      writeReply(clientMessage, serverConnection, sendOldValue, oldValueIsObject, oldValue,
           clientEvent.getVersionTag());
     }
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
     if (isDebugEnabled) {
       logger.debug("{}: Sent put response back to {} for region {} key {} value {}",
-          servConn.getName(), servConn.getSocketString(), regionName, key, valuePart);
+          serverConnection.getName(), serverConnection.getSocketString(), regionName, key, valuePart);
     }
     stats.incWritePutResponseTime(DistributionStats.getStatTime() - start);
 
@@ -471,7 +471,7 @@ public class Put65 extends BaseCommand {
     replyMsg.setMessageType(MessageType.REPLY);
     replyMsg.setNumberOfParts(sendOldValue ? 3 : 1);
     replyMsg.setTransactionId(origMsg.getTransactionId());
-    replyMsg.addBytesPart(OK_BYTES);
+    replyMsg.addBytesPart(okBytes());
     if (sendOldValue) {
       replyMsg.addIntPart(oldValueIsObject ? 1 : 0);
       replyMsg.addObjPart(oldValue);
@@ -499,7 +499,7 @@ public class Put65 extends BaseCommand {
     replyMsg.send(servConn);
     pr.getPrStats().incPRMetaDataSentCount();
     if (logger.isTraceEnabled()) {
-      logger.trace("{}: rpl with REFRESH_METADAT tx: {} parts={}", servConn.getName(),
+      logger.trace("{}: rpl with REFRESH_METADATA tx: {} parts={}", servConn.getName(),
           origMsg.getTransactionId(), replyMsg.getNumberOfParts());
     }
   }
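
Almost every change in the Put65 hunks above is the same mechanical rename: the parameters msg and servConn become clientMessage and serverConnection, and every call site follows. A minimal, self-contained sketch of the before/after shape, using local stub interfaces rather than the real Geode classes:

    // Illustrative sketch only: Message and ServerConnection here are local stubs,
    // not the org.apache.geode.internal.cache.tier.sockets types.
    interface Message { int getPayloadLength(); }
    interface ServerConnection { String getName(); String getSocketString(); }

    class RenameSketch {
      // Before: terse names that are easy to lose track of in a long method body.
      void cmdExecuteOld(Message msg, ServerConnection servConn, long start) {
        System.out.printf("%s: received %d bytes from %s%n",
            servConn.getName(), msg.getPayloadLength(), servConn.getSocketString());
      }

      // After: the role of each argument is visible at every call site.
      void cmdExecuteNew(Message clientMessage, ServerConnection serverConnection, long start) {
        System.out.printf("%s: received %d bytes from %s%n",
            serverConnection.getName(), clientMessage.getPayloadLength(),
            serverConnection.getSocketString());
      }
    }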

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put70.java
index 38eb7ef..395dbce 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put70.java
@@ -62,7 +62,7 @@ public class Put70 extends Put65 {
     }
     replyMsg.setNumberOfParts(parts);
     replyMsg.setTransactionId(origMsg.getTransactionId());
-    replyMsg.addBytesPart(OK_BYTES);
+    replyMsg.addBytesPart(okBytes());
     replyMsg.addIntPart(flags);
     if (sendOldValue) {
       replyMsg.addObjPart(oldValue);
@@ -114,7 +114,7 @@ public class Put70 extends Put65 {
     replyMsg.send(servConn);
     pr.getPrStats().incPRMetaDataSentCount();
     if (logger.isTraceEnabled()) {
-      logger.trace("{}: rpl with REFRESH_METADAT tx: {} parts={}", servConn.getName(),
+      logger.trace("{}: rpl with REFRESH_METADATA tx: {} parts={}", servConn.getName(),
           origMsg.getTransactionId(), replyMsg.getNumberOfParts());
     }
   }
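
Put70 also picks up the OK_BYTES to okBytes() change when it builds the reply header. A small stand-alone sketch of that constant-to-accessor pattern; the class names and byte value below are placeholders, not Geode's actual BaseCommand hierarchy:

    // Placeholder classes and value only; not the real Geode BaseCommand.
    class BaseCommandSketch {
      private static final byte[] OK_BYTES = new byte[] {0};

      // Subclasses now reach the value through an accessor instead of the static field.
      protected byte[] okBytes() {
        return OK_BYTES;
      }
    }

    class Put70Sketch extends BaseCommandSketch {
      int okPartLength() {
        // Was a direct read of OK_BYTES; behaviour is unchanged, only the access path moves.
        return okBytes().length;
      }
    }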

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll.java
index 0bcfd1b..281f737 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll.java
@@ -59,7 +59,7 @@ public class PutAll extends BaseCommand {
   private PutAll() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, numberOfKeysPart = null, keyPart = null, valuePart = null;
     String regionName = null;
@@ -67,12 +67,12 @@ public class PutAll extends BaseCommand {
     Object key = null;
     Part eventPart = null;
     StringBuffer errMessage = new StringBuffer();
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
     boolean replyWithMetaData = false;
 
     // requiresResponse = true;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     {
       long oldStart = start;
       start = DistributionStats.getStatTime();
@@ -82,64 +82,64 @@ public class PutAll extends BaseCommand {
     try {
       // Retrieve the data from the message parts
       // part 0: region name
-      regionNamePart = msg.getPart(0);
+      regionNamePart = clientMessage.getPart(0);
       regionName = regionNamePart.getString();
 
       if (regionName == null) {
         String putAllMsg =
             LocalizedStrings.PutAll_THE_INPUT_REGION_NAME_FOR_THE_PUTALL_REQUEST_IS_NULL
                 .toLocalizedString();
-        logger.warn("{}: {}", servConn.getName(), putAllMsg);
+        logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
         errMessage.append(putAllMsg);
-        writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
       LocalRegion region = (LocalRegion) crHelper.getRegion(regionName);
       if (region == null) {
         String reason = " was not found during put request";
-        writeRegionDestroyedEx(msg, regionName, reason, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
 
       // part 1: eventID
-      eventPart = msg.getPart(1);
+      eventPart = clientMessage.getPart(1);
       ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
       long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
       long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-      EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+      EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
       // part 2: number of keys
-      numberOfKeysPart = msg.getPart(2);
+      numberOfKeysPart = clientMessage.getPart(2);
       numberOfKeys = numberOfKeysPart.getInt();
 
       // building the map
       Map map = new LinkedHashMap();
       // Map isObjectMap = new LinkedHashMap();
       for (int i = 0; i < numberOfKeys; i++) {
-        keyPart = msg.getPart(3 + i * 2);
+        keyPart = clientMessage.getPart(3 + i * 2);
         key = keyPart.getStringOrObject();
         if (key == null) {
           String putAllMsg =
               LocalizedStrings.PutAll_ONE_OF_THE_INPUT_KEYS_FOR_THE_PUTALL_REQUEST_IS_NULL
                   .toLocalizedString();
-          logger.warn("{}: {}", servConn.getName(), putAllMsg);
+          logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
 
-        valuePart = msg.getPart(3 + i * 2 + 1);
+        valuePart = clientMessage.getPart(3 + i * 2 + 1);
         if (valuePart.isNull()) {
           String putAllMsg =
               LocalizedStrings.PutAll_ONE_OF_THE_INPUT_VALUES_FOR_THE_PUTALL_REQUEST_IS_NULL
                   .toLocalizedString();
-          logger.warn("{}: {}", servConn.getName(), putAllMsg);
+          logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
 
@@ -155,15 +155,15 @@ public class PutAll extends BaseCommand {
         // isObjectMap.put(key, new Boolean(isObject));
       } // for
 
-      if (msg.getNumberOfParts() == (3 + 2 * numberOfKeys + 1)) {// it means optional timeout has
+      if (clientMessage.getNumberOfParts() == (3 + 2 * numberOfKeys + 1)) {// it means optional timeout has
                                                                  // been added
-        int timeout = msg.getPart(3 + 2 * numberOfKeys).getInt();
-        servConn.setRequestSpecificTimeout(timeout);
+        int timeout = clientMessage.getPart(3 + 2 * numberOfKeys).getInt();
+        serverConnection.setRequestSpecificTimeout(timeout);
       }
 
       this.securityService.authorizeRegionWrite(regionName);
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           authzRequest.createRegionAuthorize(regionName);
@@ -179,41 +179,41 @@ public class PutAll extends BaseCommand {
 
       if (logger.isDebugEnabled()) {
         logger.debug("{}: Received putAll request ({} bytes) from {} for region {}",
-            servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName);
+            serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName);
       }
 
       region.basicBridgePutAll(map, Collections.<Object, VersionTag>emptyMap(),
-          servConn.getProxyID(), eventId, false, null);
+          serverConnection.getProxyID(), eventId, false, null);
 
       if (region instanceof PartitionedRegion) {
         PartitionedRegion pr = (PartitionedRegion) region;
         if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-          writeReplyWithRefreshMetadata(msg, servConn, pr, pr.getNetworkHopType());
+          writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, pr.getNetworkHopType());
           pr.clearNetworkHopData();
           replyWithMetaData = true;
         }
       }
     } catch (RegionDestroyedException rde) {
-      writeException(msg, rde, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, rde, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (ResourceException re) {
-      writeException(msg, re, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, re, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (PutAllPartialResultException pre) {
-      writeException(msg, pre, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, pre, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (Exception ce) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, ce);
+      checkForInterrupt(serverConnection, ce);
 
       // If an exception occurs during the put, preserve the connection
-      writeException(msg, ce, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, ce, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       logger.warn(LocalizedMessage.create(LocalizedStrings.Generic_0_UNEXPECTED_EXCEPTION,
-          servConn.getName()), ce);
+          serverConnection.getName()), ce);
       return;
     } finally {
       long oldStart = start;
@@ -223,12 +223,12 @@ public class PutAll extends BaseCommand {
 
     // Increment statistics and write the reply
     if (!replyWithMetaData) {
-      writeReply(msg, servConn);
+      writeReply(clientMessage, serverConnection);
     }
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent putAll response back to {} for region {}", servConn.getName(),
-          servConn.getSocketString(), regionName);
+      logger.debug("{}: Sent putAll response back to {} for region {}", serverConnection.getName(),
+          serverConnection.getSocketString(), regionName);
     }
     stats.incWritePutAllResponseTime(DistributionStats.getStatTime() - start);
   }
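
PutAll keeps its guard-and-return style: a null region name, key, or value produces an error response and an immediate return before anything reaches basicBridgePutAll. A compact, self-contained sketch of that validation shape; sendError below stands in for the real writeErrorResponse plus setAsTrue(RESPONDED) plumbing:

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Guard-and-return sketch; sendError is a stand-in for the error-response plumbing.
    class PutAllValidationSketch {
      static boolean buildMap(String regionName, Object[] keys, Object[] values,
          Map<Object, Object> out) {
        if (regionName == null) {
          sendError("The input region name for the putAll request is null");
          return false;
        }
        for (int i = 0; i < keys.length; i++) {
          if (keys[i] == null) {
            sendError("One of the input keys for the putAll request is null");
            return false;
          }
          if (values[i] == null) {
            sendError("One of the input values for the putAll request is null");
            return false;
          }
          out.put(keys[i], values[i]);
        }
        return true;
      }

      static void sendError(String message) {
        System.err.println(message);
      }

      public static void main(String[] args) {
        Map<Object, Object> entries = new LinkedHashMap<>();
        boolean ok = buildMap("demo", new Object[] {"k1"}, new Object[] {"v1"}, entries);
        System.out.println(ok + " " + entries);
      }
    }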

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll70.java
index c5fcbae..ae2de09 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll70.java
@@ -62,7 +62,7 @@ public class PutAll70 extends BaseCommand {
   private PutAll70() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long startp)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long startp)
       throws IOException, InterruptedException {
     long start = startp; // copy this since we need to modify it
     Part regionNamePart = null, numberOfKeysPart = null, keyPart = null, valuePart = null;
@@ -74,11 +74,11 @@ public class PutAll70 extends BaseCommand {
     VersionedObjectList response = null;
 
     StringBuffer errMessage = new StringBuffer();
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
     // requiresResponse = true;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     {
       long oldStart = start;
       start = DistributionStats.getStatTime();
@@ -88,40 +88,40 @@ public class PutAll70 extends BaseCommand {
     try {
       // Retrieve the data from the message parts
       // part 0: region name
-      regionNamePart = msg.getPart(0);
+      regionNamePart = clientMessage.getPart(0);
       regionName = regionNamePart.getString();
 
       if (regionName == null) {
         String putAllMsg =
             LocalizedStrings.PutAll_THE_INPUT_REGION_NAME_FOR_THE_PUTALL_REQUEST_IS_NULL
                 .toLocalizedString();
-        logger.warn("{}: {}", servConn.getName(), putAllMsg);
+        logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
         errMessage.append(putAllMsg);
-        writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
       LocalRegion region = (LocalRegion) crHelper.getRegion(regionName);
       if (region == null) {
         String reason = " was not found during put request";
-        writeRegionDestroyedEx(msg, regionName, reason, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
 
       // part 1: eventID
-      eventPart = msg.getPart(1);
+      eventPart = clientMessage.getPart(1);
       ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
       long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
       long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-      EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+      EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
       // part 2: invoke callbacks (used by import)
-      Part callbacksPart = msg.getPart(2);
+      Part callbacksPart = clientMessage.getPart(2);
       boolean skipCallbacks = callbacksPart.getInt() == 1 ? true : false;
 
       // part 3: number of keys
-      numberOfKeysPart = msg.getPart(3);
+      numberOfKeysPart = clientMessage.getPart(3);
       numberOfKeys = numberOfKeysPart.getInt();
 
       // building the map
@@ -129,28 +129,28 @@ public class PutAll70 extends BaseCommand {
       Map<Object, VersionTag> retryVersions = new LinkedHashMap<Object, VersionTag>();
       // Map isObjectMap = new LinkedHashMap();
       for (int i = 0; i < numberOfKeys; i++) {
-        keyPart = msg.getPart(4 + i * 2);
+        keyPart = clientMessage.getPart(4 + i * 2);
         key = keyPart.getStringOrObject();
         if (key == null) {
           String putAllMsg =
               LocalizedStrings.PutAll_ONE_OF_THE_INPUT_KEYS_FOR_THE_PUTALL_REQUEST_IS_NULL
                   .toLocalizedString();
-          logger.warn("{}: {}", servConn.getName(), putAllMsg);
+          logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
 
-        valuePart = msg.getPart(4 + i * 2 + 1);
+        valuePart = clientMessage.getPart(4 + i * 2 + 1);
         if (valuePart.isNull()) {
           String putAllMsg =
               LocalizedStrings.PutAll_ONE_OF_THE_INPUT_VALUES_FOR_THE_PUTALL_REQUEST_IS_NULL
                   .toLocalizedString();
-          logger.warn("{}: {}", servConn.getName(), putAllMsg);
+          logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
 
@@ -170,7 +170,7 @@ public class PutAll70 extends BaseCommand {
           value = valuePart.getSerializedForm();
         }
         // put serializedform for auth. It will be modified with auth callback
-        if (msg.isRetry()) {
+        if (clientMessage.isRetry()) {
           // Construct the thread id/sequence id information for this element in the
           // put all map
 
@@ -198,15 +198,15 @@ public class PutAll70 extends BaseCommand {
         // isObjectMap.put(key, new Boolean(isObject));
       } // for
 
-      if (msg.getNumberOfParts() == (4 + 2 * numberOfKeys + 1)) {// it means optional timeout has
+      if (clientMessage.getNumberOfParts() == (4 + 2 * numberOfKeys + 1)) {// it means optional timeout has
                                                                  // been added
-        int timeout = msg.getPart(4 + 2 * numberOfKeys).getInt();
-        servConn.setRequestSpecificTimeout(timeout);
+        int timeout = clientMessage.getPart(4 + 2 * numberOfKeys).getInt();
+        serverConnection.setRequestSpecificTimeout(timeout);
       }
 
       this.securityService.authorizeRegionWrite(regionName);
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           authzRequest.createRegionAuthorize(regionName);
@@ -231,10 +231,10 @@ public class PutAll70 extends BaseCommand {
 
       if (logger.isDebugEnabled()) {
         logger.debug("{}: Received putAll request ({} bytes) from {} for region {}",
-            servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName);
+            serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName);
       }
 
-      response = region.basicBridgePutAll(map, retryVersions, servConn.getProxyID(), eventId,
+      response = region.basicBridgePutAll(map, retryVersions, serverConnection.getProxyID(), eventId,
           skipCallbacks, null);
       if (!region.getConcurrencyChecksEnabled()) {
         // the client only needs this if versioning is being used
@@ -244,33 +244,33 @@ public class PutAll70 extends BaseCommand {
       if (region instanceof PartitionedRegion) {
         PartitionedRegion pr = (PartitionedRegion) region;
         if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-          writeReplyWithRefreshMetadata(msg, response, servConn, pr, pr.getNetworkHopType());
+          writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr, pr.getNetworkHopType());
           pr.clearNetworkHopData();
           replyWithMetaData = true;
         }
       }
     } catch (RegionDestroyedException rde) {
-      writeException(msg, rde, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, rde, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (ResourceException re) {
-      writeException(msg, re, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, re, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (PutAllPartialResultException pre) {
-      writeException(msg, pre, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, pre, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (Exception ce) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, ce);
+      checkForInterrupt(serverConnection, ce);
 
       // If an exception occurs during the put, preserve the connection
-      writeException(msg, ce, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, ce, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       // if (logger.fineEnabled()) {
       logger.warn(LocalizedMessage.create(LocalizedStrings.Generic_0_UNEXPECTED_EXCEPTION,
-          servConn.getName()), ce);
+          serverConnection.getName()), ce);
       // }
       return;
     } finally {
@@ -279,11 +279,11 @@ public class PutAll70 extends BaseCommand {
       stats.incProcessPutAllTime(start - oldStart);
     }
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sending putAll70 response back to {} for region {}: {}", servConn.getName(),
-          servConn.getSocketString(), regionName, response);
+      logger.debug("{}: Sending putAll70 response back to {} for region {}: {}", serverConnection.getName(),
+          serverConnection.getSocketString(), regionName, response);
     }
     // Starting in 7.0.1 we do not send the keys back
-    if (response != null && Version.GFE_70.compareTo(servConn.getClientVersion()) < 0) {
+    if (response != null && Version.GFE_70.compareTo(serverConnection.getClientVersion()) < 0) {
       if (logger.isDebugEnabled()) {
         logger.debug("setting putAll keys to null");
       }
@@ -292,14 +292,14 @@ public class PutAll70 extends BaseCommand {
 
     // Increment statistics and write the reply
     if (!replyWithMetaData) {
-      writeReply(msg, response, servConn);
+      writeReply(clientMessage, response, serverConnection);
     }
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
     stats.incWritePutAllResponseTime(DistributionStats.getStatTime() - start);
   }
 
   @Override
-  protected void writeReply(Message origMsg, ServerConnection servConn) throws IOException {
+  protected void writeReply(Message origMsg, ServerConnection serverConnection) throws IOException {
     throw new UnsupportedOperationException();
   }
 
@@ -311,7 +311,7 @@ public class PutAll70 extends BaseCommand {
     replyMsg.setMessageType(MessageType.REPLY);
     replyMsg.setNumberOfParts(2);
     replyMsg.setTransactionId(origMsg.getTransactionId());
-    replyMsg.addBytesPart(OK_BYTES);
+    replyMsg.addBytesPart(okBytes());
     if (response != null) {
       response.clearObjects();
       replyMsg.addObjPart(response);
@@ -323,7 +323,7 @@ public class PutAll70 extends BaseCommand {
   }
 
   @Override
-  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection servConn,
+  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection serverConnection,
       PartitionedRegion pr, byte nwHop) throws IOException {
     throw new UnsupportedOperationException();
   }
@@ -343,7 +343,7 @@ public class PutAll70 extends BaseCommand {
     replyMsg.send(servConn);
     pr.getPrStats().incPRMetaDataSentCount();
     if (logger.isTraceEnabled()) {
-      logger.trace("{}: rpl with REFRESH_METADAT tx: {}", servConn.getName(),
+      logger.trace("{}: rpl with REFRESH_METADATA tx: {}", servConn.getName(),
           origMsg.getTransactionId());
     }
   }
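
PutAll70 reads a fixed header (region name, event ID, skip-callbacks flag, key count) and then alternating key/value parts, with one optional trailing timeout part. The renamed clientMessage makes that index arithmetic easier to follow; here is a small sketch of the layout with a plain array standing in for the message parts:

    // Index-arithmetic sketch of the PutAll70 wire layout; parts[] stands in for
    // clientMessage.getPart(i), and the header occupies parts 0 through 3.
    class PutAll70LayoutSketch {
      static void dump(Object[] parts, int numberOfKeys) {
        for (int i = 0; i < numberOfKeys; i++) {
          Object key = parts[4 + i * 2];        // key part
          Object value = parts[4 + i * 2 + 1];  // matching value part
          System.out.println(key + " -> " + value);
        }
        // One extra part past the key/value pairs means an optional timeout was sent.
        if (parts.length == 4 + 2 * numberOfKeys + 1) {
          System.out.println("request-specific timeout = " + parts[4 + 2 * numberOfKeys]);
        }
      }

      public static void main(String[] args) {
        dump(new Object[] {"regionName", "eventId", 0, 1, "k1", "v1", 15000}, 1);
      }
    }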

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll80.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll80.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll80.java
index a6285ed..aed5926 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll80.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll80.java
@@ -75,7 +75,7 @@ public class PutAll80 extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long startp)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long startp)
       throws IOException, InterruptedException {
     long start = startp; // copy this since we need to modify it
     Part regionNamePart = null, numberOfKeysPart = null, keyPart = null, valuePart = null;
@@ -87,12 +87,12 @@ public class PutAll80 extends BaseCommand {
     VersionedObjectList response = null;
 
     StringBuffer errMessage = new StringBuffer();
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
     // requiresResponse = true;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE); // new in 8.0
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE); // new in 8.0
     {
       long oldStart = start;
       start = DistributionStats.getStatTime();
@@ -102,60 +102,60 @@ public class PutAll80 extends BaseCommand {
     try {
       // Retrieve the data from the message parts
       // part 0: region name
-      regionNamePart = msg.getPart(0);
+      regionNamePart = clientMessage.getPart(0);
       regionName = regionNamePart.getString();
 
       if (regionName == null) {
         String putAllMsg =
             LocalizedStrings.PutAll_THE_INPUT_REGION_NAME_FOR_THE_PUTALL_REQUEST_IS_NULL
                 .toLocalizedString();
-        logger.warn("{}: {}", servConn.getName(), putAllMsg);
+        logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
         errMessage.append(putAllMsg);
-        writeChunkedErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
 
       LocalRegion region = (LocalRegion) crHelper.getRegion(regionName);
       if (region == null) {
         String reason = " was not found during putAll request";
-        writeRegionDestroyedEx(msg, regionName, reason, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
 
       final int BASE_PART_COUNT = getBasePartCount();
 
       // part 1: eventID
-      eventPart = msg.getPart(1);
+      eventPart = clientMessage.getPart(1);
       ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
       long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
       long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-      EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+      EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
       Breadcrumbs.setEventId(eventId);
 
       // part 2: invoke callbacks (used by import)
-      Part callbacksPart = msg.getPart(2);
+      Part callbacksPart = clientMessage.getPart(2);
       boolean skipCallbacks = callbacksPart.getInt() == 1 ? true : false;
 
       // part 3: flags
-      int flags = msg.getPart(3).getInt();
+      int flags = clientMessage.getPart(3).getInt();
       boolean clientIsEmpty = (flags & PutAllOp.FLAG_EMPTY) != 0;
       boolean clientHasCCEnabled = (flags & PutAllOp.FLAG_CONCURRENCY_CHECKS) != 0;
 
       // part 4: number of keys
-      numberOfKeysPart = msg.getPart(4);
+      numberOfKeysPart = clientMessage.getPart(4);
       numberOfKeys = numberOfKeysPart.getInt();
 
-      Object callbackArg = getOptionalCallbackArg(msg);
+      Object callbackArg = getOptionalCallbackArg(clientMessage);
 
       if (logger.isDebugEnabled()) {
         StringBuilder buffer = new StringBuilder();
-        buffer.append(servConn.getName()).append(": Received ").append(this.putAllClassName())
-            .append(" request from ").append(servConn.getSocketString()).append(" for region ")
-            .append(regionName).append(callbackArg != null ? (" callbackArg " + callbackArg) : "")
-            .append(" with ").append(numberOfKeys).append(" entries.");
+        buffer.append(serverConnection.getName()).append(": Received ").append(this.putAllClassName())
+              .append(" request from ").append(serverConnection.getSocketString()).append(" for region ")
+              .append(regionName).append(callbackArg != null ? (" callbackArg " + callbackArg) : "")
+              .append(" with ").append(numberOfKeys).append(" entries.");
         logger.debug(buffer.toString());
       }
       // building the map
@@ -163,30 +163,28 @@ public class PutAll80 extends BaseCommand {
       Map<Object, VersionTag> retryVersions = new LinkedHashMap<Object, VersionTag>();
       // Map isObjectMap = new LinkedHashMap();
       for (int i = 0; i < numberOfKeys; i++) {
-        keyPart = msg.getPart(BASE_PART_COUNT + i * 2);
+        keyPart = clientMessage.getPart(BASE_PART_COUNT + i * 2);
         key = keyPart.getStringOrObject();
         if (key == null) {
           String putAllMsg =
               LocalizedStrings.PutAll_ONE_OF_THE_INPUT_KEYS_FOR_THE_PUTALL_REQUEST_IS_NULL
                   .toLocalizedString();
-          logger.warn("{}: {}", servConn.getName(), putAllMsg);
+          logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeChunkedErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(),
-              servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
 
-        valuePart = msg.getPart(BASE_PART_COUNT + i * 2 + 1);
+        valuePart = clientMessage.getPart(BASE_PART_COUNT + i * 2 + 1);
         if (valuePart.isNull()) {
           String putAllMsg =
               LocalizedStrings.PutAll_ONE_OF_THE_INPUT_VALUES_FOR_THE_PUTALL_REQUEST_IS_NULL
                   .toLocalizedString();
-          logger.warn("{}: {}", servConn.getName(), putAllMsg);
+          logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeChunkedErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(),
-              servConn);
-          servConn.setAsTrue(RESPONDED);
+          writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          serverConnection.setAsTrue(RESPONDED);
           return;
         }
 
@@ -206,7 +204,7 @@ public class PutAll80 extends BaseCommand {
           value = valuePart.getSerializedForm();
         }
         // put serializedform for auth. It will be modified with auth callback
-        if (msg.isRetry()) {
+        if (clientMessage.isRetry()) {
           // Construct the thread id/sequence id information for this element in the
           // put all map
 
@@ -234,16 +232,16 @@ public class PutAll80 extends BaseCommand {
         // isObjectMap.put(key, new Boolean(isObject));
       } // for
 
-      if (msg.getNumberOfParts() == (BASE_PART_COUNT + 2 * numberOfKeys + 1)) {// it means optional
+      if (clientMessage.getNumberOfParts() == (BASE_PART_COUNT + 2 * numberOfKeys + 1)) {// it means optional
                                                                                // timeout has been
                                                                                // added
-        int timeout = msg.getPart(BASE_PART_COUNT + 2 * numberOfKeys).getInt();
-        servConn.setRequestSpecificTimeout(timeout);
+        int timeout = clientMessage.getPart(BASE_PART_COUNT + 2 * numberOfKeys).getInt();
+        serverConnection.setRequestSpecificTimeout(timeout);
       }
 
       this.securityService.authorizeRegionWrite(regionName);
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           authzRequest.createRegionAuthorize(regionName);
@@ -267,7 +265,7 @@ public class PutAll80 extends BaseCommand {
          */
       }
 
-      response = region.basicBridgePutAll(map, retryVersions, servConn.getProxyID(), eventId,
+      response = region.basicBridgePutAll(map, retryVersions, serverConnection.getProxyID(), eventId,
           skipCallbacks, callbackArg);
       if (!region.getConcurrencyChecksEnabled() || clientIsEmpty || !clientHasCCEnabled) {
         // the client only needs this if versioning is being used and the client
@@ -283,32 +281,32 @@ public class PutAll80 extends BaseCommand {
       if (region instanceof PartitionedRegion) {
         PartitionedRegion pr = (PartitionedRegion) region;
         if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-          writeReplyWithRefreshMetadata(msg, response, servConn, pr, pr.getNetworkHopType());
+          writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr, pr.getNetworkHopType());
           pr.clearNetworkHopData();
           replyWithMetaData = true;
         }
       }
     } catch (RegionDestroyedException rde) {
-      writeChunkedException(msg, rde, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, rde, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (ResourceException re) {
-      writeChunkedException(msg, re, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, re, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (PutAllPartialResultException pre) {
-      writeChunkedException(msg, pre, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, pre, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (Exception ce) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, ce);
+      checkForInterrupt(serverConnection, ce);
 
       // If an exception occurs during the put, preserve the connection
-      writeChunkedException(msg, ce, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, ce, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       logger.warn(LocalizedMessage.create(LocalizedStrings.Generic_0_UNEXPECTED_EXCEPTION,
-          servConn.getName()), ce);
+          serverConnection.getName()), ce);
       return;
     } finally {
       long oldStart = start;
@@ -316,21 +314,21 @@ public class PutAll80 extends BaseCommand {
       stats.incProcessPutAllTime(start - oldStart);
     }
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sending {} response back to {} for regin {} {}", servConn.getName(),
-          putAllClassName(), servConn.getSocketString(), regionName,
+      logger.debug("{}: Sending {} response back to {} for regin {} {}", serverConnection.getName(),
+          putAllClassName(), serverConnection.getSocketString(), regionName,
           (logger.isTraceEnabled() ? ": " + response : ""));
     }
 
     // Increment statistics and write the reply
     if (!replyWithMetaData) {
-      writeReply(msg, response, servConn);
+      writeReply(clientMessage, response, serverConnection);
     }
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
     stats.incWritePutAllResponseTime(DistributionStats.getStatTime() - start);
   }
 
   @Override
-  protected void writeReply(Message origMsg, ServerConnection servConn) throws IOException {
+  protected void writeReply(Message origMsg, ServerConnection serverConnection) throws IOException {
     throw new UnsupportedOperationException();
   }
 
@@ -351,7 +349,7 @@ public class PutAll80 extends BaseCommand {
     }
     replyMsg.sendHeader();
     if (listSize > 0) {
-      int chunkSize = 2 * maximumChunkSize;
+      int chunkSize = 2 * MAXIMUM_CHUNK_SIZE;
       // Chunker will stream over the list in its toData method
       VersionedObjectList.Chunker chunk =
           new VersionedObjectList.Chunker(response, chunkSize, false, false);
@@ -383,7 +381,7 @@ public class PutAll80 extends BaseCommand {
   }
 
   @Override
-  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection servConn,
+  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection serverConnection,
       PartitionedRegion pr, byte nwHop) throws IOException {
     throw new UnsupportedOperationException();
   }
@@ -411,7 +409,7 @@ public class PutAll80 extends BaseCommand {
       replyMsg.setLastChunk(false);
       replyMsg.sendChunk(servConn);
 
-      int chunkSize = 2 * maximumChunkSize; // maximumChunkSize
+      int chunkSize = 2 * MAXIMUM_CHUNK_SIZE; // MAXIMUM_CHUNK_SIZE
       // Chunker will stream over the list in its toData method
       VersionedObjectList.Chunker chunk =
           new VersionedObjectList.Chunker(response, chunkSize, false, false);
@@ -437,7 +435,7 @@ public class PutAll80 extends BaseCommand {
     }
     pr.getPrStats().incPRMetaDataSentCount();
     if (logger.isTraceEnabled()) {
-      logger.trace("{}: rpl with REFRESH_METADAT tx: {}", servConn.getName(),
+      logger.trace("{}: rpl with REFRESH_METADATA tx: {}", servConn.getName(),
           origMsg.getTransactionId());
     }
   }
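
Beyond the rename, PutAll80 drops the boolean argument from its writeChunkedException calls and replaces maximumChunkSize with the constant-style MAXIMUM_CHUNK_SIZE when sizing the chunked reply. A trivial sketch of the latter; the value below is a placeholder, not Geode's real chunk size:

    // Placeholder constant only; 100 is not Geode's actual maximum chunk size.
    class ChunkSizeSketch {
      static final int MAXIMUM_CHUNK_SIZE = 100;

      static int replyChunkSize() {
        // Was 2 * maximumChunkSize; the value is the same, only the identifier
        // now follows static-final naming conventions.
        return 2 * MAXIMUM_CHUNK_SIZE;
      }

      public static void main(String[] args) {
        System.out.println(replyChunkSize());
      }
    }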

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutUserCredentials.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutUserCredentials.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutUserCredentials.java
index 198eed6..dc3de67 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutUserCredentials.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutUserCredentials.java
@@ -32,39 +32,39 @@ public class PutUserCredentials extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException, InterruptedException {
-    boolean isSecureMode = msg.isSecureMode();
+    boolean isSecureMode = clientMessage.isSecureMode();
 
     // if (!isSecureMode)
     // client has not sent security header; need to send exception and log this in security (file)
 
     if (isSecureMode) {
 
-      int numberOfParts = msg.getNumberOfParts();
+      int numberOfParts = clientMessage.getNumberOfParts();
 
       if (numberOfParts == 1) {
         // need to get credentials
         try {
-          servConn.setAsTrue(REQUIRES_RESPONSE);
-          byte[] uniqueId = servConn.setCredentials(msg);
-          writeResponse(uniqueId, null, msg, false, servConn);
+          serverConnection.setAsTrue(REQUIRES_RESPONSE);
+          byte[] uniqueId = serverConnection.setCredentials(clientMessage);
+          writeResponse(uniqueId, null, clientMessage, false, serverConnection);
         } catch (GemFireSecurityException gfse) {
-          if (servConn.getSecurityLogWriter().warningEnabled()) {
-            servConn.getSecurityLogWriter().warning(LocalizedStrings.ONE_ARG, servConn.getName()
-                + ": Security exception: " + gfse.toString()
-                + (gfse.getCause() != null ? ", caused by: " + gfse.getCause().toString() : ""));
+          if (serverConnection.getSecurityLogWriter().warningEnabled()) {
+            serverConnection.getSecurityLogWriter().warning(LocalizedStrings.ONE_ARG, serverConnection.getName()
+                                                                                      + ": Security exception: " + gfse.toString()
+                                                                                      + (gfse.getCause() != null ? ", caused by: " + gfse.getCause().toString() : ""));
           }
-          writeException(msg, gfse, false, servConn);
+          writeException(clientMessage, gfse, false, serverConnection);
         } catch (Exception ex) {
-          if (servConn.getLogWriter().warningEnabled()) {
-            servConn.getLogWriter().warning(
+          if (serverConnection.getLogWriter().warningEnabled()) {
+            serverConnection.getLogWriter().warning(
                 LocalizedStrings.CacheClientNotifier_AN_EXCEPTION_WAS_THROWN_FOR_CLIENT_0_1,
-                new Object[] {servConn.getProxyID(), ""}, ex);
+                new Object[] { serverConnection.getProxyID(), ""}, ex);
           }
-          writeException(msg, ex, false, servConn);
+          writeException(clientMessage, ex, false, serverConnection);
         } finally {
-          servConn.setAsTrue(RESPONDED);
+          serverConnection.setAsTrue(RESPONDED);
         }
 
       } else {

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query.java
index d3c0393..8b5b35e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query.java
@@ -43,38 +43,38 @@ public class Query extends BaseCommandQuery {
   protected Query() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
 
     // Based on MessageType.DESTROY
     // Added by gregp 10/18/05
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
     // Retrieve the data from the message parts
-    String queryString = msg.getPart(0).getString();
+    String queryString = clientMessage.getPart(0).getString();
 
     // this is optional part for message specific timeout, which right now send by native client
     // need to take care while adding new message
 
-    if (msg.getNumberOfParts() == 3) {
-      int timeout = msg.getPart(2).getInt();
-      servConn.setRequestSpecificTimeout(timeout);
+    if (clientMessage.getNumberOfParts() == 3) {
+      int timeout = clientMessage.getPart(2).getInt();
+      serverConnection.setRequestSpecificTimeout(timeout);
     }
 
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received query request from {} queryString: {}", servConn.getName(),
-          servConn.getSocketString(), queryString);
+      logger.debug("{}: Received query request from {} queryString: {}", serverConnection.getName(),
+          serverConnection.getSocketString(), queryString);
     }
     try {
       // Create query
       QueryService queryService =
-          servConn.getCachedRegionHelper().getCache().getLocalQueryService();
+          serverConnection.getCachedRegionHelper().getCache().getLocalQueryService();
       org.apache.geode.cache.query.Query query = queryService.newQuery(queryString);
       Set regionNames = ((DefaultQuery) query).getRegionsInQuery(null);
 
       // Authorization check
       QueryOperationContext queryContext = null;
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         queryContext = authzRequest.queryAuthorize(queryString, regionNames);
         String newQueryString = queryContext.getQuery();
@@ -88,11 +88,11 @@ public class Query extends BaseCommandQuery {
         }
       }
 
-      processQuery(msg, query, queryString, regionNames, start, null, queryContext, servConn, true);
+      processQuery(clientMessage, query, queryString, regionNames, start, null, queryContext, serverConnection, true);
     } catch (QueryInvalidException e) {
       throw new QueryInvalidException(e.getMessage() + queryString);
     } catch (QueryExecutionLowMemoryException e) {
-      writeQueryResponseException(msg, e, false, servConn);
+      writeQueryResponseException(clientMessage, e, serverConnection);
     }
   }
 

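A note for readers following the Query command above: this server-side path runs whenever a client executes an OQL query through the QueryService API. The following minimal client-side sketch illustrates that flow; the locator address, region name, and query string are placeholders, not values taken from this commit.

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class ClientQueryExample {
  public static void main(String[] args) throws Exception {
    // Connect to a running cluster; host and port are placeholders.
    ClientCache cache = new ClientCacheFactory()
        .addPoolLocator("localhost", 10334)
        .create();
    try {
      // The query string travels to the server as part 0 of the Query
      // message handled by Query.cmdExecute above.
      QueryService queryService = cache.getQueryService();
      Query query = queryService.newQuery("SELECT * FROM /Person p WHERE p.age > 30");
      SelectResults<?> results = (SelectResults<?>) query.execute();
      System.out.println("Result count: " + results.size());
    } finally {
      cache.close();
    }
  }
}
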
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query651.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query651.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query651.java
index 5849431..97f5d56 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query651.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query651.java
@@ -44,40 +44,40 @@ public class Query651 extends BaseCommandQuery {
   protected Query651() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
 
     // Based on MessageType.DESTROY
     // Added by gregp 10/18/05
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
     // Retrieve the data from the message parts
-    String queryString = msg.getPart(0).getString();
+    String queryString = clientMessage.getPart(0).getString();
     long compiledQueryId = 0;
     Object[] queryParams = null;
     try {
-      if (msg.getMessageType() == MessageType.QUERY_WITH_PARAMETERS) {
+      if (clientMessage.getMessageType() == MessageType.QUERY_WITH_PARAMETERS) {
         // Query with parameters supported from 6.6 onwards.
-        int params = msg.getPart(1).getInt(); // Number of parameters.
+        int params = clientMessage.getPart(1).getInt(); // Number of parameters.
         // In case of native client there will be extra two parameters at 2 and 3 index.
         int paramStartIndex = 2;
-        if (msg.getNumberOfParts() > (1 /* type */ + 1 /* query string */ + 1 /* params length */
-            + params /* number of params */)) {
-          int timeout = msg.getPart(3).getInt();
-          servConn.setRequestSpecificTimeout(timeout);
+        if (clientMessage.getNumberOfParts() > (1 /* type */ + 1 /* query string */ + 1 /* params length */
+                                                + params /* number of params */)) {
+          int timeout = clientMessage.getPart(3).getInt();
+          serverConnection.setRequestSpecificTimeout(timeout);
           paramStartIndex = 4;
         }
         // Get the query execution parameters.
         queryParams = new Object[params];
         for (int i = 0; i < queryParams.length; i++) {
-          queryParams[i] = msg.getPart(i + paramStartIndex).getObject();
+          queryParams[i] = clientMessage.getPart(i + paramStartIndex).getObject();
         }
       } else {
         // this is optional part for message specific timeout, which right now send by native client
         // need to take care while adding new message
-        if (msg.getNumberOfParts() == 3) {
-          int timeout = msg.getPart(2).getInt();
-          servConn.setRequestSpecificTimeout(timeout);
+        if (clientMessage.getNumberOfParts() == 3) {
+          int timeout = clientMessage.getPart(2).getInt();
+          serverConnection.setRequestSpecificTimeout(timeout);
         }
       }
     } catch (ClassNotFoundException cne) {
@@ -85,19 +85,19 @@ public class Query651 extends BaseCommandQuery {
     }
 
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received query request from {} queryString: {}{}", servConn.getName(),
-          servConn.getSocketString(), queryString,
+      logger.debug("{}: Received query request from {} queryString: {}{}", serverConnection.getName(),
+          serverConnection.getSocketString(), queryString,
           (queryParams != null ? (" with num query parameters :" + queryParams.length) : ""));
     }
     try {
       // Create query
       QueryService queryService =
-          servConn.getCachedRegionHelper().getCache().getLocalQueryService();
+          serverConnection.getCachedRegionHelper().getCache().getLocalQueryService();
       org.apache.geode.cache.query.Query query = null;
 
       if (queryParams != null) {
         // Its a compiled query.
-        CacheClientNotifier ccn = servConn.getAcceptor().getCacheClientNotifier();
+        CacheClientNotifier ccn = serverConnection.getAcceptor().getCacheClientNotifier();
         query = ccn.getCompiledQuery(queryString);
         if (query == null) {
           // This is first time the query is seen by this server.
@@ -114,7 +114,7 @@ public class Query651 extends BaseCommandQuery {
 
       // Authorization check
       QueryOperationContext queryContext = null;
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         queryContext = authzRequest.queryAuthorize(queryString, regionNames, queryParams);
         String newQueryString = queryContext.getQuery();
@@ -128,8 +128,7 @@ public class Query651 extends BaseCommandQuery {
         }
       }
 
-      processQueryUsingParams(msg, query, queryString, regionNames, start, null, queryContext,
-          servConn, true, queryParams);
+      processQueryUsingParams(clientMessage, query, queryString, regionNames, start, null, queryContext, serverConnection, true, queryParams);
     } catch (QueryInvalidException e) {
       throw new QueryInvalidException(e.getMessage() + queryString);
     }

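Query651 extends the same flow with bind parameters (MessageType.QUERY_WITH_PARAMETERS). A small illustrative sketch, assuming an already-obtained QueryService; the query string and parameter are placeholders.

import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class ParameterizedQueryExample {
  // Runs an OQL query with bind parameters; the parameter values travel as
  // the extra message parts read by Query651.cmdExecute above.
  public static SelectResults<?> findOlderThan(QueryService queryService, int minAge)
      throws Exception {
    Query query = queryService.newQuery("SELECT * FROM /Person p WHERE p.age > $1");
    return (SelectResults<?>) query.execute(new Object[] {minAge});
  }
}
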
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterDataSerializers.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterDataSerializers.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterDataSerializers.java
index 7d28d52..d1c101f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterDataSerializers.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterDataSerializers.java
@@ -37,22 +37,22 @@ public class RegisterDataSerializers extends BaseCommand {
 
   private RegisterDataSerializers() {}
 
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received register dataserializer request ({} parts) from {}",
-          servConn.getName(), msg.getNumberOfParts(), servConn.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
     }
-    int noOfParts = msg.getNumberOfParts();
+    int noOfParts = clientMessage.getNumberOfParts();
 
     // 2 parts per instantiator and one eventId part
     int noOfDataSerializers = (noOfParts - 1) / 2;
 
     // retrieve eventID from the last Part
-    ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(msg.getPart(noOfParts - 1).getSerializedForm());
+    ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(clientMessage.getPart(noOfParts - 1).getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     byte[][] serializedDataSerializers = new byte[noOfDataSerializers * 2][];
     boolean caughtCNFE = false;
@@ -60,12 +60,12 @@ public class RegisterDataSerializers extends BaseCommand {
     try {
       for (int i = 0; i < noOfParts - 1; i = i + 2) {
 
-        Part dataSerializerClassNamePart = msg.getPart(i);
+        Part dataSerializerClassNamePart = clientMessage.getPart(i);
         serializedDataSerializers[i] = dataSerializerClassNamePart.getSerializedForm();
         String dataSerializerClassName =
             (String) CacheServerHelper.deserialize(serializedDataSerializers[i]);
 
-        Part idPart = msg.getPart(i + 1);
+        Part idPart = clientMessage.getPart(i + 1);
         serializedDataSerializers[i + 1] = idPart.getSerializedForm();
         int id = idPart.getInt();
 
@@ -73,7 +73,7 @@ public class RegisterDataSerializers extends BaseCommand {
         try {
           dataSerializerClass = InternalDataSerializer.getCachedClass(dataSerializerClassName);
           InternalDataSerializer.register(dataSerializerClass, true, eventId,
-              servConn.getProxyID());
+              serverConnection.getProxyID());
         } catch (ClassNotFoundException e) {
           // If a ClassNotFoundException is caught, store it, but continue
           // processing other instantiators
@@ -82,26 +82,26 @@ public class RegisterDataSerializers extends BaseCommand {
         }
       }
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     }
 
     // If a ClassNotFoundException was caught while processing the
     // instantiators, send it back to the client. Note: This only sends
     // the last CNFE.
     if (caughtCNFE) {
-      writeException(msg, cnfe, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, cnfe, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     }
 
     // Send reply to client if necessary. If an exception occurs in the above
     // code, then the reply has already been sent.
-    if (!servConn.getTransientFlag(RESPONDED)) {
-      writeReply(msg, servConn);
+    if (!serverConnection.getTransientFlag(RESPONDED)) {
+      writeReply(clientMessage, serverConnection);
     }
 
     if (logger.isDebugEnabled()) {
-      logger.debug("Registered dataserializer for MembershipId = {}", servConn.getMembershipID());
+      logger.debug("Registered dataserializer for MembershipId = {}", serverConnection.getMembershipID());
     }
   }
 }

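The RegisterDataSerializers message above is produced when a client registers a custom DataSerializer. A minimal illustrative sketch follows; the Price class, its field layout, and the id value are hypothetical.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.geode.DataSerializer;

// Hypothetical domain class, used only for illustration.
class Price {
  double amount;
  Price(double amount) { this.amount = amount; }
}

public class PriceSerializer extends DataSerializer {

  public PriceSerializer() {} // public no-arg constructor is required

  @Override
  public Class<?>[] getSupportedClasses() {
    return new Class<?>[] {Price.class};
  }

  @Override
  public int getId() {
    return 2001; // any non-zero id that is unique across the cluster
  }

  @Override
  public boolean toData(Object o, DataOutput out) throws IOException {
    if (!(o instanceof Price)) {
      return false;
    }
    out.writeDouble(((Price) o).amount);
    return true;
  }

  @Override
  public Object fromData(DataInput in) throws IOException, ClassNotFoundException {
    return new Price(in.readDouble());
  }

  public static void main(String[] args) {
    // Registering on a client distributes the serializer to servers via the
    // RegisterDataSerializers message handled above.
    DataSerializer.register(PriceSerializer.class);
  }
}
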
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInstantiators.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInstantiators.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInstantiators.java
index 1e701fc..2b63337 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInstantiators.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInstantiators.java
@@ -49,23 +49,23 @@ public class RegisterInstantiators extends BaseCommand {
   private RegisterInstantiators() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received register instantiator request ({} parts) from {}",
-          servConn.getName(), msg.getNumberOfParts(), servConn.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
     }
-    int noOfParts = msg.getNumberOfParts();
+    int noOfParts = clientMessage.getNumberOfParts();
     // Assert parts
     Assert.assertTrue((noOfParts - 1) % 3 == 0);
     // 3 parts per instantiator and one eventId part
     int noOfInstantiators = (noOfParts - 1) / 3;
 
     // retrieve eventID from the last Part
-    ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(msg.getPart(noOfParts - 1).getSerializedForm());
+    ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(clientMessage.getPart(noOfParts - 1).getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     byte[][] serializedInstantiators = new byte[noOfInstantiators * 3][];
     boolean caughtCNFE = false;
@@ -73,17 +73,17 @@ public class RegisterInstantiators extends BaseCommand {
     try {
       for (int i = 0; i < noOfParts - 1; i = i + 3) {
 
-        Part instantiatorPart = msg.getPart(i);
+        Part instantiatorPart = clientMessage.getPart(i);
         serializedInstantiators[i] = instantiatorPart.getSerializedForm();
         String instantiatorClassName =
             (String) CacheServerHelper.deserialize(serializedInstantiators[i]);
 
-        Part instantiatedPart = msg.getPart(i + 1);
+        Part instantiatedPart = clientMessage.getPart(i + 1);
         serializedInstantiators[i + 1] = instantiatedPart.getSerializedForm();
         String instantiatedClassName =
             (String) CacheServerHelper.deserialize(serializedInstantiators[i + 1]);
 
-        Part idPart = msg.getPart(i + 2);
+        Part idPart = clientMessage.getPart(i + 2);
         serializedInstantiators[i + 2] = idPart.getSerializedForm();
         int id = idPart.getInt();
 
@@ -92,7 +92,7 @@ public class RegisterInstantiators extends BaseCommand {
           instantiatorClass = InternalDataSerializer.getCachedClass(instantiatorClassName);
           instantiatedClass = InternalDataSerializer.getCachedClass(instantiatedClassName);
           InternalInstantiator.register(instantiatorClass, instantiatedClass, id, true, eventId,
-              servConn.getProxyID());
+              serverConnection.getProxyID());
         } catch (ClassNotFoundException e) {
           // If a ClassNotFoundException is caught, store it, but continue
           // processing other instantiators
@@ -102,17 +102,17 @@ public class RegisterInstantiators extends BaseCommand {
       }
     } catch (Exception e) {
       logger.warn(LocalizedMessage.create(LocalizedStrings.RegisterInstantiators_BAD_CLIENT,
-          new Object[] {servConn.getMembershipID(), e.getLocalizedMessage()}));
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+          new Object[] { serverConnection.getMembershipID(), e.getLocalizedMessage()}));
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     }
 
     // If a ClassNotFoundException was caught while processing the
     // instantiators, send it back to the client. Note: This only sends
     // the last CNFE.
     if (caughtCNFE) {
-      writeException(msg, cnfe, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, cnfe, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
 
       // Send the instantiators on to other clients if we hit an error
       // due to a missing class, because they were not distributed
@@ -120,7 +120,7 @@ public class RegisterInstantiators extends BaseCommand {
       // been distributed if successfully registered.
       ClientInstantiatorMessage clientInstantiatorMessage =
           new ClientInstantiatorMessage(EnumListenerEvent.AFTER_REGISTER_INSTANTIATOR,
-              serializedInstantiators, servConn.getProxyID(), eventId);
+              serializedInstantiators, serverConnection.getProxyID(), eventId);
 
       // Notify other clients
       CacheClientNotifier.routeClientMessage(clientInstantiatorMessage);
@@ -129,12 +129,12 @@ public class RegisterInstantiators extends BaseCommand {
 
     // Send reply to client if necessary. If an exception occurs in the above
     // code, then the reply has already been sent.
-    if (!servConn.getTransientFlag(RESPONDED)) {
-      writeReply(msg, servConn);
+    if (!serverConnection.getTransientFlag(RESPONDED)) {
+      writeReply(clientMessage, serverConnection);
     }
 
     if (logger.isDebugEnabled()) {
-      logger.debug("Registered instantiators for MembershipId = {}", servConn.getMembershipID());
+      logger.debug("Registered instantiators for MembershipId = {}", serverConnection.getMembershipID());
     }
   }
 

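Likewise, the RegisterInstantiators message is driven by client-side Instantiator registration. A minimal sketch, with a hypothetical Position class and class id:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.geode.DataSerializable;
import org.apache.geode.Instantiator;

// Hypothetical DataSerializable domain class, for illustration only.
public class Position implements DataSerializable {
  private String symbol;
  private int quantity;

  public Position() {} // required for deserialization

  @Override
  public void toData(DataOutput out) throws IOException {
    out.writeUTF(symbol);
    out.writeInt(quantity);
  }

  @Override
  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
    this.symbol = in.readUTF();
    this.quantity = in.readInt();
  }

  public static void registerInstantiator() {
    // Registering on a client sends the instantiator class name, instantiated
    // class name, and id as the message parts processed above.
    Instantiator.register(new Instantiator(Position.class, 47) {
      @Override
      public DataSerializable newInstance() {
        return new Position();
      }
    });
  }
}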

[14/43] geode git commit: GEODE-2951 Remove --pageSize from docs of gfsh search lucene

Posted by kl...@apache.org.
GEODE-2951 Remove --pageSize from docs of gfsh search lucene


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/d3543d22
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/d3543d22
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/d3543d22

Branch: refs/heads/feature/GEODE-2632-17
Commit: d3543d229de46a3a582881484bccf3b77d8ac505
Parents: 6e56a73
Author: Karen Miller <km...@pivotal.io>
Authored: Fri May 26 10:54:58 2017 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Tue May 30 09:27:21 2017 -0700

----------------------------------------------------------------------
 .../tools_modules/gfsh/command-pages/search.html.md.erb       | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/d3543d22/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
index 7f239e9..6cdf362 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
@@ -31,7 +31,7 @@ See also [create lucene index](create.html#create_lucene_index), [describe lucen
 
 ``` pre
 search lucene --name=value --region=value --queryStrings=value --defaultField=value
-    [--limit=value] [--pageSize=value] [--keys-only=value]
+    [--limit=value] [--keys-only=value]
 ```
 
 **Parameters, search lucene:**
@@ -75,11 +75,6 @@ search lucene --name=value --region=value --queryStrings=value --defaultField=va
 <td>Number of search results needed.</td>
 <td>If the parameter is not specified: -1</td>
 </tr>
-<tr>
-<td><span class="keyword parmname">\-\-pageSize</span></td>
-<td>Number of results to be returned in a page.</td>
-<td>If the parameter is not specified: -1</td>
-</tr>
 <td><span class="keyword parmname">\-\-keys-only</span></td>
 <td>Return only keys of search results.</td>
 <td>If the parameter is not specified: false</td>

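For reference, the same kind of search can also be issued through the Java Lucene API rather than gfsh. A minimal sketch, assuming the Geode 1.2 LuceneService API; the index name (personIndex), region name (Person), query string, and locator address are placeholders.

import java.util.Collection;

import org.apache.geode.cache.GemFireCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.lucene.LuceneQuery;
import org.apache.geode.cache.lucene.LuceneQueryException;
import org.apache.geode.cache.lucene.LuceneService;
import org.apache.geode.cache.lucene.LuceneServiceProvider;

public class LuceneSearchExample {
  public static void main(String[] args) throws LuceneQueryException {
    // Placeholder connection details.
    GemFireCache cache = new ClientCacheFactory()
        .addPoolLocator("localhost", 10334)
        .create();

    LuceneService luceneService = LuceneServiceProvider.get(cache);

    // Roughly equivalent to the gfsh command:
    //   search lucene --name=personIndex --region=Person
    //                 --queryStrings="name:Tom*" --defaultField=name --limit=100
    LuceneQuery<Object, Object> query = luceneService.createLuceneQueryFactory()
        .setLimit(100)
        .create("personIndex", "Person", "name:Tom*", "name");

    Collection<Object> values = query.findValues();
    System.out.println("Matches: " + values.size());

    cache.close();
  }
}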

[22/43] geode git commit: Cleanup BaseCommand

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetFunctionAttribute.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetFunctionAttribute.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetFunctionAttribute.java
index 8ec16ef..f56a4d9 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetFunctionAttribute.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetFunctionAttribute.java
@@ -34,15 +34,15 @@ public class GetFunctionAttribute extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    String functionId = msg.getPart(0).getString();
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    String functionId = clientMessage.getPart(0).getString();
     if (functionId == null) {
       String message =
           LocalizedStrings.GetFunctionAttribute_THE_INPUT_0_FOR_GET_FUNCTION_ATTRIBUTE_REQUEST_IS_NULL
               .toLocalizedString("functionId");
-      logger.warn("{}: {}", servConn.getName(), message);
-      sendError(msg, message, servConn);
+      logger.warn("{}: {}", serverConnection.getName(), message);
+      sendError(clientMessage, message, serverConnection);
       return;
     }
 
@@ -52,8 +52,8 @@ public class GetFunctionAttribute extends BaseCommand {
       message =
           LocalizedStrings.GetFunctionAttribute_THE_FUNCTION_IS_NOT_REGISTERED_FOR_FUNCTION_ID_0
               .toLocalizedString(functionId);
-      logger.warn("{}: {}", servConn.getName(), message);
-      sendError(msg, message, servConn);
+      logger.warn("{}: {}", serverConnection.getName(), message);
+      sendError(clientMessage, message, serverConnection);
       return;
     }
 
@@ -61,7 +61,7 @@ public class GetFunctionAttribute extends BaseCommand {
     functionAttributes[0] = (byte) (function.hasResult() ? 1 : 0);
     functionAttributes[1] = (byte) (function.isHA() ? 1 : 0);
     functionAttributes[2] = (byte) (function.optimizeForWrite() ? 1 : 0);
-    writeResponseWithFunctionAttribute(functionAttributes, msg, servConn);
+    writeResponseWithFunctionAttribute(functionAttributes, clientMessage, serverConnection);
   }
 
   private void sendError(Message msg, String message, ServerConnection servConn)

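GetFunctionAttribute above returns a three-byte array describing a registered function. A hypothetical function showing where those attributes come from; the function id and logic are illustrative only.

import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.FunctionService;

public class UpperCaseFunction implements Function {

  public static final String ID = "UpperCaseFunction";

  @Override
  public void execute(FunctionContext context) {
    Object argument = context.getArguments();
    context.getResultSender().lastResult(String.valueOf(argument).toUpperCase());
  }

  @Override
  public String getId() {
    return ID;
  }

  @Override
  public boolean hasResult() {
    return true;   // functionAttributes[0]
  }

  @Override
  public boolean isHA() {
    return true;   // functionAttributes[1]
  }

  @Override
  public boolean optimizeForWrite() {
    return false;  // functionAttributes[2]
  }

  public static void register() {
    // Must be registered on the servers for the attribute lookup to succeed.
    FunctionService.registerFunction(new UpperCaseFunction());
  }
}
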
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXEnumById.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXEnumById.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXEnumById.java
index 54a21ed..cc7dd05 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXEnumById.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXEnumById.java
@@ -36,32 +36,32 @@ public class GetPDXEnumById extends BaseCommand {
   private GetPDXEnumById() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received get pdx enum by id request ({} parts) from {}", servConn.getName(),
-          msg.getNumberOfParts(), servConn.getSocketString());
+      logger.debug("{}: Received get pdx enum by id request ({} parts) from {}", serverConnection.getName(),
+          clientMessage.getNumberOfParts(), serverConnection.getSocketString());
     }
-    int enumId = msg.getPart(0).getInt();
+    int enumId = clientMessage.getPart(0).getInt();
 
     EnumInfo result;
     try {
-      InternalCache cache = servConn.getCache();
+      InternalCache cache = serverConnection.getCache();
       TypeRegistry registry = cache.getPdxRegistry();
       result = registry.getEnumInfoById(enumId);
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    Message responseMsg = servConn.getResponseMessage();
+    Message responseMsg = serverConnection.getResponseMessage();
     responseMsg.setMessageType(MessageType.RESPONSE);
     responseMsg.setNumberOfParts(1);
-    responseMsg.setTransactionId(msg.getTransactionId());
+    responseMsg.setTransactionId(clientMessage.getTransactionId());
     responseMsg.addObjPart(result);
-    responseMsg.send(servConn);
-    servConn.setAsTrue(RESPONDED);
+    responseMsg.send(serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForEnum.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForEnum.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForEnum.java
index 1b21383..7bf5b4f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForEnum.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForEnum.java
@@ -36,33 +36,33 @@ public class GetPDXIdForEnum extends BaseCommand {
   private GetPDXIdForEnum() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received get pdx id for enum request ({} parts) from {}",
-          servConn.getName(), msg.getNumberOfParts(), servConn.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
     }
 
-    EnumInfo enumInfo = (EnumInfo) msg.getPart(0).getObject();
+    EnumInfo enumInfo = (EnumInfo) clientMessage.getPart(0).getObject();
 
     int enumId;
     try {
-      InternalCache cache = servConn.getCache();
+      InternalCache cache = serverConnection.getCache();
       TypeRegistry registry = cache.getPdxRegistry();
       enumId = registry.defineEnum(enumInfo);
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    Message responseMsg = servConn.getResponseMessage();
+    Message responseMsg = serverConnection.getResponseMessage();
     responseMsg.setMessageType(MessageType.RESPONSE);
     responseMsg.setNumberOfParts(1);
-    responseMsg.setTransactionId(msg.getTransactionId());
+    responseMsg.setTransactionId(clientMessage.getTransactionId());
     responseMsg.addIntPart(enumId);
-    responseMsg.send(servConn);
-    servConn.setAsTrue(RESPONDED);
+    responseMsg.send(serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForType.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForType.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForType.java
index 2054196..e5dc5f0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForType.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForType.java
@@ -36,34 +36,34 @@ public class GetPDXIdForType extends BaseCommand {
   private GetPDXIdForType() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received get pdx id for type request ({} parts) from {}",
-          servConn.getName(), msg.getNumberOfParts(), servConn.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
     }
-    int noOfParts = msg.getNumberOfParts();
+    int noOfParts = clientMessage.getNumberOfParts();
 
-    PdxType type = (PdxType) msg.getPart(0).getObject();
+    PdxType type = (PdxType) clientMessage.getPart(0).getObject();
 
     int pdxId;
     try {
-      InternalCache cache = servConn.getCache();
+      InternalCache cache = serverConnection.getCache();
       TypeRegistry registry = cache.getPdxRegistry();
       pdxId = registry.defineType(type);
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    Message responseMsg = servConn.getResponseMessage();
+    Message responseMsg = serverConnection.getResponseMessage();
     responseMsg.setMessageType(MessageType.RESPONSE);
     responseMsg.setNumberOfParts(1);
-    responseMsg.setTransactionId(msg.getTransactionId());
+    responseMsg.setTransactionId(clientMessage.getTransactionId());
     responseMsg.addIntPart(pdxId);
-    responseMsg.send(servConn);
-    servConn.setAsTrue(RESPONDED);
+    responseMsg.send(serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXTypeById.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXTypeById.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXTypeById.java
index 2470893..032e8b3 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXTypeById.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXTypeById.java
@@ -36,32 +36,32 @@ public class GetPDXTypeById extends BaseCommand {
   private GetPDXTypeById() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received get pdx type by id request ({} parts) from {}", servConn.getName(),
-          msg.getNumberOfParts(), servConn.getSocketString());
+      logger.debug("{}: Received get pdx type by id request ({} parts) from {}", serverConnection.getName(),
+          clientMessage.getNumberOfParts(), serverConnection.getSocketString());
     }
-    int pdxId = msg.getPart(0).getInt();
+    int pdxId = clientMessage.getPart(0).getInt();
 
     PdxType type;
     try {
-      InternalCache cache = servConn.getCache();
+      InternalCache cache = serverConnection.getCache();
       TypeRegistry registry = cache.getPdxRegistry();
       type = registry.getType(pdxId);
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    Message responseMsg = servConn.getResponseMessage();
+    Message responseMsg = serverConnection.getResponseMessage();
     responseMsg.setMessageType(MessageType.RESPONSE);
     responseMsg.setNumberOfParts(1);
-    responseMsg.setTransactionId(msg.getTransactionId());
+    responseMsg.setTransactionId(clientMessage.getTransactionId());
     responseMsg.addObjPart(type);
-    responseMsg.send(servConn);
-    servConn.setAsTrue(RESPONDED);
+    responseMsg.send(serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPdxEnums70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPdxEnums70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPdxEnums70.java
index 19551c4..7753584 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPdxEnums70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPdxEnums70.java
@@ -36,31 +36,31 @@ public class GetPdxEnums70 extends BaseCommand {
   private GetPdxEnums70() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received get pdx enums from {}", servConn.getName(),
-          servConn.getSocketString());
+      logger.debug("{}: Received get pdx enums from {}", serverConnection.getName(),
+          serverConnection.getSocketString());
     }
 
     Map<Integer, EnumInfo> enums;
     try {
-      InternalCache cache = servConn.getCache();
+      InternalCache cache = serverConnection.getCache();
       enums = cache.getPdxRegistry().enumMap();
 
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    Message responseMsg = servConn.getResponseMessage();
+    Message responseMsg = serverConnection.getResponseMessage();
     responseMsg.setMessageType(MessageType.RESPONSE);
     responseMsg.setNumberOfParts(1);
-    responseMsg.setTransactionId(msg.getTransactionId());
+    responseMsg.setTransactionId(clientMessage.getTransactionId());
     responseMsg.addObjPart(enums);
-    responseMsg.send(servConn);
-    servConn.setAsTrue(RESPONDED);
+    responseMsg.send(serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPdxTypes70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPdxTypes70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPdxTypes70.java
index cc96b8e..c31a375 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPdxTypes70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPdxTypes70.java
@@ -36,30 +36,30 @@ public class GetPdxTypes70 extends BaseCommand {
   private GetPdxTypes70() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received get pdx types from {}", servConn.getName(),
-          servConn.getSocketString());
+      logger.debug("{}: Received get pdx types from {}", serverConnection.getName(),
+          serverConnection.getSocketString());
     }
 
     Map<Integer, PdxType> types;
     try {
-      InternalCache cache = servConn.getCache();
+      InternalCache cache = serverConnection.getCache();
       types = cache.getPdxRegistry().typeMap();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    Message responseMsg = servConn.getResponseMessage();
+    Message responseMsg = serverConnection.getResponseMessage();
     responseMsg.setMessageType(MessageType.RESPONSE);
     responseMsg.setNumberOfParts(1);
-    responseMsg.setTransactionId(msg.getTransactionId());
+    responseMsg.setTransactionId(clientMessage.getTransactionId());
     responseMsg.addObjPart(types);
-    responseMsg.send(servConn);
-    servConn.setAsTrue(RESPONDED);
+    responseMsg.send(serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
   }
 }

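The PDX commands above (GetPDXIdForType, GetPDXTypeById, GetPdxTypes70, GetPdxEnums70) serve the distributed PDX type registry. A minimal sketch of a PdxSerializable domain class whose serialization populates that registry; the Customer class and its fields are hypothetical.

import org.apache.geode.pdx.PdxReader;
import org.apache.geode.pdx.PdxSerializable;
import org.apache.geode.pdx.PdxWriter;

public class Customer implements PdxSerializable {
  private String name;
  private int accountId;

  public Customer() {} // required for deserialization

  public Customer(String name, int accountId) {
    this.name = name;
    this.accountId = accountId;
  }

  @Override
  public void toData(PdxWriter writer) {
    // Writing the first instance of this class defines a PdxType, which is
    // exchanged with servers through the type-registry messages above.
    writer.writeString("name", name);
    writer.writeInt("accountId", accountId);
  }

  @Override
  public void fromData(PdxReader reader) {
    this.name = reader.readString("name");
    this.accountId = reader.readInt("accountId");
  }
}
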
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalid.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalid.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalid.java
index 24d623a..314ba07 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalid.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalid.java
@@ -36,12 +36,13 @@ public class Invalid extends BaseCommand {
   private Invalid() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
     logger.error(
         LocalizedMessage.create(LocalizedStrings.Invalid_0_INVALID_MESSAGE_TYPE_WITH_TX_1_FROM_2,
-            new Object[] {servConn.getName(), Integer.valueOf(msg.getTransactionId()),
-                servConn.getSocketString()}));
-    writeErrorResponse(msg, MessageType.INVALID, servConn);
+            new Object[] {
+              serverConnection.getName(), Integer.valueOf(clientMessage.getTransactionId()),
+                serverConnection.getSocketString()}));
+    writeErrorResponse(clientMessage, MessageType.INVALID, serverConnection);
 
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate.java
index 42b2497..22bf6f4 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate.java
@@ -48,15 +48,15 @@ public class Invalidate extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keyPart = null, callbackArgPart = null;
     String regionName = null;
     Object callbackArg = null, key = null;
     Part eventPart = null;
     StringBuffer errMessage = new StringBuffer();
-    CacheServerStats stats = servConn.getCacheServerStats();
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    CacheServerStats stats = serverConnection.getCacheServerStats();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
 
     {
       long oldStart = start;
@@ -64,17 +64,17 @@ public class Invalidate extends BaseCommand {
       stats.incReadInvalidateRequestTime(start - oldStart);
     }
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
-    keyPart = msg.getPart(1);
-    eventPart = msg.getPart(2);
+    regionNamePart = clientMessage.getPart(0);
+    keyPart = clientMessage.getPart(1);
+    eventPart = clientMessage.getPart(2);
     // callbackArgPart = null; (redundant assignment)
-    if (msg.getNumberOfParts() > 3) {
-      callbackArgPart = msg.getPart(3);
+    if (clientMessage.getNumberOfParts() > 3) {
+      callbackArgPart = clientMessage.getPart(3);
       try {
         callbackArg = callbackArgPart.getObject();
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -82,14 +82,14 @@ public class Invalidate extends BaseCommand {
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     if (logger.isDebugEnabled()) {
-      logger.debug(servConn.getName() + ": Received invalidate request (" + msg.getPayloadLength()
-          + " bytes) from " + servConn.getSocketString() + " for region " + regionName + " key "
-          + key);
+      logger.debug(serverConnection.getName() + ": Received invalidate request (" + clientMessage.getPayloadLength()
+                   + " bytes) from " + serverConnection.getSocketString() + " for region " + regionName + " key "
+                   + key);
     }
 
     // Process the invalidate request
@@ -108,23 +108,23 @@ public class Invalidate extends BaseCommand {
             .append(LocalizedStrings.BaseCommand__THE_INPUT_REGION_NAME_FOR_THE_0_REQUEST_IS_NULL
                 .toLocalizedString("invalidate"));
       }
-      writeErrorResponse(msg, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason = LocalizedStrings.BaseCommand__0_WAS_NOT_FOUND_DURING_1_REQUEST
           .toLocalizedString(regionName, "invalidate");
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     // Invalidate the entry
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     Breadcrumbs.setEventId(eventId);
 
@@ -134,7 +134,7 @@ public class Invalidate extends BaseCommand {
       // for integrated security
       this.securityService.authorizeRegionWrite(regionName, key.toString());
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         InvalidateOperationContext invalidateContext =
             authzRequest.invalidateAuthorize(regionName, key, callbackArg);
@@ -143,7 +143,7 @@ public class Invalidate extends BaseCommand {
       EventIDHolder clientEvent = new EventIDHolder(eventId);
 
       // msg.isRetry might be set by v7.0 and later clients
-      if (msg.isRetry()) {
+      if (clientMessage.isRetry()) {
         // if (logger.isDebugEnabled()) {
         // logger.debug("DEBUG: encountered isRetry in Invalidate");
         // }
@@ -157,9 +157,9 @@ public class Invalidate extends BaseCommand {
         }
       }
 
-      region.basicBridgeInvalidate(key, callbackArg, servConn.getProxyID(), true, clientEvent);
+      region.basicBridgeInvalidate(key, callbackArg, serverConnection.getProxyID(), true, clientEvent);
       tag = clientEvent.getVersionTag();
-      servConn.setModificationInfo(true, regionName, key);
+      serverConnection.setModificationInfo(true, regionName, key);
     } catch (EntryNotFoundException e) {
       // Don't send an exception back to the client if this
       // exception happens. Just log it and continue.
@@ -167,25 +167,25 @@ public class Invalidate extends BaseCommand {
           LocalizedStrings.BaseCommand_DURING_0_NO_ENTRY_WAS_FOUND_FOR_KEY_1,
           new Object[] {"invalidate", key}));
     } catch (RegionDestroyedException rde) {
-      writeException(msg, rde, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, rde, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
 
       // If an exception occurs during the destroy, preserve the connection
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       if (e instanceof GemFireSecurityException) {
         // Fine logging for security exceptions since these are already
         // logged by the security logger
         if (logger.isDebugEnabled()) {
-          logger.debug("{}: Unexpected Security exception", servConn.getName(), e);
+          logger.debug("{}: Unexpected Security exception", serverConnection.getName(), e);
         }
       } else {
         logger.warn(LocalizedMessage.create(LocalizedStrings.BaseCommand_0_UNEXPECTED_EXCEPTION,
-            servConn.getName()), e);
+            serverConnection.getName()), e);
       }
       return;
     }
@@ -199,17 +199,17 @@ public class Invalidate extends BaseCommand {
     if (region instanceof PartitionedRegion) {
       PartitionedRegion pr = (PartitionedRegion) region;
       if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-        writeReplyWithRefreshMetadata(msg, servConn, pr, pr.getNetworkHopType(), tag);
+        writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, pr.getNetworkHopType(), tag);
         pr.clearNetworkHopData();
       } else {
-        writeReply(msg, servConn, tag);
+        writeReply(clientMessage, serverConnection, tag);
       }
     } else {
-      writeReply(msg, servConn, tag);
+      writeReply(clientMessage, serverConnection, tag);
     }
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent invalidate response for region {} key {}", servConn.getName(),
+      logger.debug("{}: Sent invalidate response for region {} key {}", serverConnection.getName(),
           regionName, key);
     }
     stats.incWriteInvalidateResponseTime(DistributionStats.getStatTime() - start);

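A short client-side sketch of the operation that reaches Invalidate.cmdExecute; the region and key are placeholders.

import org.apache.geode.cache.EntryNotFoundException;
import org.apache.geode.cache.Region;

public class InvalidateExample {
  public static void invalidateOnServer(Region<String, Object> region, String key) {
    try {
      region.invalidate(key); // triggers the server-side Invalidate command
    } catch (EntryNotFoundException e) {
      // invalidate() declares EntryNotFoundException for missing entries; the
      // server-side handler above logs that case rather than failing the request.
      System.out.println("No entry found for key " + key);
    }
  }
}
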
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate70.java
index 4ac5023..2531e1d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate70.java
@@ -60,7 +60,7 @@ public class Invalidate70 extends Invalidate {
     pr.getPrStats().incPRMetaDataSentCount();
     replyMsg.send(servConn);
     if (logger.isTraceEnabled()) {
-      logger.trace("{}: rpl with REFRESH_METADAT tx: {}", servConn.getName(),
+      logger.trace("{}: rpl with REFRESH_METADATA tx: {}", servConn.getName(),
           origMsg.getTransactionId());
     }
   }
@@ -90,7 +90,7 @@ public class Invalidate70 extends Invalidate {
         logger.debug("response has no version tag");
       }
     }
-    replyMsg.addBytesPart(OK_BYTES); // make old single-hop code happy by putting byte[]{0} here
+    replyMsg.addBytesPart(okBytes()); // make old single-hop code happy by putting byte[]{0} here
     replyMsg.send(servConn);
     if (logger.isTraceEnabled()) {
       logger.trace("{}: rpl tx: {} parts={}", servConn.getName(), origMsg.getTransactionId(),

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/KeySet.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/KeySet.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/KeySet.java
index 5f7388c..a35c4b0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/KeySet.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/KeySet.java
@@ -26,7 +26,6 @@ import java.util.Set;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.operations.KeySetOperationContext;
 import org.apache.geode.internal.cache.LocalRegion;
-import org.apache.geode.internal.cache.tier.CachedRegionHelper;
 import org.apache.geode.internal.cache.tier.Command;
 import org.apache.geode.internal.cache.tier.MessageType;
 import org.apache.geode.internal.cache.tier.sockets.BaseCommand;
@@ -49,21 +48,21 @@ public class KeySet extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null;
     String regionName = null;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
     // Retrieve the region name from the message parts
-    regionNamePart = msg.getPart(0);
+    regionNamePart = clientMessage.getPart(0);
     regionName = regionNamePart.getString();
-    ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
+    ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
     final boolean isDebugEnabled = logger.isDebugEnabled();
     if (isDebugEnabled) {
       logger.debug("{}: Received key set request ({} bytes) from {} for region {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName);
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName);
     }
 
     // Process the key set request
@@ -72,41 +71,41 @@ public class KeySet extends BaseCommand {
       // if (regionName == null) (can only be null)
       {
         message = LocalizedStrings.KeySet_0_THE_INPUT_REGION_NAME_FOR_THE_KEY_SET_REQUEST_IS_NULL
-            .toLocalizedString(servConn.getName());
+            .toLocalizedString(serverConnection.getName());
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.KeySet_0_THE_INPUT_REGION_NAME_FOR_THE_KEY_SET_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
       }
-      writeKeySetErrorResponse(msg, MessageType.KEY_SET_DATA_ERROR, message, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeKeySetErrorResponse(clientMessage, MessageType.KEY_SET_DATA_ERROR, message, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason = LocalizedStrings.KeySet__0_WAS_NOT_FOUND_DURING_KEY_SET_REQUEST
           .toLocalizedString(regionName);
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     try {
       this.securityService.authorizeRegionRead(regionName);
     } catch (NotAuthorizedException ex) {
-      writeChunkedException(msg, ex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, ex, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     KeySetOperationContext keySetContext = null;
-    AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+    AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
     if (authzRequest != null) {
       try {
         keySetContext = authzRequest.keySetAuthorize(regionName);
       } catch (NotAuthorizedException ex) {
-        writeChunkedException(msg, ex, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeChunkedException(clientMessage, ex, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -117,20 +116,20 @@ public class KeySet extends BaseCommand {
 
     // Send header
     chunkedResponseMsg.setMessageType(MessageType.RESPONSE);
-    chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+    chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
     chunkedResponseMsg.sendHeader();
 
     // Send chunk response
     try {
-      fillAndSendKeySetResponseChunks(region, regionName, keySetContext, servConn);
-      servConn.setAsTrue(RESPONDED);
+      fillAndSendKeySetResponseChunks(region, regionName, keySetContext, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
 
       // Otherwise, write an exception message and continue
-      writeChunkedException(msg, e, false, servConn, servConn.getChunkedResponseMessage());
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection, serverConnection.getChunkedResponseMessage());
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -138,7 +137,7 @@ public class KeySet extends BaseCommand {
       // logger.fine(getName() + ": Sent chunk (1 of 1) of register interest
       // response (" + chunkedResponseMsg.getBufferLength() + " bytes) for
       // region " + regionName + " key " + key);
-      logger.debug("{}: Sent key set response for the region {}", servConn.getName(), regionName);
+      logger.debug("{}: Sent key set response for the region {}", serverConnection.getName(), regionName);
     }
     // bserverStats.incLong(writeDestroyResponseTimeId,
     // DistributionStats.getStatTime() - start);
@@ -160,7 +159,7 @@ public class KeySet extends BaseCommand {
       keySet = keySetContext.getKeySet();
     }
 
-    List keyList = new ArrayList(maximumChunkSize);
+    List keyList = new ArrayList(MAXIMUM_CHUNK_SIZE);
     final boolean isTraceEnabled = logger.isTraceEnabled();
     for (Iterator it = keySet.iterator(); it.hasNext();) {
       Object entryKey = it.next();
@@ -169,7 +168,7 @@ public class KeySet extends BaseCommand {
         logger.trace("{}: fillAndSendKeySetResponseKey <{}>; list size was {}; region: {}",
             servConn.getName(), entryKey, keyList.size(), region.getFullPath());
       }
-      if (keyList.size() == maximumChunkSize) {
+      if (keyList.size() == MAXIMUM_CHUNK_SIZE) {
         // Send the chunk and clear the list
         sendKeySetResponseChunk(region, keyList, false, servConn);
         keyList.clear();
@@ -185,7 +184,7 @@ public class KeySet extends BaseCommand {
 
     chunkedResponseMsg.setNumberOfParts(1);
     chunkedResponseMsg.setLastChunk(lastChunk);
-    chunkedResponseMsg.addObjPart(list, zipValues);
+    chunkedResponseMsg.addObjPart(list, false);
 
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Sending {} key set response chunk for region={}{}", servConn.getName(),

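Editor's note: the KeySet hunks above rename parameters but keep the existing chunking scheme: keys are gathered into a list bounded by MAXIMUM_CHUNK_SIZE, each full batch is flushed as a non-final chunk, and whatever remains is sent flagged as the last chunk. A self-contained sketch of that batching loop follows; ChunkedKeySetSketch and sendChunk() are hypothetical stand-ins for Geode's chunked response message machinery, not part of this patch.

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  public class ChunkedKeySetSketch {

    private static final int MAXIMUM_CHUNK_SIZE = 2;   // tiny value so the demo emits several chunks

    // Hypothetical stand-in for building one chunked response part and sending it.
    private static void sendChunk(List<String> keys, boolean lastChunk) {
      System.out.println("chunk=" + keys + " lastChunk=" + lastChunk);
    }

    public static void main(String[] args) {
      List<String> keySet = Arrays.asList("k1", "k2", "k3", "k4", "k5");

      List<String> keyList = new ArrayList<>(MAXIMUM_CHUNK_SIZE);
      for (String key : keySet) {
        keyList.add(key);
        if (keyList.size() == MAXIMUM_CHUNK_SIZE) {
          // Flush the full batch as a non-final chunk and start a new one.
          sendChunk(keyList, false);
          keyList.clear();
        }
      }
      // Whatever is left (possibly an empty list) goes out flagged as the last chunk.
      sendChunk(keyList, true);
    }
  }
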
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MakePrimary.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MakePrimary.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MakePrimary.java
index fc8fe44..0786990 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MakePrimary.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MakePrimary.java
@@ -34,30 +34,30 @@ public class MakePrimary extends BaseCommand {
   private MakePrimary() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    Part isClientReadyPart = msg.getPart(0);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    Part isClientReadyPart = clientMessage.getPart(0);
     byte[] isClientReadyPartBytes = (byte[]) isClientReadyPart.getObject();
     boolean isClientReady = isClientReadyPartBytes[0] == 0x01;
     final boolean isDebugEnabled = logger.isDebugEnabled();
     if (isDebugEnabled) {
       logger.debug("{}: Received make primary request ({} bytes) isClientReady={}: from {}",
-          servConn.getName(), msg.getPayloadLength(), isClientReady, servConn.getSocketString());
+          serverConnection.getName(), clientMessage.getPayloadLength(), isClientReady, serverConnection.getSocketString());
     }
     try {
-      servConn.getAcceptor().getCacheClientNotifier().makePrimary(servConn.getProxyID(),
+      serverConnection.getAcceptor().getCacheClientNotifier().makePrimary(serverConnection.getProxyID(),
           isClientReady);
-      writeReply(msg, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeReply(clientMessage, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
 
       if (isDebugEnabled) {
-        logger.debug("{}: Sent make primary response for {}", servConn.getName(),
-            servConn.getSocketString());
+        logger.debug("{}: Sent make primary response for {}", serverConnection.getName(),
+            serverConnection.getSocketString());
       }
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     }
   }
 

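Editor's note: MakePrimary is part of the client subscription handshake; a client whose pool has subscriptions enabled designates one server connection as primary for event delivery. For orientation only, a minimal client configuration that exercises this path might look like the sketch below; the locator address, redundancy level, and region name "example" are assumptions for the sketch, not taken from this patch.

  import org.apache.geode.cache.Region;
  import org.apache.geode.cache.client.ClientCache;
  import org.apache.geode.cache.client.ClientCacheFactory;
  import org.apache.geode.cache.client.ClientRegionShortcut;

  public class SubscriptionClientSketch {
    public static void main(String[] args) {
      // Subscription-enabled pool: the client picks a primary server for event delivery,
      // which is when the server-side MakePrimary command comes into play.
      ClientCache cache = new ClientCacheFactory()
          .addPoolLocator("localhost", 10334)      // assumed locator address
          .setPoolSubscriptionEnabled(true)
          .setPoolSubscriptionRedundancy(1)        // one backup queue in addition to the primary
          .create();

      Region<String, String> region = cache
          .<String, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
          .create("example");                      // assumed server-side region name

      region.registerInterest("ALL_KEYS");         // subscribe to server-side events
      cache.close();
    }
  }
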
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ManagementCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ManagementCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ManagementCommand.java
index b5e1ca3..9a79540 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ManagementCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ManagementCommand.java
@@ -28,7 +28,7 @@ import org.apache.geode.internal.cache.tier.sockets.ServerConnection;
 public class ManagementCommand extends BaseCommand {
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException, InterruptedException {
     // TODO Auto-generated method stub
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PeriodicAck.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PeriodicAck.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PeriodicAck.java
index d301909..e57385f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PeriodicAck.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PeriodicAck.java
@@ -33,43 +33,43 @@ public class PeriodicAck extends BaseCommand {
   private PeriodicAck() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received periodic ack request ({} bytes) from {}", servConn.getName(),
-          msg.getPayloadLength(), servConn.getSocketString());
+      logger.debug("{}: Received periodic ack request ({} bytes) from {}", serverConnection.getName(),
+          clientMessage.getPayloadLength(), serverConnection.getSocketString());
     }
     try {
-      int numEvents = msg.getNumberOfParts();
+      int numEvents = clientMessage.getNumberOfParts();
       boolean success = false;
-      CacheClientNotifier ccn = servConn.getAcceptor().getCacheClientNotifier();
-      CacheClientProxy proxy = ccn.getClientProxy(servConn.getProxyID());
+      CacheClientNotifier ccn = serverConnection.getAcceptor().getCacheClientNotifier();
+      CacheClientProxy proxy = ccn.getClientProxy(serverConnection.getProxyID());
       if (proxy != null) {
         proxy.getHARegionQueue().createAckedEventsMap();
         for (int i = 0; i < numEvents; i++) {
-          Part eventIdPart = msg.getPart(i);
-          eventIdPart.setVersion(servConn.getClientVersion());
+          Part eventIdPart = clientMessage.getPart(i);
+          eventIdPart.setVersion(serverConnection.getClientVersion());
           EventID eid = (EventID) eventIdPart.getObject();
-          success = ccn.processDispatchedMessage(servConn.getProxyID(), eid);
+          success = ccn.processDispatchedMessage(serverConnection.getProxyID(), eid);
           if (!success)
             break;
         }
       }
       if (success) {
         proxy.getHARegionQueue().setAckedEvents();
-        writeReply(msg, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeReply(clientMessage, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       }
 
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     }
 
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent periodic ack response for {}", servConn.getName(),
-          servConn.getSocketString());
+      logger.debug("{}: Sent periodic ack response for {}", serverConnection.getName(),
+          serverConnection.getSocketString());
     }
 
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Ping.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Ping.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Ping.java
index 7a12ce1..9755410 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Ping.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Ping.java
@@ -39,35 +39,35 @@ public class Ping extends BaseCommand {
   private Ping() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
     final boolean isDebugEnabled = logger.isDebugEnabled();
     if (isDebugEnabled) {
-      logger.debug("{}: rcv tx: {} from {} rcvTime: {}", servConn.getName(), msg.getTransactionId(),
-          servConn.getSocketString(), (DistributionStats.getStatTime() - start));
+      logger.debug("{}: rcv tx: {} from {} rcvTime: {}", serverConnection.getName(), clientMessage.getTransactionId(),
+          serverConnection.getSocketString(), (DistributionStats.getStatTime() - start));
     }
     ClientHealthMonitor chm = ClientHealthMonitor.getInstance();
     if (chm != null)
-      chm.receivedPing(servConn.getProxyID());
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
+      chm.receivedPing(serverConnection.getProxyID());
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
 
-    writeReply(msg, servConn);
-    servConn.setAsTrue(RESPONDED);
+    writeReply(clientMessage, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
     if (isDebugEnabled) {
-      logger.debug("{}: Sent ping reply to {}", servConn.getName(), servConn.getSocketString());
+      logger.debug("{}: Sent ping reply to {}", serverConnection.getName(), serverConnection.getSocketString());
     }
   }
 
   @Override
-  protected void writeReply(Message origMsg, ServerConnection servConn) throws IOException {
-    Message replyMsg = servConn.getReplyMessage();
-    servConn.getCache().getCancelCriterion().checkCancelInProgress(null);
+  protected void writeReply(Message origMsg, ServerConnection serverConnection) throws IOException {
+    Message replyMsg = serverConnection.getReplyMessage();
+    serverConnection.getCache().getCancelCriterion().checkCancelInProgress(null);
     replyMsg.setMessageType(MessageType.REPLY);
     replyMsg.setNumberOfParts(1);
     replyMsg.setTransactionId(origMsg.getTransactionId());
-    replyMsg.addBytesPart(OK_BYTES);
-    replyMsg.send(servConn);
+    replyMsg.addBytesPart(okBytes());
+    replyMsg.send(serverConnection);
     if (logger.isTraceEnabled()) {
-      logger.trace("{}: rpl tx: {}", servConn.getName(), origMsg.getTransactionId());
+      logger.trace("{}: rpl tx: {}", serverConnection.getName(), origMsg.getTransactionId());
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put.java
index 2a235c0..d724f66 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put.java
@@ -49,36 +49,36 @@ public class Put extends BaseCommand {
 
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keyPart = null, valuePart = null, callbackArgPart = null;
     String regionName = null;
     Object callbackArg = null, key = null;
     Part eventPart = null;
     String errMessage = "";
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
     // requiresResponse = true;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     {
       long oldStart = start;
       start = DistributionStats.getStatTime();
       stats.incReadPutRequestTime(start - oldStart);
     }
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
-    keyPart = msg.getPart(1);
-    valuePart = msg.getPart(2);
-    eventPart = msg.getPart(3);
+    regionNamePart = clientMessage.getPart(0);
+    keyPart = clientMessage.getPart(1);
+    valuePart = clientMessage.getPart(2);
+    eventPart = clientMessage.getPart(3);
     // callbackArgPart = null; (redundant assignment)
-    if (msg.getNumberOfParts() > 4) {
-      callbackArgPart = msg.getPart(4);
+    if (clientMessage.getNumberOfParts() > 4) {
+      callbackArgPart = clientMessage.getPart(4);
       try {
         callbackArg = callbackArgPart.getObject();
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -87,14 +87,14 @@ public class Put extends BaseCommand {
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     if (logger.isTraceEnabled()) {
       logger.trace("{}: Received put request ({} bytes) from {} for region {} key {} value {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName, key,
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key,
           valuePart);
     }
 
@@ -102,28 +102,28 @@ public class Put extends BaseCommand {
     if (key == null || regionName == null) {
       if (key == null) {
         logger.warn(LocalizedMessage.create(
-            LocalizedStrings.Put_0_THE_INPUT_KEY_FOR_THE_PUT_REQUEST_IS_NULL, servConn.getName()));
+            LocalizedStrings.Put_0_THE_INPUT_KEY_FOR_THE_PUT_REQUEST_IS_NULL, serverConnection.getName()));
         errMessage =
             LocalizedStrings.Put_THE_INPUT_KEY_FOR_THE_PUT_REQUEST_IS_NULL.toLocalizedString();
       }
       if (regionName == null) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.Put_0_THE_INPUT_REGION_NAME_FOR_THE_PUT_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
         errMessage = LocalizedStrings.Put_THE_INPUT_REGION_NAME_FOR_THE_PUT_REQUEST_IS_NULL
             .toLocalizedString();
       }
-      writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason =
           LocalizedStrings.Put_REGION_WAS_NOT_FOUND_DURING_PUT_REQUEST.toLocalizedString();
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -131,18 +131,18 @@ public class Put extends BaseCommand {
       // Invalid to 'put' a null value in an existing key
       logger.info(LocalizedMessage.create(
           LocalizedStrings.Put_0_ATTEMPTED_TO_PUT_A_NULL_VALUE_FOR_EXISTING_KEY_1,
-          new Object[] {servConn.getName(), key}));
+          new Object[] { serverConnection.getName(), key}));
       errMessage =
           LocalizedStrings.Put_ATTEMPTED_TO_PUT_A_NULL_VALUE_FOR_EXISTING_KEY_0.toLocalizedString();
-      writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     try {
       byte[] value = valuePart.getSerializedForm();
@@ -150,7 +150,7 @@ public class Put extends BaseCommand {
 
       this.securityService.authorizeRegionWrite(regionName, key.toString());
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           authzRequest.createRegionAuthorize((String) key);
@@ -173,46 +173,46 @@ public class Put extends BaseCommand {
         // Create the null entry. Since the value is null, the isObject
         // argument (the "true" passed after the null)
         // doesn't matter and is not used.
-        result = region.basicBridgeCreate(key, null, true, callbackArg, servConn.getProxyID(), true,
+        result = region.basicBridgeCreate(key, null, true, callbackArg, serverConnection.getProxyID(), true,
             new EventIDHolder(eventId), false);
       } else {
         // Put the entry
         result = region.basicBridgePut(key, value, null, isObject, callbackArg,
-            servConn.getProxyID(), true, new EventIDHolder(eventId));
+            serverConnection.getProxyID(), true, new EventIDHolder(eventId));
       }
       if (result) {
-        servConn.setModificationInfo(true, regionName, key);
+        serverConnection.setModificationInfo(true, regionName, key);
       } else {
         StringId message = LocalizedStrings.PUT_0_FAILED_TO_PUT_ENTRY_FOR_REGION_1_KEY_2_VALUE_3;
-        Object[] messageArgs = new Object[] {servConn.getName(), regionName, key, valuePart};
+        Object[] messageArgs = new Object[] { serverConnection.getName(), regionName, key, valuePart};
         String s = message.toLocalizedString(messageArgs);
         logger.info(s);
         throw new Exception(s);
       }
     } catch (RegionDestroyedException rde) {
-      writeException(msg, rde, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, rde, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (ResourceException re) {
-      writeException(msg, re, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, re, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (Exception ce) {
       // If an interrupted exception is thrown, rethrow it
-      checkForInterrupt(servConn, ce);
+      checkForInterrupt(serverConnection, ce);
 
       // If an exception occurs during the put, preserve the connection
-      writeException(msg, ce, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, ce, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       if (ce instanceof GemFireSecurityException) {
         // Fine logging for security exceptions since these are already
         // logged by the security logger
         if (logger.isDebugEnabled()) {
-          logger.debug("{}: Unexpected Security exception", servConn.getName(), ce);
+          logger.debug("{}: Unexpected Security exception", serverConnection.getName(), ce);
         }
       } else {
         logger.warn(LocalizedMessage.create(LocalizedStrings.PUT_0_UNEXPECTED_EXCEPTION,
-            servConn.getName()), ce);
+            serverConnection.getName()), ce);
       }
       return;
     } finally {
@@ -222,12 +222,12 @@ public class Put extends BaseCommand {
     }
 
     // Increment statistics and write the reply
-    writeReply(msg, servConn);
+    writeReply(clientMessage, serverConnection);
 
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Sent put response back to {} for region {} key {} value {}",
-          servConn.getName(), servConn.getSocketString(), regionName, key, valuePart);
+          serverConnection.getName(), serverConnection.getSocketString(), regionName, key, valuePart);
     }
     stats.incWritePutResponseTime(DistributionStats.getStatTime() - start);
   }

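Editor's note: the Put command above unpacks the parts an ordinary client put sends: region name, key, value, event id, and an optional callback argument as part 5. On the client side those correspond to the two put overloads on Region. A small illustrative helper follows; it presumes an already connected client PROXY region such as the one sketched after the MakePrimary diff, and the key/value/callback strings are placeholders.

  import org.apache.geode.cache.Region;

  public class PutSketch {
    /**
     * Issues the two flavors of put that the server-side Put command distinguishes:
     * without a callback argument (four message parts) and with one (five parts).
     */
    public static void doPuts(Region<String, String> region) {
      region.put("key-1", "value-1");                   // no callback argument
      region.put("key-2", "value-2", "audit-context");  // callback argument travels as the fifth part
    }
  }
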
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put61.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put61.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put61.java
index d123ea2..3f9a72e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put61.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put61.java
@@ -55,7 +55,7 @@ public class Put61 extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long p_start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long p_start)
       throws IOException, InterruptedException {
     long start = p_start;
     Part regionNamePart = null, keyPart = null, valuePart = null, callbackArgPart = null;
@@ -64,36 +64,36 @@ public class Put61 extends BaseCommand {
     Part eventPart = null;
     StringBuffer errMessage = new StringBuffer();
     boolean isDelta = false;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
     // requiresResponse = true;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     {
       long oldStart = start;
       start = DistributionStats.getStatTime();
       stats.incReadPutRequestTime(start - oldStart);
     }
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
-    keyPart = msg.getPart(1);
+    regionNamePart = clientMessage.getPart(0);
+    keyPart = clientMessage.getPart(1);
     try {
-      isDelta = (Boolean) msg.getPart(2).getObject();
+      isDelta = (Boolean) clientMessage.getPart(2).getObject();
     } catch (Exception e) {
-      writeException(msg, MessageType.PUT_DELTA_ERROR, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, MessageType.PUT_DELTA_ERROR, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       // CachePerfStats not available here.
       return;
     }
-    valuePart = msg.getPart(3);
-    eventPart = msg.getPart(4);
-    if (msg.getNumberOfParts() > 5) {
-      callbackArgPart = msg.getPart(5);
+    valuePart = clientMessage.getPart(3);
+    eventPart = clientMessage.getPart(4);
+    if (clientMessage.getNumberOfParts() > 5) {
+      callbackArgPart = clientMessage.getPart(5);
       try {
         callbackArg = callbackArgPart.getObject();
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -102,16 +102,16 @@ public class Put61 extends BaseCommand {
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     final boolean isDebugEnabled = logger.isDebugEnabled();
     if (isDebugEnabled) {
       logger.debug("{}: Received 6.1{}put request ({} bytes) from {} for region {} key {}",
-          servConn.getName(), (isDelta ? " delta " : " "), msg.getPayloadLength(),
-          servConn.getSocketString(), regionName, key);
+          serverConnection.getName(), (isDelta ? " delta " : " "), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName, key);
     }
 
     // Process the put request
@@ -119,27 +119,27 @@ public class Put61 extends BaseCommand {
       if (key == null) {
         String putMsg = " The input key for the 6.1 put request is null";
         if (isDebugEnabled) {
-          logger.debug("{}:{}", servConn.getName(), putMsg);
+          logger.debug("{}:{}", serverConnection.getName(), putMsg);
         }
         errMessage.append(putMsg);
       }
       if (regionName == null) {
         String putMsg = " The input region name for the 6.1 put request is null";
         if (isDebugEnabled) {
-          logger.debug("{}:{}", servConn.getName(), putMsg);
+          logger.debug("{}:{}", serverConnection.getName(), putMsg);
         }
         errMessage.append(putMsg);
       }
-      writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason = " was not found during 6.1 put request";
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -147,11 +147,11 @@ public class Put61 extends BaseCommand {
       // Invalid to 'put' a null value in an existing key
       String putMsg = " Attempted to 6.1 put a null value for existing key " + key;
       if (isDebugEnabled) {
-        logger.debug("{}:{}", servConn.getName(), putMsg);
+        logger.debug("{}:{}", serverConnection.getName(), putMsg);
       }
       errMessage.append(putMsg);
-      writeErrorResponse(msg, MessageType.PUT_DATA_ERROR, errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -160,7 +160,7 @@ public class Put61 extends BaseCommand {
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     try {
       Object value = null;
@@ -169,13 +169,13 @@ public class Put61 extends BaseCommand {
       }
       boolean isObject = valuePart.isObject();
       boolean isMetaRegion = region.isUsedForMetaRegion();
-      msg.setMetaRegion(isMetaRegion);
+      clientMessage.setMetaRegion(isMetaRegion);
 
       this.securityService.authorizeRegionWrite(regionName, key.toString());
 
       AuthorizeRequest authzRequest = null;
       if (!isMetaRegion) {
-        authzRequest = servConn.getAuthzRequest();
+        authzRequest = serverConnection.getAuthzRequest();
       }
       if (authzRequest != null) {
         if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
@@ -199,7 +199,7 @@ public class Put61 extends BaseCommand {
         // Create the null entry. Since the value is null, the isObject
         // argument (the "true" passed after the null)
         // doesn't matter and is not used.
-        result = region.basicBridgeCreate(key, null, true, callbackArg, servConn.getProxyID(), true,
+        result = region.basicBridgeCreate(key, null, true, callbackArg, serverConnection.getProxyID(), true,
             new EventIDHolder(eventId), false);
       } else {
         // Put the entry
@@ -208,50 +208,50 @@ public class Put61 extends BaseCommand {
           delta = valuePart.getSerializedForm();
         }
         result = region.basicBridgePut(key, value, delta, isObject, callbackArg,
-            servConn.getProxyID(), true, new EventIDHolder(eventId));
+            serverConnection.getProxyID(), true, new EventIDHolder(eventId));
       }
       if (result) {
-        servConn.setModificationInfo(true, regionName, key);
+        serverConnection.setModificationInfo(true, regionName, key);
       } else {
-        String message = servConn.getName() + ": Failed to 6.1 put entry for region " + regionName
-            + " key " + key + " value " + valuePart;
+        String message = serverConnection.getName() + ": Failed to 6.1 put entry for region " + regionName
+                         + " key " + key + " value " + valuePart;
         if (isDebugEnabled) {
           logger.debug(message);
         }
         throw new Exception(message);
       }
     } catch (RegionDestroyedException rde) {
-      writeException(msg, rde, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, rde, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (ResourceException re) {
-      writeException(msg, re, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, re, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (InvalidDeltaException ide) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.UpdateOperation_ERROR_APPLYING_DELTA_FOR_KEY_0_OF_REGION_1,
           new Object[] {key, regionName}));
-      writeException(msg, MessageType.PUT_DELTA_ERROR, ide, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, MessageType.PUT_DELTA_ERROR, ide, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       region.getCachePerfStats().incDeltaFullValuesRequested();
       return;
 
     } catch (Exception ce) {
       // If an interrupted exception is thrown, rethrow it
-      checkForInterrupt(servConn, ce);
+      checkForInterrupt(serverConnection, ce);
 
       // If an exception occurs during the put, preserve the connection
-      writeException(msg, ce, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, ce, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       if (ce instanceof GemFireSecurityException) {
         // Fine logging for security exceptions since these are already
         // logged by the security logger
         if (isDebugEnabled) {
-          logger.debug("{}: Unexpected Security exception", servConn.getName(), ce);
+          logger.debug("{}: Unexpected Security exception", serverConnection.getName(), ce);
         }
       } else if (isDebugEnabled) {
-        logger.debug("{}: Unexpected Exception", servConn.getName(), ce);
+        logger.debug("{}: Unexpected Exception", serverConnection.getName(), ce);
       }
       return;
     } finally {
@@ -264,18 +264,18 @@ public class Put61 extends BaseCommand {
     if (region instanceof PartitionedRegion) {
       PartitionedRegion pr = (PartitionedRegion) region;
       if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-        writeReplyWithRefreshMetadata(msg, servConn, pr, pr.getNetworkHopType());
+        writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, pr.getNetworkHopType());
         pr.clearNetworkHopData();
       } else {
-        writeReply(msg, servConn);
+        writeReply(clientMessage, serverConnection);
       }
     } else {
-      writeReply(msg, servConn);
+      writeReply(clientMessage, serverConnection);
     }
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
     if (isDebugEnabled) {
       logger.debug("{}: Sent 6.1 put response back to {} for region {} key {} value {}",
-          servConn.getName(), servConn.getSocketString(), regionName, key, valuePart);
+          serverConnection.getName(), serverConnection.getSocketString(), regionName, key, valuePart);
     }
     stats.incWritePutResponseTime(DistributionStats.getStatTime() - start);
   }

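Editor's note: Put61 is the 6.1-protocol put that also accepts deltas. Part 2 carries an isDelta flag, and when the delta cannot be applied the server replies PUT_DELTA_ERROR and increments the delta-full-values-requested statistic so the client falls back to sending the full value. Deltas originate from cached values that implement org.apache.geode.Delta; a minimal sketch follows, in which the Counter class and its fields are illustrative and not part of this patch.

  import java.io.DataInput;
  import java.io.DataOutput;
  import java.io.IOException;
  import java.io.Serializable;

  import org.apache.geode.Delta;
  import org.apache.geode.InvalidDeltaException;

  public class Counter implements Delta, Serializable {
    private int total;
    private transient int pendingIncrement;   // what changed since the last toDelta()

    public void increment(int by) {
      this.total += by;
      this.pendingIncrement += by;
    }

    @Override
    public boolean hasDelta() {
      return this.pendingIncrement != 0;      // only ship a delta when something changed
    }

    @Override
    public void toDelta(DataOutput out) throws IOException {
      out.writeInt(this.pendingIncrement);    // serialize just the change
      this.pendingIncrement = 0;
    }

    @Override
    public void fromDelta(DataInput in) throws IOException, InvalidDeltaException {
      this.total += in.readInt();             // apply the change to the stored value
    }
  }

After the server holds a full copy of such a value, a subsequent put from the client can ship only the small delta, and the InvalidDeltaException branch in Put61 is the recovery path when applying that delta fails.
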

[39/43] geode git commit: Fix spotless format

Posted by kl...@apache.org.
Fix spotless format


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/4f6a7a70
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/4f6a7a70
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/4f6a7a70

Branch: refs/heads/feature/GEODE-2632-17
Commit: 4f6a7a706e443e10a8ba6088adb7439a8d7cfa55
Parents: 9a97112
Author: Kirk Lund <kl...@apache.org>
Authored: Wed May 24 16:43:01 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:11 2017 -0700

----------------------------------------------------------------------
 .../cache/ha/BlockingHARegionJUnitTest.java     | 18 ++++++++++------
 .../SerializableErrorCollector.java             | 22 +++++++++-----------
 2 files changed, 22 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/4f6a7a70/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
index 3c1adc3..1534192 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
@@ -48,7 +48,8 @@ import org.apache.geode.test.junit.rules.serializable.SerializableErrorCollector
  * #40314: Filled up queue causes all publishers to block
  *
  * <p>
- * #37627: In case of out of order messages, (sequence Id violation), in spite of HARQ not full, the capacity (putPermits) of the HARQ exhausted.
+ * #37627: In case of out of order messages, (sequence Id violation), in spite of HARQ not full, the
+ * capacity (putPermits) of the HARQ exhausted.
  */
 @Category({IntegrationTest.class, ClientSubscriptionTest.class})
 public class BlockingHARegionJUnitTest {
@@ -117,7 +118,8 @@ public class BlockingHARegionJUnitTest {
   @Test
   public void testBoundedPuts() throws Exception {
     this.queueAttributes.setBlockingQueueCapacity(1);
-    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes, BLOCKING_HA_QUEUE, false);
+    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes,
+        BLOCKING_HA_QUEUE, false);
     hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
 
     startDoPuts(hrq, 1000);
@@ -135,7 +137,8 @@ public class BlockingHARegionJUnitTest {
   @Test
   public void testPutBeingBlocked() throws Exception {
     this.queueAttributes.setBlockingQueueCapacity(1);
-    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes, BLOCKING_HA_QUEUE, false);
+    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes,
+        BLOCKING_HA_QUEUE, false);
     hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
 
     Thread doPuts = startDoPuts(hrq, 2);
@@ -160,7 +163,8 @@ public class BlockingHARegionJUnitTest {
   @Test
   public void testConcurrentPutsNotExceedingLimit() throws Exception {
     this.queueAttributes.setBlockingQueueCapacity(10000);
-    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes, BLOCKING_HA_QUEUE, false);
+    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes,
+        BLOCKING_HA_QUEUE, false);
     hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
 
     Thread doPuts1 = startDoPuts(hrq, 20000, 1);
@@ -191,7 +195,8 @@ public class BlockingHARegionJUnitTest {
   @Test
   public void testConcurrentPutsTakesNotExceedingLimit() throws Exception {
     this.queueAttributes.setBlockingQueueCapacity(10000);
-    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes, BLOCKING_HA_QUEUE, false);
+    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes,
+        BLOCKING_HA_QUEUE, false);
     hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
 
     Thread doPuts1 = startDoPuts(hrq, 40000, 1);
@@ -234,7 +239,8 @@ public class BlockingHARegionJUnitTest {
   public void testHARQMaxCapacity_Bug37627() throws Exception {
     this.queueAttributes.setBlockingQueueCapacity(1);
     this.queueAttributes.setExpiryTime(180);
-    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes, BLOCKING_HA_QUEUE, false);
+    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes,
+        BLOCKING_HA_QUEUE, false);
     hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
 
     EventID event1 = new EventID(new byte[] {1}, 1, 2); // violation

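Editor's note: the BlockingHARegionJUnitTest changes above are pure reformatting, but the behavior those tests pin down is worth restating: with a blocking HA queue, once the configured blocking-queue-capacity worth of put permits is consumed on the primary, further puts block until takes or removals free permits. As a plain-JDK analogy only (this is not Geode's HARegionQueue), the contract resembles the following.

  import java.util.concurrent.ArrayBlockingQueue;
  import java.util.concurrent.BlockingQueue;
  import java.util.concurrent.TimeUnit;

  public class BoundedQueueSketch {
    public static void main(String[] args) throws InterruptedException {
      // Capacity 1, analogous to queueAttributes.setBlockingQueueCapacity(1) in the tests above.
      BlockingQueue<String> queue = new ArrayBlockingQueue<>(1);

      queue.put("event-1");                 // fills the queue

      Thread producer = new Thread(() -> {
        try {
          queue.put("event-2");             // blocks until a consumer frees a slot
          System.out.println("second put completed");
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      });
      producer.start();

      TimeUnit.SECONDS.sleep(1);            // give the producer time to block
      System.out.println("taken: " + queue.take());   // frees the slot; the producer unblocks
      producer.join();
    }
  }
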
http://git-wip-us.apache.org/repos/asf/geode/blob/4f6a7a70/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java
----------------------------------------------------------------------
diff --git a/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java
index 0abfdaf..5557f1b 100644
--- a/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java
+++ b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java
@@ -1,18 +1,16 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
  *
- *      http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
  */
 package org.apache.geode.test.junit.rules.serializable;
 


[31/43] geode git commit: Cleanup HARegionQueueJUnitTest and BlockingHARegionQueueJUnitTest

Posted by kl...@apache.org.
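
Editor's note: the cleanup that follows replaces the hand-rolled failure bookkeeping in HARegionQueueJUnitTest (testFailed, message, exceptionInThread) with JUnit rules, notably ErrorCollector, so that assertions made on worker threads are reported as test failures when the test method completes. A stripped-down sketch of that pattern, independent of the Geode classes; the test and helper names are illustrative.

  import static org.hamcrest.CoreMatchers.is;

  import org.junit.Rule;
  import org.junit.Test;
  import org.junit.rules.ErrorCollector;

  public class WorkerThreadErrorCollectorTest {

    @Rule
    public ErrorCollector errorCollector = new ErrorCollector();

    @Test
    public void failuresOnWorkerThreadsFailTheTest() throws Exception {
      Thread worker = new Thread(() -> {
        Object result = computeSomething();
        if (result == null) {
          // Recorded on the rule; reported when the test method finishes.
          errorCollector.addError(new AssertionError("result was unexpectedly null"));
        }
        errorCollector.checkThat("result type", result instanceof String, is(true));
      });
      worker.start();
      worker.join();
    }

    private Object computeSomething() {
      return "ok";   // placeholder for the real work under test
    }
  }

Because addError and checkThat only record problems and defer reporting, the worker thread never has to communicate failure through shared flags, which is exactly what the diff below removes.
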
http://git-wip-us.apache.org/repos/asf/geode/blob/659b9d4e/geode-core/src/test/java/org/apache/geode/internal/cache/ha/HARegionQueueJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/HARegionQueueJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/HARegionQueueJUnitTest.java
index 4028ab3..929093d 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/HARegionQueueJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/HARegionQueueJUnitTest.java
@@ -15,6 +15,8 @@
 package org.apache.geode.internal.cache.ha;
 
 import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.hamcrest.CoreMatchers.*;
+import static org.hamcrest.number.OrderingComparison.*;
 import static org.junit.Assert.*;
 
 import java.io.IOException;
@@ -23,101 +25,75 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.awaitility.Awaitility;
-
-import org.apache.geode.internal.cache.InternalCache;
-import org.apache.geode.test.junit.categories.ClientSubscriptionTest;
 import org.junit.After;
 import org.junit.Before;
-import org.junit.Ignore;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.ErrorCollector;
+import org.junit.rules.TestName;
 
-import org.apache.geode.LogWriter;
-import org.apache.geode.SystemFailure;
-import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheException;
 import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.cache.CacheListener;
 import org.apache.geode.cache.EntryEvent;
 import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionExistsException;
 import org.apache.geode.cache.util.CacheListenerAdapter;
-import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.internal.cache.Conflatable;
 import org.apache.geode.internal.cache.EventID;
+import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.internal.cache.RegionQueue;
 import org.apache.geode.test.dunit.ThreadUtils;
+import org.apache.geode.test.dunit.rules.DistributedRestoreSystemProperties;
+import org.apache.geode.test.junit.categories.ClientSubscriptionTest;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 
 /**
  * This is a test for the APIs of a HARegionQueue and verifies that the head, tail and size counters
  * are updated properly.
+ *
+ * TODO: need to rewrite a bunch of tests in HARegionQueueJUnitTest
  */
 @Category({IntegrationTest.class, ClientSubscriptionTest.class})
 public class HARegionQueueJUnitTest {
 
-  /** The cache instance */
-  protected InternalCache cache = null;
+  /** total number of threads doing put operations */
+  private static final int TOTAL_PUT_THREADS = 10;
 
-  /** Logger for this test */
-  protected LogWriter logger;
+  private static HARegionQueue hrqForTestSafeConflationRemoval;
+  private static List list1;
 
-  /** The <code>RegionQueue</code> instance */
-  protected HARegionQueue rq;
+  protected InternalCache cache;
+  private HARegionQueue haRegionQueue;
 
-  /** total number of threads doing put operations */
-  private static final int TOTAL_PUT_THREADS = 10;
+  @Rule
+  public DistributedRestoreSystemProperties restoreSystemProperties =
+      new DistributedRestoreSystemProperties();
 
-  boolean expiryCalled = false;
+  @Rule
+  public ErrorCollector errorCollector = new ErrorCollector();
 
-  volatile boolean encounteredException = false;
-  boolean allowExpiryToProceed = false;
-  boolean complete = false;
+  @Rule
+  public TestName testName = new TestName();
 
   @Before
   public void setUp() throws Exception {
-    cache = createCache();
-    logger = cache.getLogger();
-    encounteredException = false;
+    this.cache = createCache();
   }
 
   @After
   public void tearDown() throws Exception {
-    cache.close();
-  }
-
-  /**
-   * Creates the cache instance for the test
-   */
-  private InternalCache createCache() throws CacheException {
-    return (InternalCache) new CacheFactory().set(MCAST_PORT, "0").create();
-  }
-
-  /**
-   * Creates HA region-queue object
-   */
-  private HARegionQueue createHARegionQueue(String name)
-      throws IOException, ClassNotFoundException, CacheException, InterruptedException {
-    HARegionQueue regionqueue = HARegionQueue.getHARegionQueueInstance(name, cache,
-        HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
-    return regionqueue;
-  }
-
-  /**
-   * Creates region-queue object
-   */
-  private HARegionQueue createHARegionQueue(String name, HARegionQueueAttributes attrs)
-      throws IOException, ClassNotFoundException, CacheException, InterruptedException {
-    HARegionQueue regionqueue = HARegionQueue.getHARegionQueueInstance(name, cache, attrs,
-        HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
-    return regionqueue;
+    this.cache.close();
+    hrqForTestSafeConflationRemoval = null;
   }
 
   /**
@@ -129,14 +105,10 @@ public class HARegionQueueJUnitTest {
    */
   @Test
   public void testQueuePutWithoutConflation() throws Exception {
-    logger.info("HARegionQueueJUnitTest : testQueuePutWithoutConflation BEGIN");
-
-    rq = createHARegionQueue("testOfferNoConflation");
+    this.haRegionQueue = createHARegionQueue(this.testName.getMethodName());
     int putPerProducer = 20;
     createAndRunProducers(false, false, false, putPerProducer);
-    assertEquals(putPerProducer * TOTAL_PUT_THREADS, rq.size());
-
-    logger.info("HARegionQueueJUnitTest : testQueuePutWithoutConflation END");
+    assertThat(this.haRegionQueue.size(), is(putPerProducer * TOTAL_PUT_THREADS));
   }
 
   /**
@@ -149,14 +121,10 @@ public class HARegionQueueJUnitTest {
    */
   @Test
   public void testQueuePutWithConflation() throws Exception {
-    logger.info("HARegionQueueJUnitTest : testQueuePutWithConflation BEGIN");
-
-    rq = createHARegionQueue("testOfferConflation");
+    this.haRegionQueue = createHARegionQueue(this.testName.getMethodName());
     int putPerProducer = 20;
     createAndRunProducers(true, false, true, putPerProducer);
-    assertEquals(putPerProducer, rq.size());
-
-    logger.info("HARegionQueueJUnitTest : testQueuePutWithConflation END");
+    assertThat(this.haRegionQueue.size(), is(putPerProducer));
   }
 
   /**
@@ -166,319 +134,150 @@ public class HARegionQueueJUnitTest {
    * 3) Wait till all put-threads complete their job <br>
    * 4) Verify that the size of the queue is equal to the total number of puts done by one thread (as
    * the rest of them will be duplicates and hence will be replaced)
-   *
-   * TODO:Dinesh : Work on optimizing the handling of receiving duplicate events
    */
   @Test
   public void testQueuePutWithDuplicates() throws Exception {
-    logger.info("HARegionQueueJUnitTest : testQueuePutWithDuplicates BEGIN");
-
-    rq = createHARegionQueue("testQueuePutWithDuplicates");
+    this.haRegionQueue = createHARegionQueue(this.testName.getMethodName());
     int putPerProducer = 20;
-    // createAndRunProducers(false, true, true, putPerProducer);
-    /* Suyog: Only one thread can enter DACE at a time */
     createAndRunProducers(false, false, true, putPerProducer);
-    assertEquals(putPerProducer * TOTAL_PUT_THREADS, rq.size());
-
-    logger.info("HARegionQueueJUnitTest : testQueuePutWithDuplicates END");
-  }
-
-  /**
-   * Creates and runs the put threads which will create the conflatable objects and add them to the
-   * queue
-   * 
-   * @param generateSameKeys - if all the producers need to put objects with same set of keys
-   *        (needed for conflation testing)
-   * @param generateSameIds - if all the producers need to put objects with same set of ids (needed
-   *        for duplicates testing)
-   * @param conflationEnabled - true if all producers need to put objects with conflation enabled,
-   *        false otherwise.
-   * @param putPerProducer - number of objects offered to the queue by each producer
-   * @throws Exception - thrown if any problem occurs in test execution
-   */
-  private void createAndRunProducers(boolean generateSameKeys, boolean generateSameIds,
-      boolean conflationEnabled, int putPerProducer) throws Exception {
-    Producer[] putThreads = new Producer[TOTAL_PUT_THREADS];
-
-    int i = 0;
-
-    // Create the put-threads, each generating same/different set of ids/keys as
-    // per the parameters
-    for (i = 0; i < TOTAL_PUT_THREADS; i++) {
-      String keyPrefix = null;
-      long startId;
-      if (generateSameKeys) {
-        keyPrefix = "key";
-      } else {
-        keyPrefix = i + "key";
-      }
-      if (generateSameIds) {
-        startId = 1;
-      } else {
-        startId = i * 100000;
-      }
-      putThreads[i] =
-          new Producer("Producer-" + i, keyPrefix, startId, putPerProducer, conflationEnabled);
-    }
-
-    // start the put-threads
-    for (i = 0; i < TOTAL_PUT_THREADS; i++) {
-      putThreads[i].start();
-    }
-
-    // call join on the put-threads so that this thread waits till they complete
-    // before doing verfication
-    for (i = 0; i < TOTAL_PUT_THREADS; i++) {
-      ThreadUtils.join(putThreads[i], 30 * 1000);
-    }
-    assertFalse(encounteredException);
+    assertThat(this.haRegionQueue.size(), is(putPerProducer * TOTAL_PUT_THREADS));
   }
 
   /*
    * Test method for 'org.apache.geode.internal.cache.ha.HARegionQueue.addDispatchedMessage(Object)'
    */
   @Test
-  public void testAddDispatchedMessageObject() {
-    try {
-      // HARegionQueue haRegionQueue = new HARegionQueue("testing", cache);
-      HARegionQueue haRegionQueue = createHARegionQueue("testing");
-      assertTrue(HARegionQueue.getDispatchedMessagesMapForTesting().isEmpty());
-      // TODO:
-
-      haRegionQueue.addDispatchedMessage(new ThreadIdentifier(new byte[1], 1), 1);
-      haRegionQueue.addDispatchedMessage(new ThreadIdentifier(new byte[1], 2), 2);
+  public void testAddDispatchedMessageObject() throws Exception {
+    this.haRegionQueue = createHARegionQueue(this.testName.getMethodName());
+    assertThat(HARegionQueue.getDispatchedMessagesMapForTesting().isEmpty(), is(true));
 
-      assertTrue(!HARegionQueue.getDispatchedMessagesMapForTesting().isEmpty());
-      // HARegionQueue.getDispatchedMessagesMapForTesting().clear();
+    this.haRegionQueue.addDispatchedMessage(new ThreadIdentifier(new byte[1], 1), 1);
+    this.haRegionQueue.addDispatchedMessage(new ThreadIdentifier(new byte[1], 2), 2);
 
-    } catch (Exception e) {
-      throw new AssertionError("Test encountered an exception due to ", e);
-    }
+    assertThat(!HARegionQueue.getDispatchedMessagesMapForTesting().isEmpty(), is(true));
   }
 
   /**
    * tests the blocking peek functionality of BlockingHARegionQueue
    */
   @Test
-  public void testBlockQueue() {
-    exceptionInThread = false;
-    testFailed = false;
-    try {
-      final HARegionQueue bQ = HARegionQueue.getHARegionQueueInstance("testing", cache,
-          HARegionQueue.BLOCKING_HA_QUEUE, false);
-      Thread[] threads = new Thread[10];
-      final CyclicBarrier barrier = new CyclicBarrier(threads.length + 1);
-      for (int i = 0; i < threads.length; i++) {
-        threads[i] = new Thread() {
-          public void run() {
-            try {
-              barrier.await();
-              long startTime = System.currentTimeMillis();
-              Object obj = bQ.peek();
-              if (obj == null) {
-                testFailed = true;
-                message.append(
-                    " Failed :  failed since object was null and was not expected to be null \n");
-              }
-              long totalTime = System.currentTimeMillis() - startTime;
+  public void testBlockQueue() throws Exception {
+    HARegionQueue regionQueue = HARegionQueue.getHARegionQueueInstance(
+        this.testName.getMethodName(), this.cache, HARegionQueue.BLOCKING_HA_QUEUE, false);
+    Thread[] threads = new Thread[10];
+    int threadsLength = threads.length;
+    CyclicBarrier barrier = new CyclicBarrier(threadsLength + 1);
+
+    for (int i = 0; i < threadsLength; i++) {
+      threads[i] = new Thread() {
+        @Override
+        public void run() {
+          try {
+            barrier.await();
+            long startTime = System.currentTimeMillis();
+            Object obj = regionQueue.peek();
+            if (obj == null) {
+              errorCollector.addError(new AssertionError(
+                  "Failed :  failed since object was null and was not expected to be null"));
+            }
+            long totalTime = System.currentTimeMillis() - startTime;
 
-              if (totalTime < 2000) {
-                testFailed = true;
-                message
-                    .append(" Failed :  Expected time to be greater than 2000 but it is not so ");
-              }
-            } catch (Exception e) {
-              exceptionInThread = true;
-              exception = e;
+            if (totalTime < 2000) {
+              errorCollector.addError(new AssertionError(
+                  " Failed :  Expected time to be greater than 2000 but it is not so "));
             }
+          } catch (Exception e) {
+            errorCollector.addError(e);
           }
-        };
-
-      }
-
-      for (int k = 0; k < threads.length; k++) {
-        threads[k].start();
-      }
-      barrier.await();
-      Thread.sleep(5000);
-
-      EventID id = new EventID(new byte[] {1}, 1, 1);
-      bQ.put(new ConflatableObject("key", "value", id, false, "testing"));
-
-      long startTime = System.currentTimeMillis();
-      for (int k = 0; k < threads.length; k++) {
-        ThreadUtils.join(threads[k], 60 * 1000);
-      }
-
-      long totalTime = System.currentTimeMillis() - startTime;
-
-      if (totalTime >= 60000) {
-        fail(" Test taken too long ");
-      }
-
-      if (testFailed) {
-        fail(" test failed due to " + message);
-      }
-
-    } catch (Exception e) {
-      throw new AssertionError(" Test failed due to ", e);
+        }
+      };
     }
-  }
-
-  private static volatile int counter = 0;
-
-  protected boolean exceptionInThread = false;
-
-  protected boolean testFailed = false;
-
-  protected StringBuffer message = new StringBuffer();
-
-  protected Exception exception = null;
 
-  private synchronized int getCounter() {
-    return ++counter;
-  }
-
-  /**
-   * Thread to perform PUTs into the queue
-   */
-  class Producer extends Thread {
-    /** total number of puts by this thread */
-    long totalPuts = 0;
-
-    /** sleep between successive puts */
-    long sleeptime = 10;
-
-    /** prefix to keys of all objects put by this thread */
-    String keyPrefix;
+    for (Thread thread1 : threads) {
+      thread1.start();
+    }
 
-    /** startingId for sequence-ids of all objects put by this thread */
-    long startingId;
+    barrier.await();
 
-    /** name of this producer thread */
-    String producerName;
+    Thread.sleep(5000);
 
-    /**
-     * boolean to indicate whether this thread should create conflation enabled entries
-     */
-    boolean createConflatables;
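+    // the worker threads are now blocked in peek(); a single put lets every peek() return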
+    EventID id = new EventID(new byte[] {1}, 1, 1);
+    regionQueue
+        .put(new ConflatableObject("key", "value", id, false, this.testName.getMethodName()));
 
-    /**
-     * Constructor
-     * 
-     * @param name - name for this thread
-     * @param keyPrefix - prefix to keys of all objects put by this thread
-     * @param startingId - startingId for sequence-ids of all objects put by this thread
-     * @param totalPuts total number of puts by this thread
-     * @param createConflatableEvents - boolean to indicate whether this thread should create
-     *        conflation enabled entries
-     */
-    Producer(String name, String keyPrefix, long startingId, long totalPuts,
-        boolean createConflatableEvents) {
-      super(name);
-      this.producerName = name;
-      this.keyPrefix = keyPrefix;
-      this.startingId = startingId;
-      this.totalPuts = totalPuts;
-      this.createConflatables = createConflatableEvents;
-      setDaemon(true);
+    long startTime = System.currentTimeMillis();
+    for (Thread thread : threads) {
+      ThreadUtils.join(thread, 60 * 1000);
     }
 
-    /** Create Conflatable objects and put them into the Queue. */
-    @Override
-    public void run() {
-      if (producerName == null) {
-        producerName = Thread.currentThread().getName();
-      }
-      for (long i = 0; i < totalPuts; i++) {
-        String REGION_NAME = "test";
-        try {
-          ConflatableObject event = new ConflatableObject(keyPrefix + i, "val" + i,
-              new EventID(new byte[] {1}, startingId, startingId + i), createConflatables,
-              REGION_NAME);
-
-          logger.fine("putting for key =  " + keyPrefix + i);
-          rq.put(event);
-          Thread.sleep(sleeptime);
-        } catch (VirtualMachineError e) {
-          SystemFailure.initiateFailure(e);
-          throw e;
-        } catch (Throwable e) {
-          logger.severe("Exception while running Producer;continue running.", e);
-          encounteredException = true;
-          break;
-        }
-      }
-      logger.info(producerName + " :  Puts completed");
+    long totalTime = System.currentTimeMillis() - startTime;
+
+    if (totalTime >= 60000) {
+      fail(" Test taken too long ");
     }
   }
 
   /**
-   * tests whether expiry of entry in the regin queue occurs as expected
+   * tests whether expiry of an entry in the region queue occurs as expected
    */
   @Test
-  public void testExpiryPositive()
-      throws InterruptedException, IOException, ClassNotFoundException {
+  public void testExpiryPositive() throws Exception {
     HARegionQueueAttributes haa = new HARegionQueueAttributes();
     haa.setExpiryTime(1);
-    HARegionQueue regionqueue = createHARegionQueue("testing", haa);
+
+    HARegionQueue regionQueue = createHARegionQueue(this.testName.getMethodName(), haa);
     long start = System.currentTimeMillis();
-    regionqueue.put(
-        new ConflatableObject("key", "value", new EventID(new byte[] {1}, 1, 1), true, "testing"));
-    Map map = (Map) regionqueue.getConflationMapForTesting().get("testing");
+
+    regionQueue.put(new ConflatableObject("key", "value", new EventID(new byte[] {1}, 1, 1), true,
+        this.testName.getMethodName()));
+
+    Map map = (Map) regionQueue.getConflationMapForTesting().get(this.testName.getMethodName());
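+    // the entry should expire after the 1 second expiry time, leaving both the
+    // conflation map and the region empty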
     waitAtLeast(1000, start, () -> {
-      assertEquals(Collections.EMPTY_MAP, map);
-      assertEquals(Collections.EMPTY_SET, regionqueue.getRegion().keys());
+      assertThat(map, is(Collections.emptyMap()));
+      assertThat(regionQueue.getRegion().keys(), is(Collections.emptySet()));
     });
   }
 
   /**
-   * Wait until a given runnable stops throwing exceptions. It should take at least
-   * minimumElapsedTime after the supplied start time to happen.
-   *
-   * This is useful for validating that an entry doesn't expire until a certain amount of time has
-   * passed
-   */
-  protected void waitAtLeast(final int minimumElapsedTIme, final long start,
-      final Runnable runnable) {
-    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(runnable);
-    long elapsed = System.currentTimeMillis() - start;
-    assertTrue(elapsed >= minimumElapsedTIme);
-  }
-
-  /**
    * tests whether expiry of a conflated entry in the region queue occurs as expected
    */
   @Test
-  public void testExpiryPositiveWithConflation()
-      throws InterruptedException, IOException, ClassNotFoundException {
+  public void testExpiryPositiveWithConflation() throws Exception {
     HARegionQueueAttributes haa = new HARegionQueueAttributes();
     haa.setExpiryTime(1);
-    HARegionQueue regionqueue = createHARegionQueue("testing", haa);
+
+    HARegionQueue regionQueue = createHARegionQueue(this.testName.getMethodName(), haa);
     long start = System.currentTimeMillis();
-    regionqueue.put(
-        new ConflatableObject("key", "value", new EventID(new byte[] {1}, 1, 1), true, "testing"));
-    regionqueue.put(new ConflatableObject("key", "newValue", new EventID(new byte[] {1}, 1, 2),
-        true, "testing"));
-    assertTrue(
+
+    regionQueue.put(new ConflatableObject("key", "value", new EventID(new byte[] {1}, 1, 1), true,
+        this.testName.getMethodName()));
+
+    regionQueue.put(new ConflatableObject("key", "newValue", new EventID(new byte[] {1}, 1, 2),
+        true, this.testName.getMethodName()));
+
+    assertThat(
         " Expected region size not to be zero since expiry time has not been exceeded but it is not so ",
-        !(regionqueue.size() == 0));
-    assertTrue(
+        !regionQueue.isEmpty(), is(true));
+    assertThat(
         " Expected the available id's size not  to be zero since expiry time has not  been exceeded but it is not so ",
-        !(regionqueue.getAvalaibleIds().size() == 0));
-    assertTrue(
+        !regionQueue.getAvalaibleIds().isEmpty(), is(true));
+    assertThat(
         " Expected conflation map size not  to be zero since expiry time has not been exceeded but it is not so "
-            + ((((Map) (regionqueue.getConflationMapForTesting().get("testing"))).get("key"))),
-        !((((Map) (regionqueue.getConflationMapForTesting().get("testing"))).get("key")) == null));
-    assertTrue(
+            + ((Map) regionQueue.getConflationMapForTesting().get(this.testName.getMethodName()))
+                .get("key"),
+        ((Map) regionQueue.getConflationMapForTesting().get(this.testName.getMethodName()))
+            .get("key"),
+        not(sameInstance(null)));
+    assertThat(
         " Expected eventID map size not to be zero since expiry time has not been exceeded but it is not so ",
-        !(regionqueue.getEventsMapForTesting().size() == 0));
+        !regionQueue.getEventsMapForTesting().isEmpty(), is(true));
 
     waitAtLeast(1000, start, () -> {
-      assertEquals(Collections.EMPTY_SET, regionqueue.getRegion().keys());
-      assertEquals(Collections.EMPTY_SET, regionqueue.getAvalaibleIds());
-      assertEquals(Collections.EMPTY_MAP, regionqueue.getConflationMapForTesting().get("testing"));
-      assertEquals(Collections.EMPTY_MAP, regionqueue.getEventsMapForTesting());
+      assertThat(regionQueue.getRegion().keys(), is(Collections.emptySet()));
+      assertThat(regionQueue.getAvalaibleIds(), is(Collections.emptySet()));
+      assertThat(regionQueue.getConflationMapForTesting().get(this.testName.getMethodName()),
+          is(Collections.emptyMap()));
+      assertThat(regionQueue.getEventsMapForTesting(), is(Collections.emptyMap()));
     });
   }
 
@@ -486,38 +285,37 @@ public class HARegionQueueJUnitTest {
    * tests a ThreadId not being expired if it was updated
    */
   @Test
-  public void testNoExpiryOfThreadId() {
-    try {
-      HARegionQueueAttributes haa = new HARegionQueueAttributes();
-      haa.setExpiryTime(45);
-      // RegionQueue regionqueue = new HARegionQueue("testing", cache, haa);
-      HARegionQueue regionqueue = createHARegionQueue("testing", haa);
-      EventID ev1 = new EventID(new byte[] {1}, 1, 1);
-      EventID ev2 = new EventID(new byte[] {1}, 1, 2);
-      Conflatable cf1 = new ConflatableObject("key", "value", ev1, true, "testing");
-      Conflatable cf2 = new ConflatableObject("key", "value2", ev2, true, "testing");
-      regionqueue.put(cf1);
-      final long tailKey = regionqueue.tailKey.get();
-      regionqueue.put(cf2);
-      // Invalidate will trigger the expiration of the entry
-      // See HARegionQueue.createCacheListenerForHARegion
-      regionqueue.getRegion().invalidate(tailKey);
-      assertTrue(
-          " Expected region size not to be zero since expiry time has not been exceeded but it is not so ",
-          !(regionqueue.size() == 0));
-      assertTrue(" Expected the available id's size not  to have counter 1 but it has ",
-          !(regionqueue.getAvalaibleIds().contains(new Long(1))));
-      assertTrue(" Expected the available id's size to have counter 2 but it does not have ",
-          (regionqueue.getAvalaibleIds().contains(new Long(2))));
-      assertTrue(" Expected eventID map not to have the first event, but it has",
-          !(regionqueue.getCurrentCounterSet(ev1).contains(new Long(1))));
-      assertTrue(" Expected eventID map to have the second event, but it does not",
-          (regionqueue.getCurrentCounterSet(ev2).contains(new Long(2))));
-    }
+  public void testNoExpiryOfThreadId() throws Exception {
+    HARegionQueueAttributes haa = new HARegionQueueAttributes();
+    haa.setExpiryTime(45);
 
-    catch (Exception e) {
-      throw new AssertionError("test failed due to ", e);
-    }
+    HARegionQueue regionQueue = createHARegionQueue(this.testName.getMethodName(), haa);
+    EventID ev1 = new EventID(new byte[] {1}, 1, 1);
+    EventID ev2 = new EventID(new byte[] {1}, 1, 2);
+    Conflatable cf1 =
+        new ConflatableObject("key", "value", ev1, true, this.testName.getMethodName());
+    Conflatable cf2 =
+        new ConflatableObject("key", "value2", ev2, true, this.testName.getMethodName());
+
+    regionQueue.put(cf1);
+    long tailKey = regionQueue.tailKey.get();
+    regionQueue.put(cf2);
+
+    // Invalidate will trigger the expiration of the entry
+    // See HARegionQueue.createCacheListenerForHARegion
+    regionQueue.getRegion().invalidate(tailKey);
+
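+    // only the invalidated first entry is gone; the second entry and its counter must remain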
+    assertThat(
+        " Expected region size not to be zero since expiry time has not been exceeded but it is not so ",
+        !regionQueue.isEmpty(), is(true));
+    assertThat(" Expected the available id's size not  to have counter 1 but it has ",
+        !regionQueue.getAvalaibleIds().contains(1L), is(true));
+    assertThat(" Expected the available id's size to have counter 2 but it does not have ",
+        regionQueue.getAvalaibleIds().contains(2L), is(true));
+    assertThat(" Expected eventID map not to have the first event, but it has",
+        !regionQueue.getCurrentCounterSet(ev1).contains(1L), is(true));
+    assertThat(" Expected eventID map to have the second event, but it does not",
+        regionQueue.getCurrentCounterSet(ev2).contains(2L), is(true));
   }
 
   /**
@@ -525,66 +323,64 @@ public class HARegionQueueJUnitTest {
    * being put in the queue
    */
   @Test
-  public void testQRMComingBeforeLocalPut() {
-    try {
-      // RegionQueue regionqueue = new HARegionQueue("testing", cache);
-      HARegionQueue regionqueue = createHARegionQueue("testing");
-      EventID id = new EventID(new byte[] {1}, 1, 1);
-      regionqueue.removeDispatchedEvents(id);
-      regionqueue.put(new ConflatableObject("key", "value", id, true, "testing"));
-      assertTrue(" Expected key to be null since QRM for the message id had already arrived ",
-          !regionqueue.getRegion().containsKey(new Long(1)));
-    } catch (Exception e) {
-      throw new AssertionError("test failed due to ", e);
-    }
+  public void testQRMComingBeforeLocalPut() throws Exception {
+    HARegionQueue regionQueue = createHARegionQueue(this.testName.getMethodName());
+    EventID id = new EventID(new byte[] {1}, 1, 1);
+
+    regionQueue.removeDispatchedEvents(id);
+    regionQueue.put(new ConflatableObject("key", "value", id, true, this.testName.getMethodName()));
+
+    assertThat(" Expected key to be null since QRM for the message id had already arrived ",
+        !regionQueue.getRegion().containsKey(1L), is(true));
   }
 
   /**
    * test verifies correct expiry of ThreadIdentifier in the HARQ if no corresponding put comes
    */
   @Test
-  public void testOnlyQRMComing() throws InterruptedException, IOException, ClassNotFoundException {
+  public void testOnlyQRMComing() throws Exception {
     HARegionQueueAttributes harqAttr = new HARegionQueueAttributes();
     harqAttr.setExpiryTime(1);
-    // RegionQueue regionqueue = new HARegionQueue("testing", cache, harqAttr);
-    HARegionQueue regionqueue = createHARegionQueue("testing", harqAttr);
+
+    HARegionQueue regionQueue = createHARegionQueue(this.testName.getMethodName(), harqAttr);
     EventID id = new EventID(new byte[] {1}, 1, 1);
     long start = System.currentTimeMillis();
-    regionqueue.removeDispatchedEvents(id);
-    assertTrue(" Expected testingID to be present since only QRM achieved ",
-        regionqueue.getRegion().containsKey(new ThreadIdentifier(new byte[] {1}, 1)));
+
+    regionQueue.removeDispatchedEvents(id);
+
+    assertThat(" Expected testingID to be present since only QRM achieved ",
+        regionQueue.getRegion().containsKey(new ThreadIdentifier(new byte[] {1}, 1)), is(true));
+
     waitAtLeast(1000, start,
-        () -> assertTrue(
+        () -> assertThat(
             " Expected testingID not to be present since it should have expired after 2.5 seconds",
-            !regionqueue.getRegion().containsKey(new ThreadIdentifier(new byte[] {1}, 1))));
+            !regionQueue.getRegion().containsKey(new ThreadIdentifier(new byte[] {1}, 1)),
+            is(true)));
   }
 
   /**
    * test all relevant data structures are updated on a local put
    */
   @Test
-  public void testPutPath() {
-    try {
-      HARegionQueue regionqueue = createHARegionQueue("testing");
-      Conflatable cf =
-          new ConflatableObject("key", "value", new EventID(new byte[] {1}, 1, 1), true, "testing");
-      regionqueue.put(cf);
-      assertTrue(" Expected region peek to return cf but it is not so ",
-          (regionqueue.peek().equals(cf)));
-      assertTrue(
-          " Expected the available id's size not  to be zero since expiry time has not  been exceeded but it is not so ",
-          !(regionqueue.getAvalaibleIds().size() == 0));
-      assertTrue(
-          " Expected conflation map to have entry for this key since expiry time has not been exceeded but it is not so ",
-          ((((Map) (regionqueue.getConflationMapForTesting().get("testing"))).get("key"))
-              .equals(new Long(1))));
-      assertTrue(
-          " Expected eventID map size not to be zero since expiry time has not been exceeded but it is not so ",
-          !(regionqueue.getEventsMapForTesting().size() == 0));
+  public void testPutPath() throws Exception {
+    HARegionQueue regionQueue = createHARegionQueue(this.testName.getMethodName());
+    Conflatable cf = new ConflatableObject("key", "value", new EventID(new byte[] {1}, 1, 1), true,
+        this.testName.getMethodName());
 
-    } catch (Exception e) {
-      throw new AssertionError("Exception occurred in test due to ", e);
-    }
+    regionQueue.put(cf);
+
+    assertThat(" Expected region peek to return cf but it is not so ", regionQueue.peek(), is(cf));
+    assertThat(
+        " Expected the available id's size not  to be zero since expiry time has not  been exceeded but it is not so ",
+        !regionQueue.getAvalaibleIds().isEmpty(), is(true));
+    assertThat(
+        " Expected conflation map to have entry for this key since expiry time has not been exceeded but it is not so ",
+        ((Map) regionQueue.getConflationMapForTesting().get(this.testName.getMethodName()))
+            .get("key"),
+        is(1L));
+    assertThat(
+        " Expected eventID map size not to be zero since expiry time has not been exceeded but it is not so ",
+        !regionQueue.getEventsMapForTesting().isEmpty(), is(true));
   }
 
   /**
@@ -592,58 +388,64 @@ public class HARegionQueueJUnitTest {
    * there - verify the next five entries and their relevant data is present
    */
   @Test
-  public void testQRMDispatch() {
-    try {
-      HARegionQueue regionqueue = createHARegionQueue("testing");
-      Conflatable[] cf = new Conflatable[10];
-      // put 10 conflatable objects
-      for (int i = 0; i < 10; i++) {
-        cf[i] = new ConflatableObject("key" + i, "value", new EventID(new byte[] {1}, 1, i), true,
-            "testing");
-        regionqueue.put(cf[i]);
-      }
-      // remove the first 5 by giving the right sequence id
-      regionqueue.removeDispatchedEvents(new EventID(new byte[] {1}, 1, 4));
-      // verify 1-5 not in region
-      for (long i = 1; i < 6; i++) {
-        assertTrue(!regionqueue.getRegion().containsKey(new Long(i)));
-      }
-      // verify 6-10 still in region queue
-      for (long i = 6; i < 11; i++) {
-        assertTrue(regionqueue.getRegion().containsKey(new Long(i)));
-      }
-      // verify 1-5 not in conflation map
-      for (long i = 0; i < 5; i++) {
-        assertTrue(!((Map) regionqueue.getConflationMapForTesting().get("testing"))
-            .containsKey("key" + i));
-      }
-      // verify 6-10 in conflation map
-      for (long i = 5; i < 10; i++) {
-        assertTrue(
-            ((Map) regionqueue.getConflationMapForTesting().get("testing")).containsKey("key" + i));
-      }
+  public void testQRMDispatch() throws Exception {
+    HARegionQueue regionQueue = createHARegionQueue(this.testName.getMethodName());
+    Conflatable[] cf = new Conflatable[10];
+
+    // put 10 conflatable objects
+    for (int i = 0; i < 10; i++) {
+      cf[i] = new ConflatableObject("key" + i, "value", new EventID(new byte[] {1}, 1, i), true,
+          this.testName.getMethodName());
+      regionQueue.put(cf[i]);
+    }
 
-      EventID eid = new EventID(new byte[] {1}, 1, 6);
-      // verify 1-5 not in eventMap
-      for (long i = 1; i < 6; i++) {
-        assertTrue(!regionqueue.getCurrentCounterSet(eid).contains(new Long(i)));
-      }
-      // verify 6-10 in event Map
-      for (long i = 6; i < 11; i++) {
-        assertTrue(regionqueue.getCurrentCounterSet(eid).contains(new Long(i)));
-      }
+    // remove the first 5 by giving the right sequence id
+    regionQueue.removeDispatchedEvents(new EventID(new byte[] {1}, 1, 4));
 
-      // verify 1-5 not in available Id's map
-      for (long i = 1; i < 6; i++) {
-        assertTrue(!regionqueue.getAvalaibleIds().contains(new Long(i)));
-      }
+    // verify 1-5 not in region
+    for (int i = 1; i < 6; i++) {
+      assertThat(!regionQueue.getRegion().containsKey((long) i), is(true));
+    }
 
-      // verify 6-10 in available id's map
-      for (long i = 6; i < 11; i++) {
-        assertTrue(regionqueue.getAvalaibleIds().contains(new Long(i)));
-      }
-    } catch (Exception e) {
-      throw new AssertionError("Exception occurred in test due to ", e);
+    // verify 6-10 still in region queue
+    for (int i = 6; i < 11; i++) {
+      assertThat(regionQueue.getRegion().containsKey((long) i), is(true));
+    }
+
+    // verify 1-5 not in conflation map
+    for (int i = 0; i < 5; i++) {
+      assertThat(
+          !((Map) regionQueue.getConflationMapForTesting().get(this.testName.getMethodName()))
+              .containsKey("key" + i),
+          is(true));
+    }
+
+    // verify 6-10 in conflation map
+    for (int i = 5; i < 10; i++) {
+      assertThat(((Map) regionQueue.getConflationMapForTesting().get(this.testName.getMethodName()))
+          .containsKey("key" + i), is(true));
+    }
+
+    EventID eid = new EventID(new byte[] {1}, 1, 6);
+
+    // verify 1-5 not in eventMap
+    for (int i = 1; i < 6; i++) {
+      assertThat(!regionQueue.getCurrentCounterSet(eid).contains((long) i), is(true));
+    }
+
+    // verify 6-10 in event Map
+    for (int i = 6; i < 11; i++) {
+      assertThat(regionQueue.getCurrentCounterSet(eid).contains((long) i), is(true));
+    }
+
+    // verify 1-5 not in available Id's map
+    for (int i = 1; i < 6; i++) {
+      assertThat(!regionQueue.getAvalaibleIds().contains((long) i), is(true));
+    }
+
+    // verify 6-10 in available id's map
+    for (int i = 6; i < 11; i++) {
+      assertThat(regionQueue.getAvalaibleIds().contains((long) i), is(true));
     }
   }
 
@@ -652,68 +454,74 @@ public class HARegionQueueJUnitTest {
    * 1-7 not there - verify data for 8-10 is there
    */
   @Test
-  public void testQRMBeforePut() {
-    try {
-      HARegionQueue regionqueue = createHARegionQueue("testing");
+  public void testQRMBeforePut() throws Exception {
+    HARegionQueue regionQueue = createHARegionQueue(this.testName.getMethodName());
 
-      EventID[] ids = new EventID[10];
+    EventID[] ids = new EventID[10];
 
-      for (int i = 0; i < 10; i++) {
-        ids[i] = new EventID(new byte[] {1}, 1, i);
-      }
+    for (int i = 0; i < 10; i++) {
+      ids[i] = new EventID(new byte[] {1}, 1, i);
+    }
 
-      // first get the qrm message for the seventh id
-      regionqueue.removeDispatchedEvents(ids[6]);
-      Conflatable[] cf = new Conflatable[10];
-      // put 10 conflatable objects
-      for (int i = 0; i < 10; i++) {
-        cf[i] = new ConflatableObject("key" + i, "value", ids[i], true, "testing");
-        regionqueue.put(cf[i]);
-      }
+    // first get the qrm message for the seventh id
+    regionQueue.removeDispatchedEvents(ids[6]);
+    Conflatable[] cf = new Conflatable[10];
 
-      // verify 1-7 not in region
-      Set values = (Set) regionqueue.getRegion().values();
-      for (int i = 0; i < 7; i++) {
-        System.out.println(i);
-        assertTrue(!values.contains(cf[i]));
-      }
-      // verify 8-10 still in region queue
-      for (int i = 7; i < 10; i++) {
-        System.out.println(i);
-        assertTrue(values.contains(cf[i]));
-      }
-      // verify 1-8 not in conflation map
-      for (long i = 0; i < 7; i++) {
-        assertTrue(!((Map) regionqueue.getConflationMapForTesting().get("testing"))
-            .containsKey("key" + i));
-      }
-      // verify 8-10 in conflation map
-      for (long i = 7; i < 10; i++) {
-        assertTrue(
-            ((Map) regionqueue.getConflationMapForTesting().get("testing")).containsKey("key" + i));
-      }
+    // put 10 conflatable objects
+    for (int i = 0; i < 10; i++) {
+      cf[i] =
+          new ConflatableObject("key" + i, "value", ids[i], true, this.testName.getMethodName());
+      regionQueue.put(cf[i]);
+    }
 
-      EventID eid = new EventID(new byte[] {1}, 1, 6);
-      // verify 1-7 not in eventMap
-      for (long i = 4; i < 11; i++) {
-        assertTrue(!regionqueue.getCurrentCounterSet(eid).contains(new Long(i)));
-      }
-      // verify 8-10 in event Map
-      for (long i = 1; i < 4; i++) {
-        assertTrue(regionqueue.getCurrentCounterSet(eid).contains(new Long(i)));
-      }
+    // verify 1-7 not in region
+    Set values = (Set) regionQueue.getRegion().values();
 
-      // verify 1-7 not in available Id's map
-      for (long i = 4; i < 11; i++) {
-        assertTrue(!regionqueue.getAvalaibleIds().contains(new Long(i)));
-      }
+    for (int i = 0; i < 7; i++) {
+      System.out.println(i);
+      assertThat(!values.contains(cf[i]), is(true));
+    }
 
-      // verify 8-10 in available id's map
-      for (long i = 1; i < 4; i++) {
-        assertTrue(regionqueue.getAvalaibleIds().contains(new Long(i)));
-      }
-    } catch (Exception e) {
-      throw new AssertionError("Exception occurred in test due to ", e);
+    // verify 8-10 still in region queue
+    for (int i = 7; i < 10; i++) {
+      System.out.println(i);
+      assertThat(values.contains(cf[i]), is(true));
+    }
+
+    // verify 1-7 not in conflation map
+    for (int i = 0; i < 7; i++) {
+      assertThat(
+          !((Map) regionQueue.getConflationMapForTesting().get(this.testName.getMethodName()))
+              .containsKey("key" + i),
+          is(true));
+    }
+
+    // verify 8-10 in conflation map
+    for (int i = 7; i < 10; i++) {
+      assertThat(((Map) regionQueue.getConflationMapForTesting().get(this.testName.getMethodName()))
+          .containsKey("key" + i), is(true));
+    }
+
+    EventID eid = new EventID(new byte[] {1}, 1, 6);
+
+    // verify 1-7 not in eventMap
+    for (int i = 4; i < 11; i++) {
+      assertThat(!regionQueue.getCurrentCounterSet(eid).contains((long) i), is(true));
+    }
+
+    // verify 8-10 in event Map
+    for (int i = 1; i < 4; i++) {
+      assertThat(regionQueue.getCurrentCounterSet(eid).contains((long) i), is(true));
+    }
+
+    // verify 1-7 not in available Id's map
+    for (int i = 4; i < 11; i++) {
+      assertThat(!regionQueue.getAvalaibleIds().contains((long) i), is(true));
+    }
+
+    // verify 8-10 in available id's map
+    for (int i = 1; i < 4; i++) {
+      assertThat(regionQueue.getAvalaibleIds().contains((long) i), is(true));
     }
   }
 
@@ -721,33 +529,33 @@ public class HARegionQueueJUnitTest {
    * test to verify conflation happens as expected
    */
   @Test
-  public void testConflation() {
-    try {
-      HARegionQueue regionqueue = createHARegionQueue("testing");
-      EventID ev1 = new EventID(new byte[] {1}, 1, 1);
-      EventID ev2 = new EventID(new byte[] {1}, 2, 2);
-      Conflatable cf1 = new ConflatableObject("key", "value", ev1, true, "testing");
-      Conflatable cf2 = new ConflatableObject("key", "value2", ev2, true, "testing");
-      regionqueue.put(cf1);
-      Map conflationMap = regionqueue.getConflationMapForTesting();
-      assertTrue(((Map) (conflationMap.get("testing"))).get("key").equals(new Long(1)));
-      regionqueue.put(cf2);
-      // verify the conflation map has recorded the new key
-      assertTrue(((Map) (conflationMap.get("testing"))).get("key").equals(new Long(2)));
-      // the old key should not be present
-      assertTrue(!regionqueue.getRegion().containsKey(new Long(1)));
-      // available ids should not contain the old id (the old position)
-      assertTrue(!regionqueue.getAvalaibleIds().contains(new Long(1)));
-      // available id should have the new id (the new position)
-      assertTrue(regionqueue.getAvalaibleIds().contains(new Long(2)));
-      // events map should not contain the old position
-      assertTrue(regionqueue.getCurrentCounterSet(ev1).isEmpty());
-      // events map should contain the new position
-      assertTrue(regionqueue.getCurrentCounterSet(ev2).contains(new Long(2)));
-
-    } catch (Exception e) {
-      throw new AssertionError("Exception occurred in test due to ", e);
-    }
+  public void testConflation() throws Exception {
+    HARegionQueue regionQueue = createHARegionQueue(this.testName.getMethodName());
+    EventID ev1 = new EventID(new byte[] {1}, 1, 1);
+    EventID ev2 = new EventID(new byte[] {1}, 2, 2);
+    Conflatable cf1 =
+        new ConflatableObject("key", "value", ev1, true, this.testName.getMethodName());
+    Conflatable cf2 =
+        new ConflatableObject("key", "value2", ev2, true, this.testName.getMethodName());
+    regionQueue.put(cf1);
+
+    Map conflationMap = regionQueue.getConflationMapForTesting();
+    assertThat(((Map) conflationMap.get(this.testName.getMethodName())).get("key"), is(1L));
+
+    regionQueue.put(cf2);
+
+    // verify the conflation map has recorded the new key
+    assertThat(((Map) conflationMap.get(this.testName.getMethodName())).get("key"), is(2L));
+    // the old key should not be present
+    assertThat(!regionQueue.getRegion().containsKey(1L), is(true));
+    // available ids should not contain the old id (the old position)
+    assertThat(!regionQueue.getAvalaibleIds().contains(1L), is(true));
+    // available id should have the new id (the new position)
+    assertThat(regionQueue.getAvalaibleIds().contains(2L), is(true));
+    // events map should not contain the old position
+    assertThat(regionQueue.getCurrentCounterSet(ev1).isEmpty(), is(true));
+    // events map should contain the new position
+    assertThat(regionQueue.getCurrentCounterSet(ev2).contains(2L), is(true));
   }
 
   /**
@@ -755,97 +563,58 @@ public class HARegionQueueJUnitTest {
    * events which are of ID greater than that contained in QRM should stay
    */
   @Test
-  public void testQRM() {
-    try {
-      RegionQueue regionqueue = createHARegionQueue("testing");
-      for (int i = 0; i < 10; ++i) {
-        regionqueue.put(new ConflatableObject("key" + (i + 1), "value",
-            new EventID(new byte[] {1}, 1, i + 1), true, "testing"));
-      }
-      EventID qrmID = new EventID(new byte[] {1}, 1, 5);
-      ((HARegionQueue) regionqueue).removeDispatchedEvents(qrmID);
-      Map conflationMap = ((HARegionQueue) regionqueue).getConflationMapForTesting();
-      assertTrue(((Map) (conflationMap.get("testing"))).size() == 5);
-
-      Set availableIDs = ((HARegionQueue) regionqueue).getAvalaibleIds();
-      Set counters = ((HARegionQueue) regionqueue).getCurrentCounterSet(qrmID);
-      assertTrue(availableIDs.size() == 5);
-      assertTrue(counters.size() == 5);
-      for (int i = 5; i < 10; ++i) {
-        assertTrue(((Map) (conflationMap.get("testing"))).containsKey("key" + (i + 1)));
-        assertTrue(availableIDs.contains(new Long((i + 1))));
-        assertTrue(counters.contains(new Long((i + 1))));
-      }
-      Region rgn = ((HARegionQueue) regionqueue).getRegion();
-      assertTrue(rgn.keySet().size() == 6);
+  public void testQRM() throws Exception {
+    RegionQueue regionqueue = createHARegionQueue(this.testName.getMethodName());
 
-    } catch (Exception e) {
-      throw new AssertionError("Exception occurred in test due to ", e);
+    for (int i = 0; i < 10; ++i) {
+      regionqueue.put(new ConflatableObject("key" + (i + 1), "value",
+          new EventID(new byte[] {1}, 1, i + 1), true, this.testName.getMethodName()));
     }
-  }
 
-  protected static HARegionQueue hrqFortestSafeConflationRemoval;
+    EventID qrmID = new EventID(new byte[] {1}, 1, 5);
+    ((HARegionQueue) regionqueue).removeDispatchedEvents(qrmID);
+    Map conflationMap = ((HARegionQueue) regionqueue).getConflationMapForTesting();
+    assertThat(((Map) conflationMap.get(this.testName.getMethodName())).size(), is(5));
 
-  /**
-   * This test tests safe removal from the conflation map. i.e operations should only remove old
-   * values and not the latest value
-   */
-  @Test
-  public void testSafeConflationRemoval() {
-    try {
-      hrqFortestSafeConflationRemoval = new HARQTestClass("testSafeConflationRemoval",
+    Set availableIDs = ((HARegionQueue) regionqueue).getAvalaibleIds();
+    Set counters = ((HARegionQueue) regionqueue).getCurrentCounterSet(qrmID);
 
-          cache, this);
-      Conflatable cf1 = new ConflatableObject("key1", "value", new EventID(new byte[] {1}, 1, 1),
-          true, "testSafeConflationRemoval");
-      hrqFortestSafeConflationRemoval.put(cf1);
-      hrqFortestSafeConflationRemoval.removeDispatchedEvents(new EventID(new byte[] {1}, 1, 1));
-      Map map =
-
-          (Map) hrqFortestSafeConflationRemoval.getConflationMapForTesting()
-              .get("testSafeConflationRemoval");
-      assertTrue(
-          "Expected the counter to be 2 since it should not have been deleted but it is not so ",
-          map.get("key1").equals(new Long(2)));
-      hrqFortestSafeConflationRemoval = null;
-    } catch (Exception e) {
-      throw new AssertionError("Test failed due to ", e);
+    assertThat(availableIDs.size(), is(5));
+    assertThat(counters.size(), is(5));
+
+    for (int i = 5; i < 10; ++i) {
+      assertThat(
+          ((Map) (conflationMap.get(this.testName.getMethodName()))).containsKey("key" + (i + 1)),
+          is(true));
+      assertThat(availableIDs.contains((long) (i + 1)), is(true));
+      assertThat(counters.contains((long) (i + 1)), is(true));
     }
+
+    Region rgn = ((HARegionQueue) regionqueue).getRegion();
+    assertThat(rgn.keySet().size(), is(6));
   }
 
   /**
-   * Extends HARegionQueue for testing purposes. used by testSafeConflationRemoval
+   * tests safe removal from the conflation map, i.e. operations should only remove old values
+   * and not the latest value
    */
-  static class HARQTestClass extends HARegionQueue.TestOnlyHARegionQueue {
+  @Test
+  public void testSafeConflationRemoval() throws Exception {
+    hrqForTestSafeConflationRemoval = new HARQTestClass("testSafeConflationRemoval", this.cache);
+    Conflatable cf1 = new ConflatableObject("key1", "value", new EventID(new byte[] {1}, 1, 1),
+        true, "testSafeConflationRemoval");
 
-    public HARQTestClass(String REGION_NAME, InternalCache cache, HARegionQueueJUnitTest test)
-        throws IOException, ClassNotFoundException, CacheException, InterruptedException {
-      super(REGION_NAME, cache);
-    }
+    hrqForTestSafeConflationRemoval.put(cf1);
+    hrqForTestSafeConflationRemoval.removeDispatchedEvents(new EventID(new byte[] {1}, 1, 1));
 
-    ConcurrentMap createConcurrentMap() {
-      return new ConcHashMap();
-    }
-  }
+    Map map = (Map) hrqForTestSafeConflationRemoval.getConflationMapForTesting()
+        .get("testSafeConflationRemoval");
 
-  /**
-   * Used to override the remove method for testSafeConflationRemoval
-   */
-  static class ConcHashMap extends ConcurrentHashMap implements ConcurrentMap {
-    public boolean remove(Object arg0, Object arg1) {
-      Conflatable cf2 = new ConflatableObject("key1", "value2", new EventID(new byte[] {1}, 1, 2),
-          true, "testSafeConflationRemoval");
-      try {
-        hrqFortestSafeConflationRemoval.put(cf2);
-      } catch (Exception e) {
-        throw new AssertionError("Exception occurred in trying to put ", e);
-      }
-      return super.remove(arg0, arg1);
-    }
+    assertThat(
+        "Expected the counter to be 2 since it should not have been deleted but it is not so ",
+        map.get("key1"), is(2L));
   }
 
-  static List list1;
-
   /**
    * This test tests remove operation is causing the insertion of sequence ID for existing
    * ThreadIdentifier object and concurrently the QRM thread is iterating over the Map to form the
@@ -864,80 +633,86 @@ public class HARegionQueueJUnitTest {
    * It is then verified to see that all the sequence should be greater than x
    */
   @Test
-  public void testConcurrentDispatcherAndRemovalForSameRegionSameThreadId() {
-    try {
-      final long numberOfIterations = 1000;
-      final HARegionQueue hrq = createHARegionQueue("testConcurrentDispatcherAndRemoval");
-      HARegionQueue.stopQRMThread();
-      final ThreadIdentifier[] ids = new ThreadIdentifier[(int) numberOfIterations];
-      for (int i = 0; i < numberOfIterations; i++) {
-        ids[i] = new ThreadIdentifier(new byte[] {1}, i);
-        hrq.addDispatchedMessage(ids[i], i);
-      }
-      Thread thread1 = new Thread() {
-        public void run() {
-          try {
-            Thread.sleep(600);
-          } catch (InterruptedException e) {
-            fail("interrupted");
-          }
-          list1 = HARegionQueue.createMessageListForTesting();
-        };
-      };
-      Thread thread2 = new Thread() {
-        public void run() {
-          try {
-            Thread.sleep(480);
-          } catch (InterruptedException e) {
-            fail("interrupted");
-          }
-          for (int i = 0; i < numberOfIterations; i++) {
-            hrq.addDispatchedMessage(ids[i], i + numberOfIterations);
-          }
-        };
-      };
-      thread1.start();
-      thread2.start();
-      ThreadUtils.join(thread1, 30 * 1000);
-      ThreadUtils.join(thread2, 30 * 1000);
-      List list2 = HARegionQueue.createMessageListForTesting();
-      Iterator iterator = list1.iterator();
-      boolean doOnce = false;
-      EventID id = null;
-      Map map = new HashMap();
-      while (iterator.hasNext()) {
-        if (!doOnce) {
-          iterator.next();
-          iterator.next();
-          doOnce = true;
-        } else {
-          id = (EventID) iterator.next();
-          map.put(new Long(id.getThreadID()), new Long(id.getSequenceID()));
+  public void testConcurrentDispatcherAndRemovalForSameRegionSameThreadId() throws Exception {
+    long numberOfIterations = 1000;
+    HARegionQueue hrq = createHARegionQueue(this.testName.getMethodName());
+    HARegionQueue.stopQRMThread();
+    ThreadIdentifier[] ids = new ThreadIdentifier[(int) numberOfIterations];
+
+    for (int i = 0; i < numberOfIterations; i++) {
+      ids[i] = new ThreadIdentifier(new byte[] {1}, i);
+      hrq.addDispatchedMessage(ids[i], i);
+    }
+
+    Thread thread1 = new Thread() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(600);
+        } catch (InterruptedException e) {
+          errorCollector.addError(e);
         }
+        list1 = HARegionQueue.createMessageListForTesting();
       }
-      iterator = list2.iterator();
-      doOnce = false;
-      id = null;
-      while (iterator.hasNext()) {
-        if (!doOnce) {
-          iterator.next();
-          iterator.next();
-          doOnce = true;
-        } else {
-          id = (EventID) iterator.next();
-          map.put(new Long(id.getThreadID()), new Long(id.getSequenceID()));
+    };
+
+    Thread thread2 = new Thread() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(480);
+        } catch (InterruptedException e) {
+          errorCollector.addError(e);
+        }
+        for (int i = 0; i < numberOfIterations; i++) {
+          hrq.addDispatchedMessage(ids[i], i + numberOfIterations);
         }
       }
-      iterator = map.values().iterator();
-      Long max = new Long(numberOfIterations);
-      Long next;
-      while (iterator.hasNext()) {
-        next = ((Long) iterator.next());
-        assertTrue(" Expected all the sequence ID's to be greater than " + max
-            + " but it is not so. Got sequence id " + next, next.compareTo(max) >= 0);
+    };
+
+    thread1.start();
+    thread2.start();
+    ThreadUtils.join(thread1, 30 * 1000);
+    ThreadUtils.join(thread2, 30 * 1000);
+    List list2 = HARegionQueue.createMessageListForTesting();
+    Iterator iterator = list1.iterator();
+    boolean doOnce = false;
+    EventID id;
+    Map map = new HashMap();
+
+    while (iterator.hasNext()) {
+      if (!doOnce) {
+        iterator.next();
+        iterator.next();
+        doOnce = true;
+      } else {
+        id = (EventID) iterator.next();
+        map.put(new Long(id.getThreadID()), id.getSequenceID());
+      }
+    }
+
+    iterator = list2.iterator();
+    doOnce = false;
+
+    while (iterator.hasNext()) {
+      if (!doOnce) {
+        iterator.next();
+        iterator.next();
+        doOnce = true;
+      } else {
+        id = (EventID) iterator.next();
+        map.put(id.getThreadID(), id.getSequenceID());
       }
-    } catch (Exception e) {
-      throw new AssertionError("Test failed due to : ", e);
+    }
+
+    iterator = map.values().iterator();
+    Long max = numberOfIterations;
+    while (iterator.hasNext()) {
+      Long next = (Long) iterator.next();
+      assertThat(
+          " Expected all the sequence ID's to be greater than " + max
+              + " but it is not so. Got sequence id " + next,
+          next.compareTo(max), greaterThanOrEqualTo(0));
     }
   }
 
@@ -958,77 +733,81 @@ public class HARegionQueueJUnitTest {
    * It is then verified to see that the map size should be 2x
    */
   @Test
-  public void testConcurrentDispatcherAndRemovalForSameRegionDifferentThreadId() {
-    try {
-      final long numberOfIterations = 1000;
-      final HARegionQueue hrq = createHARegionQueue("testConcurrentDispatcherAndRemoval");
-      HARegionQueue.stopQRMThread();
-      final ThreadIdentifier[] ids = new ThreadIdentifier[(int) numberOfIterations];
-      for (int i = 0; i < numberOfIterations; i++) {
-        ids[i] = new ThreadIdentifier(new byte[] {1}, i);
-        hrq.addDispatchedMessage(ids[i], i);
-      }
-      Thread thread1 = new Thread() {
-        public void run() {
-          try {
-            Thread.sleep(600);
-          } catch (InterruptedException e) {
-            fail("interrupted");
-          }
-          list1 = HARegionQueue.createMessageListForTesting();
-        };
-      };
-      Thread thread2 = new Thread() {
-        public void run() {
-          try {
-            Thread.sleep(480);
-          } catch (InterruptedException e) {
-            fail("interrupted");
-          }
-          for (int i = 0; i < numberOfIterations; i++) {
-            ids[i] = new ThreadIdentifier(new byte[] {1}, i + numberOfIterations);
-            hrq.addDispatchedMessage(ids[i], i + numberOfIterations);
-          }
-        };
-      };
-      thread1.start();
-      thread2.start();
-      ThreadUtils.join(thread1, 30 * 1000);
-      ThreadUtils.join(thread2, 30 * 1000);
-      List list2 = HARegionQueue.createMessageListForTesting();
-      Iterator iterator = list1.iterator();
-      boolean doOnce = false;
-      EventID id = null;
-      Map map = new HashMap();
-      while (iterator.hasNext()) {
-        if (!doOnce) {
-          iterator.next();
-          iterator.next();
-          doOnce = true;
-        } else {
-          id = (EventID) iterator.next();
-          map.put(new Long(id.getThreadID()), new Long(id.getSequenceID()));
+  public void testConcurrentDispatcherAndRemovalForSameRegionDifferentThreadId() throws Exception {
+    int numberOfIterations = 1000;
+    HARegionQueue hrq = createHARegionQueue(this.testName.getMethodName());
+    HARegionQueue.stopQRMThread();
+    ThreadIdentifier[] ids = new ThreadIdentifier[(int) numberOfIterations];
+
+    for (int i = 0; i < numberOfIterations; i++) {
+      ids[i] = new ThreadIdentifier(new byte[] {1}, i);
+      hrq.addDispatchedMessage(ids[i], i);
+    }
+
+    Thread thread1 = new Thread() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(600);
+        } catch (InterruptedException e) {
+          errorCollector.addError(e);
         }
+        list1 = HARegionQueue.createMessageListForTesting();
       }
-      iterator = list2.iterator();
-      doOnce = false;
-      id = null;
-      while (iterator.hasNext()) {
-        if (!doOnce) {
-          iterator.next();
-          iterator.next();
-          doOnce = true;
-        } else {
-          id = (EventID) iterator.next();
-          map.put(new Long(id.getThreadID()), new Long(id.getSequenceID()));
+    };
+
+    Thread thread2 = new Thread() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(480);
+        } catch (InterruptedException e) {
+          errorCollector.addError(e);
+        }
+        for (int i = 0; i < numberOfIterations; i++) {
+          ids[i] = new ThreadIdentifier(new byte[] {1}, i + numberOfIterations);
+          hrq.addDispatchedMessage(ids[i], i + numberOfIterations);
         }
       }
-      assertTrue(
-          " Expected the map size to be " + (2 * numberOfIterations) + " but it is " + map.size(),
-          map.size() == (2 * numberOfIterations));
-    } catch (Exception e) {
-      throw new AssertionError("Test failed due to an unexpected exception : ", e);
+    };
+
+    thread1.start();
+    thread2.start();
+    ThreadUtils.join(thread1, 30 * 1000);
+    ThreadUtils.join(thread2, 30 * 1000);
+    List list2 = HARegionQueue.createMessageListForTesting();
+    Iterator iterator = list1.iterator();
+    boolean doOnce = false;
+    EventID id;
+    Map map = new HashMap();
+
+    while (iterator.hasNext()) {
+      if (!doOnce) {
+        iterator.next();
+        iterator.next();
+        doOnce = true;
+      } else {
+        id = (EventID) iterator.next();
+        map.put(id.getThreadID(), id.getSequenceID());
+      }
+    }
+
+    iterator = list2.iterator();
+    doOnce = false;
+
+    while (iterator.hasNext()) {
+      if (!doOnce) {
+        iterator.next();
+        iterator.next();
+        doOnce = true;
+      } else {
+        id = (EventID) iterator.next();
+        map.put(id.getThreadID(), id.getSequenceID());
+      }
     }
+    assertThat(
+        " Expected the map size to be " + 2 * numberOfIterations + " but it is " + map.size(),
+        map.size(), is(2 * numberOfIterations));
   }
 
   /**
@@ -1050,101 +829,96 @@ public class HARegionQueueJUnitTest {
    * It is then verified to see that a total of x entries are present in the map
    */
   @Test
-  public void testConcurrentDispatcherAndRemovalForMultipleRegionsSameThreadId() {
-    try {
-      final long numberOfIterations = 10000;
-      final HARegionQueue hrq1 = createHARegionQueue("testConcurrentDispatcherAndRemoval1");
-
-      final HARegionQueue hrq2 = createHARegionQueue("testConcurrentDispatcherAndRemoval2");
+  public void testConcurrentDispatcherAndRemovalForMultipleRegionsSameThreadId() throws Exception {
+    int numberOfIterations = 10000;
+    HARegionQueue hrq1 = createHARegionQueue(this.testName.getMethodName() + "-1");
+    HARegionQueue hrq2 = createHARegionQueue(this.testName.getMethodName() + "-2");
+    HARegionQueue hrq3 = createHARegionQueue(this.testName.getMethodName() + "-3");
+    HARegionQueue hrq4 = createHARegionQueue(this.testName.getMethodName() + "-4");
+    HARegionQueue hrq5 = createHARegionQueue(this.testName.getMethodName() + "-5");
 
-      final HARegionQueue hrq3 = createHARegionQueue("testConcurrentDispatcherAndRemoval3");
+    HARegionQueue.stopQRMThread();
 
-      final HARegionQueue hrq4 = createHARegionQueue("testConcurrentDispatcherAndRemoval4");
+    ThreadIdentifier[] ids = new ThreadIdentifier[(int) numberOfIterations];
 
-      final HARegionQueue hrq5 = createHARegionQueue("testConcurrentDispatcherAndRemoval5");
+    for (int i = 0; i < numberOfIterations; i++) {
+      ids[i] = new ThreadIdentifier(new byte[] {1}, i);
+      hrq1.addDispatchedMessage(ids[i], i);
+      hrq2.addDispatchedMessage(ids[i], i);
 
-      HARegionQueue.stopQRMThread();
-      final ThreadIdentifier[] ids = new ThreadIdentifier[(int) numberOfIterations];
-
-      for (int i = 0; i < numberOfIterations; i++) {
-        ids[i] = new ThreadIdentifier(new byte[] {1}, i);
-        hrq1.addDispatchedMessage(ids[i], i);
-        hrq2.addDispatchedMessage(ids[i], i);
+    }
 
+    Thread thread1 = new Thread() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(600);
+        } catch (InterruptedException e) {
+          errorCollector.addError(e);
+        }
+        list1 = HARegionQueue.createMessageListForTesting();
       }
+    };
 
-      Thread thread1 = new Thread() {
-        public void run() {
-          try {
-            Thread.sleep(600);
-          } catch (InterruptedException e) {
-            fail("interrupted");
-          }
-          list1 = HARegionQueue.createMessageListForTesting();
-        };
-      };
-      Thread thread2 = new Thread() {
-        public void run() {
-          try {
-            Thread.sleep(480);
-          } catch (InterruptedException e) {
-            fail("interrupted");
-          }
-          for (int i = 0; i < numberOfIterations; i++) {
-            hrq3.addDispatchedMessage(ids[i], i);
-            hrq4.addDispatchedMessage(ids[i], i);
-            hrq5.addDispatchedMessage(ids[i], i);
-          }
-        };
-      };
-      thread1.start();
-      thread2.start();
-      ThreadUtils.join(thread1, 30 * 1000);
-      ThreadUtils.join(thread2, 30 * 1000);
-      List list2 = HARegionQueue.createMessageListForTesting();
-      Iterator iterator = list1.iterator();
-      boolean doOnce = true;
-      EventID id = null;
-      Map map = new HashMap();
-      while (iterator.hasNext()) {
-        if (!doOnce) {
-          iterator.next(); // read the total message size
-          doOnce = true;
-        } else {
-          iterator.next();// region name;
-          int size = ((Integer) iterator.next()).intValue();
-          for (int i = 0; i < size; i++) {
-            id = (EventID) iterator.next();
-            map.put(new ThreadIdentifier(id.getMembershipID(), id.getThreadID()),
-                new Long(id.getSequenceID()));
-          }
+    Thread thread2 = new Thread() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(480);
+        } catch (InterruptedException e) {
+          errorCollector.addError(e);
+        }
+        for (int i = 0; i < numberOfIterations; i++) {
+          hrq3.addDispatchedMessage(ids[i], i);
+          hrq4.addDispatchedMessage(ids[i], i);
+          hrq5.addDispatchedMessage(ids[i], i);
         }
       }
-
-      iterator = list2.iterator();
-      doOnce = true;
-      id = null;
-      while (iterator.hasNext()) {
-        if (!doOnce) {
-          iterator.next(); // read the total message size
-          doOnce = true;
-        } else {
-          iterator.next();// region name;
-          int size = ((Integer) iterator.next()).intValue();
-          for (int i = 0; i < size; i++) {
-            id = (EventID) iterator.next();
-            map.put(new ThreadIdentifier(id.getMembershipID(), id.getThreadID()),
-                new Long(id.getSequenceID()));
-          }
+    };
+
+    thread1.start();
+    thread2.start();
+    ThreadUtils.join(thread1, 30 * 1000);
+    ThreadUtils.join(thread2, 30 * 1000);
+    List list2 = HARegionQueue.createMessageListForTesting();
+    Iterator iterator = list1.iterator();
+    boolean doOnce = true;
+    EventID id;
+    Map map = new HashMap();
+
+    while (iterator.hasNext()) {
+      if (!doOnce) {
+        iterator.next(); // read the total message size
+        doOnce = true;
+      } else {
+        iterator.next();// region name;
+        int size = (Integer) iterator.next();
+        for (int i = 0; i < size; i++) {
+          id = (EventID) iterator.next();
+          map.put(new ThreadIdentifier(id.getMembershipID(), id.getThreadID()), id.getSequenceID());
         }
       }
-      assertTrue(
-          " Expected the map size to be " + (numberOfIterations) + " but it is " + map.size(),
-          map.size() == (numberOfIterations));
+    }
 
-    } catch (Exception e) {
-      throw new AssertionError("Test failed due to : ", e);
+    iterator = list2.iterator();
+    doOnce = true;
+
+    while (iterator.hasNext()) {
+      if (!doOnce) {
+        iterator.next(); // read the total message size
+        doOnce = true;
+      } else {
+        iterator.next();// region name;
+        int size = (Integer) iterator.next();
+        for (int i = 0; i < size; i++) {
+          id = (EventID) iterator.next();
+          map.put(new ThreadIdentifier(id.getMembershipID(), id.getThreadID()), id.getSequenceID());
+        }
+      }
     }
+
+    assertThat(" Expected the map size to be " + numberOfIterations + " but it is " + map.size(),
+        map.size(), is(numberOfIterations));
   }
 
   /**
@@ -1168,203 +942,179 @@ public class HARegionQueueJUnitTest {
    * It is then verified to see that the map size should be 2x * number of regions
    */
   @Test
-  public void testConcurrentDispatcherAndRemovalForMultipleRegionsDifferentThreadId() {
-    try {
-      final long numberOfIterations = 1000;
-      final HARegionQueue hrq1 =
-
-          createHARegionQueue("testConcurrentDispatcherAndRemoval1");
-
-      final HARegionQueue hrq2 =
-
-          createHARegionQueue("testConcurrentDispatcherAndRemoval2");
-      final HARegionQueue hrq3 =
-
-          createHARegionQueue("testConcurrentDispatcherAndRemoval3");
-      final HARegionQueue hrq4 =
-
-          createHARegionQueue("testConcurrentDispatcherAndRemoval4");
-      final HARegionQueue hrq5 =
-
-          createHARegionQueue("testConcurrentDispatcherAndRemoval5");
-
-      HARegionQueue.stopQRMThread();
-
-      final ThreadIdentifier[] ids1 = new ThreadIdentifier[(int) numberOfIterations];
-      final ThreadIdentifier[] ids2 = new ThreadIdentifier[(int) numberOfIterations];
-      final ThreadIdentifier[] ids3 = new ThreadIdentifier[(int) numberOfIterations];
-      final ThreadIdentifier[] ids4 = new ThreadIdentifier[(int) numberOfIterations];
-      final ThreadIdentifier[] ids5 = new ThreadIdentifier[(int) numberOfIterations];
+  public void testConcurrentDispatcherAndRemovalForMultipleRegionsDifferentThreadId()
+      throws Exception {
+    int numberOfIterations = 1000;
+    HARegionQueue hrq1 = createHARegionQueue(this.testName.getMethodName() + "-1");
+    HARegionQueue hrq2 = createHARegionQueue(this.testName.getMethodName() + "-2");
+    HARegionQueue hrq3 = createHARegionQueue(this.testName.getMethodName() + "-3");
+    HARegionQueue hrq4 = createHARegionQueue(this.testName.getMethodName() + "-4");
+    HARegionQueue hrq5 = createHARegionQueue(this.testName.getMethodName() + "-5");
+
+    HARegionQueue.stopQRMThread();
+
+    ThreadIdentifier[] ids1 = new ThreadIdentifier[(int) numberOfIterations];
+    ThreadIdentifier[] ids2 = new ThreadIdentifier[(int) numberOfIterations];
+    ThreadIdentifier[] ids3 = new ThreadIdentifier[(int) numberOfIterations];
+    ThreadIdentifier[] ids4 = new ThreadIdentifier[(int) numberOfIterations];
+    ThreadIdentifier[] ids5 = new ThreadIdentifier[(int) numberOfIterations];
+
+    for (int i = 0; i < numberOfIterations; i++) {
+      ids1[i] = new ThreadIdentifier(new byte[] {1}, i);
+      ids2[i] = new ThreadIdentifier(new byte[] {2}, i);
+      ids3[i] = new ThreadIdentifier(new byte[] {3}, i);
+      ids4[i] = new ThreadIdentifier(new byte[] {4}, i);
+      ids5[i] = new ThreadIdentifier(new byte[] {5}, i);
+      hrq1.addDispatchedMessage(ids1[i], i);
+      hrq2.addDispatchedMessage(ids2[i], i);
+      hrq3.addDispatchedMessage(ids3[i], i);
+      hrq4.addDispatchedMessage(ids4[i], i);
+      hrq5.addDispatchedMessage(ids5[i], i);
+    }
 
-      for (int i = 0; i < numberOfIterations; i++) {
-        ids1[i] = new ThreadIdentifier(new byte[] {1}, i);
-        ids2[i] = new ThreadIdentifier(new byte[] {2}, i);
-        ids3[i] = new ThreadIdentifier(new byte[] {3}, i);
-        ids4[i] = new ThreadIdentifier(new byte[] {4}, i);
-        ids5[i] = new ThreadIdentifier(new byte[] {5}, i);
-        hrq1.addDispatchedMessage(ids1[i], i);
-        hrq2.addDispatchedMessage(ids2[i], i);
-        hrq3.addDispatchedMessage(ids3[i], i);
-        hrq4.addDispatchedMessage(ids4[i], i);
-        hrq5.addDispatchedMessage(ids5[i], i);
+    Thread thread1 = new Thread() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(600);
+        } catch (InterruptedException e) {
+          errorCollector.addError(e);
+        }
+        list1 = HARegionQueue.createMessageListForTesting();
       }
+    };
 
-      Thread thread1 = new Thread() {
-        public void run() {
-          try {
-            Thread.sleep(600);
-          } catch (InterruptedException e) {
-            fail("interrupted");
-          }
-          list1 = HARegionQueue.createMessageListForTesting();
-        };
-      };
-      Thread thread2 = new Thread() {
-        public void run() {
-          try {
-            Thread.sleep(480);
-          } catch (InterruptedException e) {
-            fail("Interrupted");
-          }
-          for (int i = 0; i < numberOfIterations; i++) {
-            ids1[i] = new ThreadIdentifier(new byte[] {1}, i + numberOfIterations);
-            ids2[i] = new ThreadIdentifier(new byte[] {2}, i + numberOfIterations);
-            ids3[i] = new ThreadIdentifier(new byte[] {3}, i + numberOfIterations);
-            ids4[i] = new ThreadIdentifier(new byte[] {4}, i + numberOfIterations);
-            ids5[i] = new ThreadIdentifier(new byte[] {5}, i + numberOfIterations);
-
-            hrq1.addDispatchedMessage(ids1[i], i + numberOfIterations);
-            hrq2.addDispatchedMessage(ids2[i], i + numberOfIterations);
-            hrq3.addDispatchedMessage(ids3[i], i + numberOfIterations);
-            hrq4.addDispatchedMessage(ids4[i], i + numberOfIterations);
-            hrq5.addDispatchedMessage(ids5[i], i + numberOfIterations);
-          }
-        };
-      };
-      thread1.start();
-      thread2.start();
-      ThreadUtils.join(thread1, 30 * 1000);
-      ThreadUtils.join(thread2, 30 * 1000);
-      List list2 = HARegionQueue.createMessageListForTesting();
-      Iterator iterator = list1.iterator();
-      boolean doOnce = true;
-      EventID id = null;
-      Map map = new HashMap();
-      while (iterator.hasNext()) {
-        if (!doOnce) {
-          iterator.next(); // read the total message size
-          doOnce = true;
-        } else {
-          iterator.next();// region name;
-          int size = ((Integer) iterator.next()).intValue();
-          System.out.println(" size of list 1 iteration x " + size);
-          for (int i = 0; i < size; i++) {
-
-            id = (EventID) iterator.next();
-            map.put(new ThreadIdentifier(id.getMembershipID(), id.getThreadID()),
-                new Long(id.getSequenceID()));
-          }
+    Thread thread2 = new Thread() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(480);
+        } catch (InterruptedException e) {
+          errorCollector.addError(e);
+        }
+        for (int i = 0; i < numberOfIterations; i++) {
+          ids1[i] = new ThreadIdentifier(new byte[] {1}, i + numberOfIterations);
+          ids2[i] = new ThreadIdentifier(new byte[] {2}, i + numberOfIterations);
+          ids3[i] = new ThreadIdentifier(new byte[] {3}, i + numberOfIterations);
+          ids4[i] = new ThreadIdentifier(new byte[] {4}, i + numberOfIterations);
+          ids5[i] = new ThreadIdentifier(new byte[] {5}, i + numberOfIterations);
+
+          hrq1.addDispatchedMessage(ids1[i], i + numberOfIterations);
+          hrq2.addDispatchedMessage(ids2[i], i + numberOfIterations);
+          hrq3.addDispatchedMessage(ids3[i], i + numberOfIterations);
+          hrq4.addDispatchedMessage(ids4[i], i + numberOfIterations);
+          hrq5.addDispatchedMessage(ids5[i], i + numberOfIterations);
         }
       }
-
-      iterator = list2.iterator();
-      doOnce = true;
-      id = null;
-      while (iterator.hasNext()) {
-        if (!doOnce) {
-          iterator.next(); // read the total message size
-          doOnce = true;
-        } else {
-          iterator.next();// region name;
-          int size = ((Integer) iterator.next()).intValue();
-          System.out.println(" size of list 2 iteration x " + size);
-          for (int i = 0; i < size; i++) {
-            id = (EventID) iterator.next();
-            map.put(new ThreadIdentifier(id.getMembershipID(), id.getThreadID()),
-                new Long(id.getSequenceID()));
-          }
+    };
+
+    thread1.start();
+    thread2.start();
+    ThreadUtils.join(thread1, 30 * 1000);
+    ThreadUtils.join(thread2, 30 * 1000);
+    List list2 = HARegionQueue.createMessageListForTesting();
+    Iterator iterator = list1.iterator();
+    boolean doOnce = true;
+    EventID id = null;
+    Map map = new HashMap();
+
+    while (iterator.hasNext()) {
+      if (!doOnce) {
+        iterator.next(); // read the total message size
+        doOnce = true;
+      } else {
+        iterator.next(); // region name;
+        int size = (Integer) iterator.next();
+        System.out.println(" size of list 1 iteration x " + size);
+        for (int i = 0; i < size; i++) {
+          id = (EventID) iterator.next();
+          map.put(new ThreadIdentifier(id.getMembershipID(), id.getThreadID()), id.getSequenceID());
         }
       }
+    }
 
-      assertTrue(" Expected the map size to be " + (numberOfIterations * 2 * 5) + " but it is "
-          + map.size(), map.size() == (numberOfIterations * 2 * 5));
+    iterator = list2.iterator();
+    doOnce = true;
 
-    } catch (Exception e) {
-      throw new AssertionError("Test failed due to : ", e);
+    while (iterator.hasNext()) {
+      if (!doOnce) {
+        iterator.next(); // read the total message size
+        doOnce = true;
+      } else {
+        iterator.next(); // region name;
+        int size = (Integer) iterator.next();
+        System.out.println(" size of list 2 iteration x " + size);
+        for (int i = 0; i < size; i++) {
+          id = (EventID) iterator.next();
+          map.put(new ThreadIdentifier(id.getMembershipID(), id.getThreadID()), id.getSequenceID());
+        }
+      }
     }
+
+    assertThat(
+        " Expected the map size to be " + numberOfIterations * 2 * 5 + " but it is " + map.size(),
+        map.size(), is(numberOfIterations * 2 * 5));
   }
 
   /**
-   * Concurrent Peek on Blokcing Queue waiting with for a Put . If concurrent take is also happening
+   * Concurrent Peek on Blocking Queue waiting for a Put. If a concurrent take is also happening
    * such that the object is removed first then the peek should block & not return with null.
    */
   @Test
-  public void testBlockingQueueForConcurrentPeekAndTake() {
-    exceptionInThread = false;
-    testFailed = false;
-    try {
-      final TestBlockingHARegionQueue bQ =
-          new TestBlockingHARegionQueue("testBlockQueueForConcurrentPeekAndTake", cache);
-      Thread[] threads = new Thread[3];
-      for (int i = 0; i < 3; i++) {
-        threads[i] = new Thread() {
-          public void run() {
-            try {
-              long startTime = System.currentTimeMillis();
-              Object obj = bQ.peek();
-              if (obj == null) {
-                testFailed = true;
-                message.append(
-                    " Failed :  failed since object was null and was not expected to be null \n");
-              }
-              long totalTime = System.currentTimeMillis() - startTime;
+  public void testBlockingQueueForConcurrentPeekAndTake() throws Exception {
+    TestBlockingHARegionQueue regionQueue =
+        new TestBlockingHARegionQueue("testBlockQueueForConcurrentPeekAndTake", this.cache);
+    Thread[] threads = new Thread[3];
+
+    for (int i = 0; i < 3; i++) {
+      threads[i] = new Thread() {
+        @Override
+        public void run() {
+          try {
+            long startTime = System.currentTimeMillis();
+            Object obj = regionQueue.peek();
+            if (obj == null) {
+              errorCollector.addError(new AssertionError(
+                  "Failed :  failed since object was null and was not expected to be null"));
+            }
+            long totalTime = System.currentTimeMillis() - startTime;
 
-              if (totalTime < 4000) {
-                testFailed = true;
-                message
-                    .append(" Failed :  Expected time to be greater than 4000 but it is not so ");
-              }
-            } catch (Exception e) {
-              exceptionInThread = true;
-              exception = e;
+            if (totalTime < 4000) {
+              errorCollector.addError(new AssertionError(
+                  "Failed :  Expected time to be greater than 4000 but it is not so"));
             }
+          } catch (Exception e) {
+            errorCollector.addError(e);
           }
-        };
-
-      }
-
-      for (int k = 0; k < 3; k++) {
-        threads[k].start();
-      }
-      Thread.sleep(4000);
-
-      EventID id = new EventID(new byte[] {1}, 1, 1);
-      EventID id1 = new EventID(new byte[] {1}, 1, 2);
+        }
+      };
+    }
 
-      bQ.takeFirst = true;
-      bQ.put(new ConflatableObject("key", "value", id, true, "testing"));
+    for (int k = 0; k < 3; k++) {
+      threads[k].start();
+    }
 
-      Thread.sleep(2000);
+    Thread.sleep(4000);
 
-      bQ.put(new ConflatableObject("key1", "value1", id1, true, "testing"));
+    EventID id = new EventID(new byte[] {1}, 1, 1);
+    EventID id1 = new EventID(new byte[] {1}, 1, 2);
 
-      long startTime = System.currentTimeMillis();
-      for (int k = 0; k < 3; k++) {
-        ThreadUtils.join(threads[k], 180 * 1000);
-      }
+    regionQueue.takeFirst = true;
+    regionQueue.put(new ConflatableObject("key", "value", id, true, this.testName.getMethodName()));
 
-      long totalTime = System.currentTimeMillis() - startTime;
+    Thread.sleep(2000);
 
-      if (totalTime >= 180000) {
-        fail(" Test taken too long ");
-      }
+    regionQueue
+        .put(new ConflatableObject("key1", "value1", id1, true, this.testName.getMethodName()));
 
-      if (testFailed) {
-        fail(" test failed due to " + message);
-      }
+    long startTime = System.currentTimeMillis();
+    for (int k = 0; k < 3; k++) {
+      ThreadUtils.join(threads[k], 180 * 1000);
+    }
 
-    } catch (Exception e) {
-      throw new AssertionError(" Test failed due to ", e);
+    long totalTime = System.currentTimeMillis() - startTime;
+    if (totalTime >= 180000) {
+      fail(" Test taken too long ");
     }
   }
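
The javadoc at the top of testBlockingQueueForConcurrentPeekAndTake describes the contract under
test: a blocking peek must not come back null just because a concurrent take removed the head
first. A minimal stand-alone sketch of the peek/take distinction on a plain JDK BlockingQueue
(not HARegionQueue itself, only the general queue semantics the test leans on):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class PeekVersusTakeSketch {
  public static void main(String[] args) throws InterruptedException {
    BlockingQueue<String> queue = new ArrayBlockingQueue<>(10);
    queue.put("event-1");

    // peek() returns the head without removing it; the element stays queued.
    String peeked = queue.peek();
    // take() removes the head, blocking if the queue is empty.
    String taken = queue.take();
    System.out.println(peeked + " / " + taken); // event-1 / event-1

    // The queue is now empty. A plain JDK peek() is non-blocking and would
    // return null here, which is exactly what HARegionQueue's blocking peek
    // must avoid; poll with a timeout to observe the empty state safely.
    String next = queue.poll(100, TimeUnit.MILLISECONDS);
    System.out.println(next); // null (nothing was offered)
  }
}
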
 
@@ -1373,71 +1123,60 @@ public class HARegionQueueJUnitTest {
    * QRM thread , the peek should block correctly.
    */
   @Test
-  public void testBlockingQueueForTakeWhenPeekInProgress() {
-    exceptionInThread = false;
-    testFailed = false;
-    try {
-      final TestBlockingHARegionQueue bQ =
-          new TestBlockingHARegionQueue("testBlockQueueForTakeWhenPeekInProgress", cache);
-      Thread[] threads = new Thread[3];
-      for (int i = 0; i < 3; i++) {
-        threads[i] = new Thread() {
-          public void run() {
-            try {
-              long startTime = System.currentTimeMillis();
-              Object obj = bQ.peek();
-              if (obj == null) {
-                testFailed = true;
-                message.append(
-                    " Failed :  failed since object was null and was not expected to be null \n");
-              }
-              long totalTime = System.currentTimeMillis() - startTime;
+  public void testBlockingQueueForTakeWhenPeekInProgress() throws Exception {
+    TestBlockingHARegionQueue regionQueue =
+        new TestBlockingHARegionQueue("testBlockQueueForTakeWhenPeekInProgress", this.cache);
+    Thread[] threads = new Thread[3];
+
+    for (int i = 0; i < 3; i++) {
+      threads[i] = new Thread() {
+        @Override
+        public void run() {
+          try {
+            long startTime = System.currentTimeMillis();
+            Object obj = regionQueue.peek();
+            if (obj == null) {
+              errorCollector.addError(new AssertionError(
+                  "Failed :  failed since object was null and was not expected to be null"));
+            }
+            long totalTime = System.currentTimeMillis() - startTime;
 
-              if (totalTime < 4000) {
-                testFailed = true;
-                message
-                    .append(" Failed :  Expected time to be greater than 4000 but it is not so ");
-              }
-            } catch (Exception e) {
-              exceptionInThread = true;
-              exception = e;
+            if (totalTime < 4000) {
+              errorCollector.addError(new AssertionError(
+                  "Failed :  Expected time to be greater than 4000 but it is not so"));
             }
+          } catch (Exception e) {
+            errorCollector.addError(e);
           }
-        };
-      }
-
-      for (int k = 0; k < 3; k++) {
-        threads[k].start();
-      }
-      Thread.sleep(4000);
-
-      EventID id = new EventID(new byte[] {1}, 1, 1);
-      EventID id1 = new EventID(new byte[] {1}, 1, 2);
+        }
+      };
+    }
 
-      bQ.takeWhenPeekInProgress = true;
-      bQ.put(new ConflatableObject("key", "value", id, true, "testing"));
+    for (int k = 0; k < 3; k++) {
+      threads[k].start();
+    }
 
-      Thread.sleep(2000);
+    Thread.sleep(4000);
 
-      bQ.put(new ConflatableObject("key1", "value1", id1, true, "testing"));
+    EventID id = new EventID(new byte[] {1}, 1, 1);
+    EventID id1 = new EventID(new byte[] {1}, 1, 2);
 
-      long startTime = System.currentTimeMillis();
-      for (int k = 0; k < 3; k++) {
-        ThreadUtils.join(threads[k], 60 * 1000);
-      }
+    regionQueue.takeWhenPeekInProgress = true;
+    regionQueue.put(new ConflatableObject("key", "value", id, true, this.testName.getMethodName()));
 
-      long totalTime = System.currentTimeMillis() - startTime;
+    Thread.sleep(2000);
 
-      if (totalTime >= 60000) {
-        fail(" Test taken too long ");
-      }
+    regionQueue
+        .put(new ConflatableObject("key1", "value1", id1, true, this.testName.getMethodName()));
 
-      if (testFailed) {
-        fail(" test failed due to " + message);
-      }
+    long startTime = System.currentTimeMillis();
+    for (int k = 0; k < 3; k++) {
+      ThreadUtils.join(threads[k], 60 * 1000);
+    }
 
-    } catch (Exception e) {
-      throw new AssertionError(" Test failed due to ", e);
+    long totalTime = System.currentTimeMillis() - startTime;
+    if (totalTime >= 60000) {
+      fail(" Test taken too long ");
     }
   }
 
@@ -1451,138 +1190,88 @@ public class HARegionQueueJUnitTest {
    * violation. This test will validate that behaviour
    */
   @Test
-  public void testConcurrentEventExpiryAndTake() {
-    try {
-      HARegionQueueAttributes haa = new HARegionQueueAttributes();
-      haa.setExpiryTime(3);
-      final RegionQueue regionqueue =
-          new HARegionQueue.TestOnlyHARegionQueue("testing", cache, haa) {
-            CacheListener createCacheListenerForHARegion() {
+  public void testConcurrentEventExpiryAndTake() throws Exception {
+    AtomicBoolean complete = new AtomicBoolean(false);
+    AtomicBoolean expiryCalled = new AtomicBoolean(false);
+    AtomicBoolean allowExpiryToProceed = new AtomicBoolean(false);
 
-              return new CacheListenerAdapter() {
+    HARegionQueueAttributes haa = new HARegionQueueAttributes();
+    haa.setExpiryTime(3);
 
-                public void afterInvalidate(EntryEvent event) {
+    RegionQueue regionqueue =
+        new HARegionQueue.TestOnlyHARegionQueue(this.testName.getMethodName(), this.cache, haa) {
+          @Override
+          CacheListener createCacheListenerForHARegion() {
 
-                  if (event.getKey() instanceof Long) {
-                    synchronized (HARegionQueueJUnitTest.this) {
-                      expiryCalled = true;
-                      HARegionQueueJUnitTest.this.notify();
+            return new CacheListenerAdapter() {
 
-                    } ;
-                    Thread.yield();
+              @Override
+              public void afterInvalidate(EntryEvent event) {
 
-                    synchronized (HARegionQueueJUnitTest.this) {
-                      if (!allowExpiryToProceed) {
-                        try {
-                          HARegionQueueJUnitTest.this.wait();
-                        } catch (InterruptedException e1) {
-                          encounteredException = true;
-                        }
+                if (event.getKey() instanceof Long) {
+                  synchronized (HARegionQueueJUnitTest.this) {
+                    expiryCalled.set(true);
+                    HARegionQueueJUnitTest.this.notifyAll();
+                  }
+
+                  Thread.yield();
+
+                  synchronized (HARegionQueueJUnitTest.this) {
+                    while (!allowExpiryToProceed.get()) {
+                      try {
+                        HARegionQueueJUnitTest.this.wait();
+                      } catch (InterruptedException e) {
+                        errorCollector.addError(e);
+                        break;
                       }
                     }
-                    try {
-                      expireTheEventOrThreadIdentifier(event);
-                    } catch (CacheException e) {
-                      e.printStackTrace();
-                      encounteredException = true;
-                    } finally {
-                      synchronized (HARegionQueueJUnitTest.this) {
-                        complete = true;
-                        HARegionQueueJUnitTest.this.notify();
-                      }
+                  }
+
+                  try {
+                    expireTheEventOrThreadIdentifier(event);
+                  } catch (CacheException e) {
+                    errorCollector.addError(e);
+                  } finally {
+                    synchronized (HARegionQueueJUnitTest.this) {
+                      complete.set(true);
+                      HARegionQueueJUnitTest.this.notifyAll();
                     }
                   }
                 }
-              };
-            }
-          };
-      EventID ev1 = new EventID(new byte[] {1}, 1, 1);
+              }
+            };
+          }
+        };
 
-      Conflatable cf1 = new ConflatableObject("key", "value", ev1, true, "testing");
+    EventID ev1 = new EventID(new byte[] {1}, 1, 1);
+    Conflatable cf1 =
+        new ConflatableObject("key", "value", ev1, true, this.testName.getMethodName());
+    regionqueue.put(cf1);
 
-      regionqueue.put(cf1);
-      synchronized (this) {
-        if (!expiryCalled) {
-          this.wait();
-        }
-      }
-      try {
-        Object o = regionqueue.take();
-        assertNull(o);
-      } catch (Exception e) {
-        throw new AssertionError("Test failed due to exception ", e);
-      } finally {
-        synchronized (this) {
-          this.allowExpiryToProceed = true;
-          this.notify();
-        }
+    synchronized (this) {
+      

<TRUNCATED>
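
The (truncated) testConcurrentEventExpiryAndTake rewrite above coordinates the expiry listener
and the test thread with synchronized blocks, wait()/notifyAll(), and AtomicBoolean flags that
are rechecked in a loop. A compact sketch of that guarded-wait idiom, using hypothetical class
and method names rather than Geode API:

import java.util.concurrent.atomic.AtomicBoolean;

public class GuardedWaitSketch {
  private final Object monitor = new Object();
  private final AtomicBoolean allowProceed = new AtomicBoolean(false);

  // Waiting thread: block until the flag is set, re-checking after every
  // wakeup so spurious wakeups and missed-signal races are handled correctly.
  void awaitPermission() throws InterruptedException {
    synchronized (monitor) {
      while (!allowProceed.get()) {
        monitor.wait();
      }
    }
  }

  // Controlling thread: flip the flag and wake all waiters.
  void grantPermission() {
    synchronized (monitor) {
      allowProceed.set(true);
      monitor.notifyAll();
    }
  }
}
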

[30/43] geode git commit: Run spotlessApply

Posted by kl...@apache.org.
Run spotlessApply


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/7ca7c2cc
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/7ca7c2cc
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/7ca7c2cc

Branch: refs/heads/feature/GEODE-2632-17
Commit: 7ca7c2cc1ceb4c6899aeac4a7792eb569dd4cc51
Parents: cbfc667
Author: Kirk Lund <kl...@apache.org>
Authored: Fri May 19 15:23:12 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:09 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/geode/GemFireException.java |   7 +-
 .../geode/cache/CacheRuntimeException.java      |   4 +-
 .../geode/internal/cache/ha/HARegionQueue.java  |  18 +--
 .../cache/tier/sockets/BaseCommand.java         | 152 ++++++++++++-------
 .../cache/tier/sockets/CacheClientNotifier.java |   8 +-
 .../ServerInterestRegistrationMessage.java      |  31 ++--
 .../cache/tier/sockets/command/AddPdxEnum.java  |   3 +-
 .../cache/tier/sockets/command/AddPdxType.java  |   3 +-
 .../cache/tier/sockets/command/ClearRegion.java |  14 +-
 .../cache/tier/sockets/command/ClientReady.java |  12 +-
 .../tier/sockets/command/CloseConnection.java   |  11 +-
 .../tier/sockets/command/CommitCommand.java     |   3 +-
 .../cache/tier/sockets/command/ContainsKey.java |   9 +-
 .../tier/sockets/command/ContainsKey66.java     |   9 +-
 .../tier/sockets/command/CreateRegion.java      |  10 +-
 .../cache/tier/sockets/command/Default.java     |  10 +-
 .../cache/tier/sockets/command/Destroy.java     |  15 +-
 .../cache/tier/sockets/command/Destroy65.java   |  34 +++--
 .../tier/sockets/command/DestroyRegion.java     |  11 +-
 .../tier/sockets/command/ExecuteFunction.java   |   3 +-
 .../tier/sockets/command/ExecuteFunction65.java |   3 +-
 .../tier/sockets/command/ExecuteFunction66.java |   3 +-
 .../tier/sockets/command/ExecuteFunction70.java |   3 +-
 .../sockets/command/ExecuteRegionFunction.java  |   3 +-
 .../command/ExecuteRegionFunction61.java        |   3 +-
 .../command/ExecuteRegionFunction65.java        |   3 +-
 .../command/ExecuteRegionFunction66.java        |   3 +-
 .../command/ExecuteRegionFunctionSingleHop.java |   3 +-
 .../sockets/command/GatewayReceiverCommand.java |  85 +++++------
 .../cache/tier/sockets/command/Get70.java       |  14 +-
 .../cache/tier/sockets/command/GetAll.java      |   8 +-
 .../cache/tier/sockets/command/GetAll651.java   |   8 +-
 .../cache/tier/sockets/command/GetAll70.java    |  11 +-
 .../sockets/command/GetAllWithCallback.java     |  11 +-
 .../command/GetClientPRMetadataCommand.java     |   6 +-
 .../command/GetClientPRMetadataCommand66.java   |   6 +-
 .../sockets/command/GetFunctionAttribute.java   |   3 +-
 .../tier/sockets/command/GetPDXEnumById.java    |   5 +-
 .../tier/sockets/command/GetPDXIdForEnum.java   |   3 +-
 .../tier/sockets/command/GetPDXIdForType.java   |   3 +-
 .../tier/sockets/command/GetPDXTypeById.java    |   5 +-
 .../cache/tier/sockets/command/Invalid.java     |   7 +-
 .../cache/tier/sockets/command/Invalidate.java  |  18 ++-
 .../cache/tier/sockets/command/KeySet.java      |  12 +-
 .../cache/tier/sockets/command/MakePrimary.java |   7 +-
 .../cache/tier/sockets/command/PeriodicAck.java |   5 +-
 .../cache/tier/sockets/command/Ping.java        |  11 +-
 .../cache/tier/sockets/command/Put.java         |  25 +--
 .../cache/tier/sockets/command/Put61.java       |  20 ++-
 .../cache/tier/sockets/command/Put65.java       |  35 +++--
 .../cache/tier/sockets/command/PutAll.java      |  23 ++-
 .../cache/tier/sockets/command/PutAll70.java    |  31 ++--
 .../cache/tier/sockets/command/PutAll80.java    |  35 +++--
 .../sockets/command/PutUserCredentials.java     |   9 +-
 .../cache/tier/sockets/command/Query.java       |   3 +-
 .../cache/tier/sockets/command/Query651.java    |  12 +-
 .../command/RegisterDataSerializers.java        |  12 +-
 .../sockets/command/RegisterInstantiators.java  |  14 +-
 .../tier/sockets/command/RegisterInterest.java  |  21 +--
 .../sockets/command/RegisterInterest61.java     |  19 +--
 .../sockets/command/RegisterInterestList.java   |  18 ++-
 .../sockets/command/RegisterInterestList61.java |  18 ++-
 .../sockets/command/RegisterInterestList66.java |  15 +-
 .../cache/tier/sockets/command/RemoveAll.java   |  36 +++--
 .../tier/sockets/command/RemoveUserAuth.java    |   4 +-
 .../cache/tier/sockets/command/Request.java     |  14 +-
 .../tier/sockets/command/RequestEventValue.java |  20 ++-
 .../cache/tier/sockets/command/Size.java        |   6 +-
 .../command/TXSynchronizationCommand.java       |  13 +-
 .../sockets/command/UnregisterInterest.java     |  14 +-
 .../sockets/command/UnregisterInterestList.java |  11 +-
 .../command/UpdateClientNotification.java       |   3 +-
 .../cache/tier/sockets/command/CloseCQ.java     |  16 +-
 .../cache/tier/sockets/command/ExecuteCQ.java   |  13 +-
 .../cache/tier/sockets/command/ExecuteCQ61.java |  16 +-
 .../cache/tier/sockets/command/GetCQStats.java  |  12 +-
 .../tier/sockets/command/GetDurableCQs.java     |  10 +-
 .../cache/tier/sockets/command/MonitorCQ.java   |  18 ++-
 .../cache/tier/sockets/command/StopCQ.java      |  16 +-
 79 files changed, 677 insertions(+), 474 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/GemFireException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/GemFireException.java b/geode-core/src/main/java/org/apache/geode/GemFireException.java
index 3a69307..cd80019 100644
--- a/geode-core/src/main/java/org/apache/geode/GemFireException.java
+++ b/geode-core/src/main/java/org/apache/geode/GemFireException.java
@@ -19,8 +19,7 @@ package org.apache.geode;
  * GemFire.
  * <p>
  * Since these exceptions are unchecked, this class really <em>ought</em> to be called
- * {@code GemFireRuntimeException}; however, the current name is retained for compatibility's
- * sake.
+ * {@code GemFireRuntimeException}; however, the current name is retained for compatibility's sake.
  * <p>
  * This class is abstract to enforce throwing more specific exception types. Please avoid using
  * GemFireException to describe an arbitrary error condition
@@ -60,8 +59,8 @@ public abstract class GemFireException extends RuntimeException {
   }
 
   /**
-   * Returns the root cause of this {@code GemFireException} or {@code null} if the cause
-   * is nonexistent or unknown.
+   * Returns the root cause of this {@code GemFireException} or {@code null} if the cause is
+   * nonexistent or unknown.
    */
   public Throwable getRootCause() {
     if (this.getCause() == null) {
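
The hunk above shows only the start of getRootCause(). A conventional way to honor the documented
contract (null when there is no cause, otherwise the deepest cause in the chain) is to walk
Throwable.getCause(); this is a sketch of that idiom, not necessarily the exact Geode
implementation:

public final class RootCauseSketch {
  // Sketch only: walks Throwable.getCause() to the end of the chain.
  // The real GemFireException.getRootCause() may differ in detail.
  public static Throwable rootCauseOf(Throwable throwable) {
    if (throwable.getCause() == null) {
      return null; // matches "null if the cause is nonexistent or unknown"
    }
    Throwable cause = throwable.getCause();
    while (cause.getCause() != null) {
      cause = cause.getCause();
    }
    return cause;
  }
}
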

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/cache/CacheRuntimeException.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/CacheRuntimeException.java b/geode-core/src/main/java/org/apache/geode/cache/CacheRuntimeException.java
index 89b596f..7732e96 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/CacheRuntimeException.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/CacheRuntimeException.java
@@ -41,8 +41,8 @@ public abstract class CacheRuntimeException extends GemFireException {
   }
 
   /**
-   * Constructs an instance of {@code CacheRuntimeException} with the specified detail message
-   * and cause.
+   * Constructs an instance of {@code CacheRuntimeException} with the specified detail message and
+   * cause.
    * 
    * @param msg the detail message
    * @param cause the causal Throwable

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
index c0d3342..66e34b9 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
@@ -2059,7 +2059,7 @@ public class HARegionQueue implements RegionQueue {
   private static class BlockingHARegionQueue extends HARegionQueue {
 
     private static final String EVENT_ENQUEUE_WAIT_TIME_NAME =
-      DistributionConfig.GEMFIRE_PREFIX + "subscription.EVENT_ENQUEUE_WAIT_TIME";
+        DistributionConfig.GEMFIRE_PREFIX + "subscription.EVENT_ENQUEUE_WAIT_TIME";
 
     private static final int DEFAULT_EVENT_ENQUEUE_WAIT_TIME = 100;
 
@@ -2068,7 +2068,7 @@ public class HARegionQueue implements RegionQueue {
      * logged.
      */
     private static final String MAX_QUEUE_LOG_FREQUENCY =
-      DistributionConfig.GEMFIRE_PREFIX + "logFrequency.clientQueueReachedMaxLimit";
+        DistributionConfig.GEMFIRE_PREFIX + "logFrequency.clientQueueReachedMaxLimit";
 
     private static final long DEFAULT_LOG_FREQUENCY = 1000;
 
@@ -2095,8 +2095,7 @@ public class HARegionQueue implements RegionQueue {
     private final Object permitMon = new Object();
 
     /**
-     * Lock on which the take & remove threads block awaiting data from put
-     * operations
+     * Lock on which the take & remove threads block awaiting data from put operations
      */
     private final StoppableReentrantLock lock;
 
@@ -2107,9 +2106,9 @@ public class HARegionQueue implements RegionQueue {
 
     /**
      * System property value denoting the time in milliseconds. Any thread putting an event into a
-     * subscription queue, which is full, will wait this much time for the queue to make space. It'll
-     * then enqueue the event possibly causing the queue to grow beyond its capacity/max-size. See
-     * #51400.
+     * subscription queue, which is full, will wait this much time for the queue to make space.
+     * It'll then enqueue the event possibly causing the queue to grow beyond its capacity/max-size.
+     * See #51400.
      */
     private final int enqueueEventWaitTime;
 
@@ -2142,8 +2141,7 @@ public class HARegionQueue implements RegionQueue {
     }
 
     private static int calcEnqueueEventWaitTime() {
-      int value =
-        Integer.getInteger(EVENT_ENQUEUE_WAIT_TIME_NAME, DEFAULT_EVENT_ENQUEUE_WAIT_TIME);
+      int value = Integer.getInteger(EVENT_ENQUEUE_WAIT_TIME_NAME, DEFAULT_EVENT_ENQUEUE_WAIT_TIME);
       if (value < 0) {
         value = DEFAULT_EVENT_ENQUEUE_WAIT_TIME;
       }
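
calcEnqueueEventWaitTime() above reads a gemfire-prefixed system property with a default and
clamps negative values. Assuming DistributionConfig.GEMFIRE_PREFIX resolves to "gemfire." (an
assumption for this sketch; the constant's value is not shown in this diff), the property can be
tuned and read like this:

public class EnqueueWaitTimeSketch {
  private static final int DEFAULT_EVENT_ENQUEUE_WAIT_TIME = 100;

  public static void main(String[] args) {
    // Property name assumes the "gemfire." prefix.
    System.setProperty("gemfire.subscription.EVENT_ENQUEUE_WAIT_TIME", "250");

    int value = Integer.getInteger("gemfire.subscription.EVENT_ENQUEUE_WAIT_TIME",
        DEFAULT_EVENT_ENQUEUE_WAIT_TIME);
    if (value < 0) {
      value = DEFAULT_EVENT_ENQUEUE_WAIT_TIME; // negative values fall back to the default
    }
    System.out.println("enqueue wait time (ms): " + value); // 250
  }
}
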
@@ -2209,7 +2207,7 @@ public class HARegionQueue implements RegionQueue {
                   if ((this.maxQueueSizeHitCount % this.logFrequency) == 0) {
                     logger.warn(LocalizedMessage.create(
                         LocalizedStrings.HARegionQueue_CLIENT_QUEUE_FOR_0_IS_FULL,
-                        new Object[] { this.region.getName() }));
+                        new Object[] {this.region.getName()}));
                     this.maxQueueSizeHitCount = 0;
                   }
                   ++this.maxQueueSizeHitCount;

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java
index f09c854..1fb8c8c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java
@@ -87,7 +87,8 @@ public abstract class BaseCommand implements Command {
 
   private static final byte[] OK_BYTES = new byte[] {0};
 
-  public static final int MAXIMUM_CHUNK_SIZE = Integer.getInteger("BridgeServer.MAXIMUM_CHUNK_SIZE", 100);
+  public static final int MAXIMUM_CHUNK_SIZE =
+      Integer.getInteger("BridgeServer.MAXIMUM_CHUNK_SIZE", 100);
 
   /** Whether to suppress logging of IOExceptions */
   private static final boolean SUPPRESS_IO_EXCEPTION_LOGGING =
@@ -99,14 +100,16 @@ public abstract class BaseCommand implements Command {
    * of them completes or fails. The bytes are computed based in the size sent in the incoming msg
    * header.
    */
-  private static final int MAX_INCOMING_DATA = Integer.getInteger("BridgeServer.MAX_INCOMING_DATA", -1);
+  private static final int MAX_INCOMING_DATA =
+      Integer.getInteger("BridgeServer.MAX_INCOMING_DATA", -1);
 
   /**
    * Maximum number of concurrent incoming client messages that a bridge server will allow. Once a
    * server is working on this number additional incoming client messages will wait until one of
    * them completes or fails.
    */
-  private static final int MAX_INCOMING_MESSAGES = Integer.getInteger("BridgeServer.MAX_INCOMING_MSGS", -1);
+  private static final int MAX_INCOMING_MESSAGES =
+      Integer.getInteger("BridgeServer.MAX_INCOMING_MSGS", -1);
 
   private static final Semaphore INCOMING_DATA_LIMITER;
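
MAX_INCOMING_DATA and MAX_INCOMING_MESSAGES are documented above as caps that make additional
client messages wait until in-flight ones complete or fail, and the class keeps Semaphore fields
for them. A generic sketch of that admission-control pattern, not the actual BaseCommand wiring:

import java.util.concurrent.Semaphore;

public class IncomingMessageLimiterSketch {
  // A value <= 0 would normally mean "unlimited"; this sketch assumes a positive cap.
  private final Semaphore permits;

  IncomingMessageLimiterSketch(int maxConcurrentMessages) {
    this.permits = new Semaphore(maxConcurrentMessages, true);
  }

  void process(Runnable messageHandler) throws InterruptedException {
    permits.acquire();      // blocks once the configured cap is reached
    try {
      messageHandler.run(); // handle one client message
    } finally {
      permits.release();    // completion or failure frees the slot
    }
  }
}
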
 
@@ -192,8 +195,10 @@ public abstract class BaseCommand implements Command {
    * 
    * @return true if thread should masquerade as a transactional thread.
    */
-  protected boolean shouldMasqueradeForTx(Message clientMessage, ServerConnection serverConnection) {
-    return serverConnection.getClientVersion().compareTo(Version.GFE_66) >= 0 && clientMessage.getTransactionId() > TXManagerImpl.NOTX;
+  protected boolean shouldMasqueradeForTx(Message clientMessage,
+      ServerConnection serverConnection) {
+    return serverConnection.getClientVersion().compareTo(Version.GFE_66) >= 0
+        && clientMessage.getTransactionId() > TXManagerImpl.NOTX;
   }
 
   /**
@@ -260,8 +265,8 @@ public abstract class BaseCommand implements Command {
     return tag;
   }
 
-  public abstract void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
-      throws IOException, ClassNotFoundException, InterruptedException;
+  public abstract void cmdExecute(Message clientMessage, ServerConnection serverConnection,
+      long start) throws IOException, ClassNotFoundException, InterruptedException;
 
   protected void writeReply(Message origMsg, ServerConnection serverConnection) throws IOException {
     Message replyMsg = serverConnection.getReplyMessage();
@@ -292,7 +297,8 @@ public abstract class BaseCommand implements Command {
     }
   }
 
-  private static void handleEOFException(Message msg, ServerConnection serverConnection, Exception eof) {
+  private static void handleEOFException(Message msg, ServerConnection serverConnection,
+      Exception eof) {
     CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
     CacheServerStats stats = serverConnection.getCacheServerStats();
     boolean potentialModification = serverConnection.getPotentialModification();
@@ -307,7 +313,8 @@ public abstract class BaseCommand implements Command {
           int transId = msg != null ? msg.getTransactionId() : Integer.MIN_VALUE;
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.BaseCommand_0_EOFEXCEPTION_DURING_A_WRITE_OPERATION_ON_REGION__1_KEY_2_MESSAGEID_3,
-              new Object[] {serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId }));
+              new Object[] {serverConnection.getName(), serverConnection.getModRegion(),
+                  serverConnection.getModKey(), transId}));
         } else {
           logger.debug("EOF exception", eof);
           logger.info(LocalizedMessage.create(
@@ -332,7 +339,8 @@ public abstract class BaseCommand implements Command {
     serverConnection.setClientDisconnectedException(e);
   }
 
-  private static void handleIOException(Message msg, ServerConnection serverConnection, Exception e) {
+  private static void handleIOException(Message msg, ServerConnection serverConnection,
+      Exception e) {
     CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
     boolean potentialModification = serverConnection.getPotentialModification();
 
@@ -342,7 +350,8 @@ public abstract class BaseCommand implements Command {
           int transId = msg != null ? msg.getTransactionId() : Integer.MIN_VALUE;
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.BaseCommand_0_UNEXPECTED_IOEXCEPTION_DURING_OPERATION_FOR_REGION_1_KEY_2_MESSID_3,
-              new Object[] {serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId }),
+              new Object[] {serverConnection.getName(), serverConnection.getModRegion(),
+                  serverConnection.getModKey(), transId}),
               e);
         } else {
           logger.warn(LocalizedMessage.create(LocalizedStrings.BaseCommand_0_UNEXPECTED_IOEXCEPTION,
@@ -354,7 +363,8 @@ public abstract class BaseCommand implements Command {
     serverConnection.setClientDisconnectedException(e);
   }
 
-  private static void handleShutdownException(Message msg, ServerConnection serverConnection, Exception e) {
+  private static void handleShutdownException(Message msg, ServerConnection serverConnection,
+      Exception e) {
     CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
     boolean potentialModification = serverConnection.getPotentialModification();
 
@@ -363,11 +373,14 @@ public abstract class BaseCommand implements Command {
         int transId = msg != null ? msg.getTransactionId() : Integer.MIN_VALUE;
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.BaseCommand_0_UNEXPECTED_SHUTDOWNEXCEPTION_DURING_OPERATION_ON_REGION_1_KEY_2_MESSAGEID_3,
-            new Object[] {serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId }),
+            new Object[] {serverConnection.getName(), serverConnection.getModRegion(),
+                serverConnection.getModKey(), transId}),
             e);
       } else {
-        logger.warn(LocalizedMessage.create(
-            LocalizedStrings.BaseCommand_0_UNEXPECTED_SHUTDOWNEXCEPTION, serverConnection.getName()), e);
+        logger.warn(
+            LocalizedMessage.create(LocalizedStrings.BaseCommand_0_UNEXPECTED_SHUTDOWNEXCEPTION,
+                serverConnection.getName()),
+            e);
       }
     }
     serverConnection.setFlagProcessMessagesAsFalse();
@@ -399,12 +412,14 @@ public abstract class BaseCommand implements Command {
           if (!wroteExceptionResponse) {
             logger.warn(LocalizedMessage.create(
                 LocalizedStrings.BaseCommand_0_UNEXPECTED_EXCEPTION_DURING_OPERATION_ON_REGION_1_KEY_2_MESSAGEID_3,
-                new Object[] {serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId }),
+                new Object[] {serverConnection.getName(), serverConnection.getModRegion(),
+                    serverConnection.getModKey(), transId}),
                 e);
           } else {
             if (logger.isDebugEnabled()) {
               logger.debug("{}: Exception during operation on region: {} key: {} messageId: {}",
-                  serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId, e);
+                  serverConnection.getName(), serverConnection.getModRegion(),
+                  serverConnection.getModKey(), transId, e);
             }
           }
         } else {
@@ -426,7 +441,8 @@ public abstract class BaseCommand implements Command {
     }
   }
 
-  private static void handleThrowable(Message msg, ServerConnection serverConnection, Throwable th) {
+  private static void handleThrowable(Message msg, ServerConnection serverConnection,
+      Throwable th) {
     boolean requiresResponse = serverConnection.getTransientFlag(REQUIRES_RESPONSE);
     boolean responded = serverConnection.getTransientFlag(RESPONDED);
     boolean requiresChunkedResponse = serverConnection.getTransientFlag(REQUIRES_CHUNKED_RESPONSE);
@@ -435,8 +451,10 @@ public abstract class BaseCommand implements Command {
     try {
       try {
         if (th instanceof Error) {
-          logger.fatal(LocalizedMessage.create(
-              LocalizedStrings.BaseCommand_0_UNEXPECTED_ERROR_ON_SERVER, serverConnection.getName()), th);
+          logger.fatal(
+              LocalizedMessage.create(LocalizedStrings.BaseCommand_0_UNEXPECTED_ERROR_ON_SERVER,
+                  serverConnection.getName()),
+              th);
         }
         if (requiresResponse && !responded) {
           if (requiresChunkedResponse) {
@@ -452,7 +470,8 @@ public abstract class BaseCommand implements Command {
             int transId = msg != null ? msg.getTransactionId() : Integer.MIN_VALUE;
             logger.warn(LocalizedMessage.create(
                 LocalizedStrings.BaseCommand_0_UNEXPECTED_EXCEPTION_DURING_OPERATION_ON_REGION_1_KEY_2_MESSAGEID_3,
-                new Object[] {serverConnection.getName(), serverConnection.getModRegion(), serverConnection.getModKey(), transId }),
+                new Object[] {serverConnection.getName(), serverConnection.getModRegion(),
+                    serverConnection.getModKey(), transId}),
                 th);
           } else {
             logger.warn(LocalizedMessage.create(LocalizedStrings.BaseCommand_0_UNEXPECTED_EXCEPTION,
@@ -471,15 +490,19 @@ public abstract class BaseCommand implements Command {
     }
   }
 
-  protected static void writeChunkedException(Message origMsg, Throwable e, ServerConnection serverConnection) throws IOException {
-    writeChunkedException(origMsg, e, serverConnection, serverConnection.getChunkedResponseMessage());
+  protected static void writeChunkedException(Message origMsg, Throwable e,
+      ServerConnection serverConnection) throws IOException {
+    writeChunkedException(origMsg, e, serverConnection,
+        serverConnection.getChunkedResponseMessage());
   }
 
-  protected static void writeChunkedException(Message origMsg, Throwable e, ServerConnection serverConnection, ChunkedMessage originalResponse) throws IOException {
+  protected static void writeChunkedException(Message origMsg, Throwable e,
+      ServerConnection serverConnection, ChunkedMessage originalResponse) throws IOException {
     writeChunkedException(origMsg, e, serverConnection, originalResponse, 2);
   }
 
-  private static void writeChunkedException(Message origMsg, Throwable exception, ServerConnection serverConnection, ChunkedMessage originalResponse, int numOfParts)
+  private static void writeChunkedException(Message origMsg, Throwable exception,
+      ServerConnection serverConnection, ChunkedMessage originalResponse, int numOfParts)
       throws IOException {
     Throwable e = getClientException(serverConnection, exception);
     ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
@@ -492,8 +515,8 @@ public abstract class BaseCommand implements Command {
         chunkedResponseMsg.addStringPart(getExceptionTrace(e));
       }
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk while reply in progress: {}", serverConnection.getName(),
-            e.getMessage(), e);
+        logger.debug("{}: Sending exception chunk while reply in progress: {}",
+            serverConnection.getName(), e.getMessage(), e);
       }
     } else {
       chunkedResponseMsg.setMessageType(MessageType.EXCEPTION);
@@ -506,7 +529,8 @@ public abstract class BaseCommand implements Command {
         chunkedResponseMsg.addStringPart(getExceptionTrace(e));
       }
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk: {}", serverConnection.getName(), e.getMessage(), e);
+        logger.debug("{}: Sending exception chunk: {}", serverConnection.getName(), e.getMessage(),
+            e);
       }
     }
     chunkedResponseMsg.sendChunk(serverConnection);
@@ -673,8 +697,8 @@ public abstract class BaseCommand implements Command {
     }
   }
 
-  static void writeQueryResponseChunk(Object queryResponseChunk, CollectionType collectionType, boolean lastChunk, ServerConnection serverConnection)
-      throws IOException {
+  static void writeQueryResponseChunk(Object queryResponseChunk, CollectionType collectionType,
+      boolean lastChunk, ServerConnection serverConnection) throws IOException {
     ChunkedMessage queryResponseMsg = serverConnection.getQueryResponseMessage();
     queryResponseMsg.setNumberOfParts(2);
     queryResponseMsg.setLastChunk(lastChunk);
@@ -683,7 +707,8 @@ public abstract class BaseCommand implements Command {
     queryResponseMsg.sendChunk(serverConnection);
   }
 
-  protected static void writeQueryResponseException(Message origMsg, Throwable exception, ServerConnection serverConnection) throws IOException {
+  protected static void writeQueryResponseException(Message origMsg, Throwable exception,
+      ServerConnection serverConnection) throws IOException {
     Throwable e = getClientException(serverConnection, exception);
     ChunkedMessage queryResponseMsg = serverConnection.getQueryResponseMessage();
     ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
@@ -696,8 +721,8 @@ public abstract class BaseCommand implements Command {
       queryResponseMsg.addObjPart(e);
       queryResponseMsg.addStringPart(getExceptionTrace(e));
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk while reply in progress: {}", serverConnection.getName(),
-            e.getMessage(), e);
+        logger.debug("{}: Sending exception chunk while reply in progress: {}",
+            serverConnection.getName(), e.getMessage(), e);
       }
       queryResponseMsg.sendChunk(serverConnection);
     } else {
@@ -710,7 +735,8 @@ public abstract class BaseCommand implements Command {
       chunkedResponseMsg.addObjPart(e);
       chunkedResponseMsg.addStringPart(getExceptionTrace(e));
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk: {}", serverConnection.getName(), e.getMessage(), e);
+        logger.debug("{}: Sending exception chunk: {}", serverConnection.getName(), e.getMessage(),
+            e);
       }
       chunkedResponseMsg.sendChunk(serverConnection);
     }
@@ -721,7 +747,8 @@ public abstract class BaseCommand implements Command {
     // Send chunked response header identifying error message
     ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sending error message header type: {} transaction: {}", serverConnection.getName(), messageType, origMsg.getTransactionId());
+      logger.debug("{}: Sending error message header type: {} transaction: {}",
+          serverConnection.getName(), messageType, origMsg.getTransactionId());
     }
     chunkedResponseMsg.setMessageType(messageType);
     chunkedResponseMsg.setTransactionId(origMsg.getTransactionId());
@@ -737,7 +764,8 @@ public abstract class BaseCommand implements Command {
     chunkedResponseMsg.sendChunk(serverConnection);
   }
 
-  protected static void writeFunctionResponseException(Message origMsg, int messageType, ServerConnection serverConnection, Throwable exception) throws IOException {
+  protected static void writeFunctionResponseException(Message origMsg, int messageType,
+      ServerConnection serverConnection, Throwable exception) throws IOException {
     Throwable e = getClientException(serverConnection, exception);
     ChunkedMessage functionResponseMsg = serverConnection.getFunctionResponseMessage();
     ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
@@ -748,8 +776,8 @@ public abstract class BaseCommand implements Command {
       functionResponseMsg.addObjPart(e);
       functionResponseMsg.addStringPart(getExceptionTrace(e));
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk while reply in progress: {}", serverConnection.getName(),
-            e.getMessage(), e);
+        logger.debug("{}: Sending exception chunk while reply in progress: {}",
+            serverConnection.getName(), e.getMessage(), e);
       }
       functionResponseMsg.sendChunk(serverConnection);
     } else {
@@ -762,7 +790,8 @@ public abstract class BaseCommand implements Command {
       chunkedResponseMsg.addObjPart(e);
       chunkedResponseMsg.addStringPart(getExceptionTrace(e));
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sending exception chunk: {}", serverConnection.getName(), e.getMessage(), e);
+        logger.debug("{}: Sending exception chunk: {}", serverConnection.getName(), e.getMessage(),
+            e);
       }
       chunkedResponseMsg.sendChunk(serverConnection);
     }
@@ -955,14 +984,17 @@ public abstract class BaseCommand implements Command {
   /**
    * @param list is a List of entry keys
    */
-  private static void sendRegisterInterestResponseChunk(Region region, Object riKey, List list, boolean lastChunk, ServerConnection servConn) throws IOException {
+  private static void sendRegisterInterestResponseChunk(Region region, Object riKey, List list,
+      boolean lastChunk, ServerConnection servConn) throws IOException {
     ChunkedMessage chunkedResponseMsg = servConn.getRegisterInterestResponseMessage();
     chunkedResponseMsg.setNumberOfParts(1);
     chunkedResponseMsg.setLastChunk(lastChunk);
     chunkedResponseMsg.addObjPart(list, false);
     String regionName = region == null ? " null " : region.getFullPath();
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sending{}register interest response chunk for region: {} for keys: {} chunk=<{}>", servConn.getName(), lastChunk ? " last " : " ", regionName, riKey, chunkedResponseMsg);
+      logger.debug(
+          "{}: Sending{}register interest response chunk for region: {} for keys: {} chunk=<{}>",
+          servConn.getName(), lastChunk ? " last " : " ", regionName, riKey, chunkedResponseMsg);
     }
 
     chunkedResponseMsg.sendChunk(servConn);
@@ -977,7 +1009,7 @@ public abstract class BaseCommand implements Command {
   private static boolean sendTombstonesInRIResults(ServerConnection servConn,
       InterestResultPolicy policy) {
     return policy == InterestResultPolicy.KEYS_VALUES
-           && servConn.getClientVersion().compareTo(Version.GFE_80) >= 0;
+        && servConn.getClientVersion().compareTo(Version.GFE_80) >= 0;
   }
 
   /**
@@ -998,7 +1030,8 @@ public abstract class BaseCommand implements Command {
     // Handle list of keys
     if (region != null) {
       for (Object entryKey : keyList) {
-        if (region.containsKey(entryKey) || sendTombstonesInRIResults(servConn, policy) && region.containsTombstone(entryKey)) {
+        if (region.containsKey(entryKey)
+            || sendTombstonesInRIResults(servConn, policy) && region.containsTombstone(entryKey)) {
 
           appendInterestResponseKey(region, keyList, entryKey, newKeyList, servConn);
         }
@@ -1016,8 +1049,8 @@ public abstract class BaseCommand implements Command {
       justification = "Null value handled in sendNewRegisterInterestResponseChunk()")
   private static void handleKVSingleton(LocalRegion region, Object entryKey,
       boolean serializeValues, ServerConnection servConn) throws IOException {
-    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, true, region == null || region.getAttributes().getConcurrencyChecksEnabled(),
-        serializeValues);
+    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, true,
+        region == null || region.getAttributes().getConcurrencyChecksEnabled(), serializeValues);
 
     if (region != null) {
       if (region.containsKey(entryKey) || region.containsTombstone(entryKey)) {
@@ -1083,8 +1116,8 @@ public abstract class BaseCommand implements Command {
       return;
     }
 
-    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, true, region == null || region.getAttributes().getConcurrencyChecksEnabled(),
-        serializeValues);
+    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, true,
+        region == null || region.getAttributes().getConcurrencyChecksEnabled(), serializeValues);
 
     if (region != null) {
 
@@ -1190,7 +1223,7 @@ public abstract class BaseCommand implements Command {
 
   private static boolean isRemovalToken(final Object value) {
     return value == Token.REMOVED_PHASE1 || value == Token.REMOVED_PHASE2
-           || value == Token.DESTROYED || value == Token.TOMBSTONE;
+        || value == Token.DESTROYED || value == Token.TOMBSTONE;
   }
 
   public static void appendNewRegisterInterestResponseChunkFromLocal(LocalRegion region,
@@ -1207,7 +1240,8 @@ public abstract class BaseCommand implements Command {
       if (values.size() == MAXIMUM_CHUNK_SIZE) {
         // Send the chunk and clear the list
         // values.setKeys(null); // Now we need to send keys too.
-        sendNewRegisterInterestResponseChunk(region, riKeys != null ? riKeys : "ALL_KEYS", values, false, servConn);
+        sendNewRegisterInterestResponseChunk(region, riKeys != null ? riKeys : "ALL_KEYS", values,
+            false, servConn);
         values.clear();
       }
     } // for
@@ -1245,7 +1279,8 @@ public abstract class BaseCommand implements Command {
       }
       if (values.size() == MAXIMUM_CHUNK_SIZE) {
         // Send the chunk and clear the list
-        sendNewRegisterInterestResponseChunk(region, riKeys != null ? riKeys : "ALL_KEYS", values, false, servConn);
+        sendNewRegisterInterestResponseChunk(region, riKeys != null ? riKeys : "ALL_KEYS", values,
+            false, servConn);
         values.clear();
       }
     } // for
@@ -1259,9 +1294,9 @@ public abstract class BaseCommand implements Command {
     chunkedResponseMsg.addObjPart(list, false);
     String regionName = region == null ? " null " : region.getFullPath();
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sending{}register interest response chunk for region: {} for keys: {} chunk=<{}>",
-        servConn.getName(), lastChunk ? " last " : " ", regionName, riKey, chunkedResponseMsg
-        );
+      logger.debug(
+          "{}: Sending{}register interest response chunk for region: {} for keys: {} chunk=<{}>",
+          servConn.getName(), lastChunk ? " last " : " ", regionName, riKey, chunkedResponseMsg);
     }
     chunkedResponseMsg.sendChunk(servConn);
   }
@@ -1341,8 +1376,8 @@ public abstract class BaseCommand implements Command {
       handleKVKeysPR((PartitionedRegion) region, keyList, serializeValues, servConn);
       return;
     }
-    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, true, region == null || region.getAttributes().getConcurrencyChecksEnabled(),
-        serializeValues);
+    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, true,
+        region == null || region.getAttributes().getConcurrencyChecksEnabled(), serializeValues);
 
     // Handle list of keys
     if (region != null) {
@@ -1384,7 +1419,8 @@ public abstract class BaseCommand implements Command {
    * @param entryKey key we're responding to
    * @param list list to append to
    */
-  private static void appendInterestResponseKey(LocalRegion region, Object riKey, Object entryKey, List list, ServerConnection servConn) throws IOException {
+  private static void appendInterestResponseKey(LocalRegion region, Object riKey, Object entryKey,
+      List list, ServerConnection servConn) throws IOException {
     list.add(entryKey);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: appendInterestResponseKey <{}>; list size was {}; region: {}",
@@ -1397,8 +1433,8 @@ public abstract class BaseCommand implements Command {
     }
   }
 
-  private static void appendInterestResponseKeys(LocalRegion region, Object riKey, Collection entryKeys, List collector, ServerConnection servConn)
-      throws IOException {
+  private static void appendInterestResponseKeys(LocalRegion region, Object riKey,
+      Collection entryKeys, List collector, ServerConnection servConn) throws IOException {
     for (final Object entryKey : entryKeys) {
       appendInterestResponseKey(region, riKey, entryKey, collector, servConn);
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
index 5631184..4c19df2 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
@@ -125,16 +125,16 @@ public class CacheClientNotifier {
    * BridgeServer.SOCKET_BUFFER_SIZE system property.
    */
   private static final int socketBufferSize =
-    Integer.getInteger("BridgeServer.SOCKET_BUFFER_SIZE", 32768);
+      Integer.getInteger("BridgeServer.SOCKET_BUFFER_SIZE", 32768);
 
   private static final long CLIENT_PING_TASK_PERIOD =
-    Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingPeriod", 60000);
+      Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingPeriod", 60000);
 
   /**
    * package-private to avoid synthetic accessor
    */
   static final long CLIENT_PING_TASK_COUNTER =
-    Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingCounter", 3);
+      Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingCounter", 3);
 
   private static volatile CacheClientNotifier ccnSingleton;
 
@@ -189,7 +189,7 @@ public class CacheClientNotifier {
    * provide a read-only {@code Set} of listeners.
    */
   private final Set readableInterestRegistrationListeners =
-    Collections.unmodifiableSet(this.writableInterestRegistrationListeners);
+      Collections.unmodifiableSet(this.writableInterestRegistrationListeners);
 
   private final Map<String, DefaultQuery> compiledQueries = new ConcurrentHashMap<>();
 

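The hunk above only re-indents these declarations, but the two idioms it touches read more easily in isolation: constants backed by JVM system properties with defaults, and a writable listener set exposed only through an unmodifiable view. A minimal, self-contained sketch of both idioms (the class name, property names, and Runnable listener type are invented for illustration; only Integer.getInteger, Long.getLong, and Collections.unmodifiableSet come from the diff):

// Illustrative sketch, not part of the commit above.
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;

public class PropertyBackedConfigSketch {
  // Integer.getInteger/Long.getLong read a JVM system property and fall back to the
  // supplied default when the property is unset or not parseable.
  private static final int SOCKET_BUFFER_SIZE =
      Integer.getInteger("example.SOCKET_BUFFER_SIZE", 32768);
  private static final long PING_PERIOD_MILLIS =
      Long.getLong("example.serverToClientPingPeriod", 60000);

  // Registrations go through the writable set; callers only ever see the unmodifiable view.
  private final Set<Runnable> writableListeners = new CopyOnWriteArraySet<>();
  private final Set<Runnable> readableListeners = Collections.unmodifiableSet(writableListeners);

  public void addListener(Runnable listener) {
    writableListeners.add(listener);
  }

  public Set<Runnable> getListeners() {
    return readableListeners;
  }

  public static void main(String[] args) {
    System.out.println("buffer=" + SOCKET_BUFFER_SIZE + " pingPeriod=" + PING_PERIOD_MILLIS);
  }
}

Running the sketch with -Dexample.SOCKET_BUFFER_SIZE=65536 shows the property overriding the default.
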
http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerInterestRegistrationMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerInterestRegistrationMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerInterestRegistrationMessage.java
index 5860982..7118347 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerInterestRegistrationMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerInterestRegistrationMessage.java
@@ -1,18 +1,16 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
  *
- *      http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
  */
 package org.apache.geode.internal.cache.tier.sockets;
 
@@ -31,7 +29,8 @@ import org.apache.geode.distributed.internal.ReplyProcessor21;
 import org.apache.geode.internal.InternalDataSerializer;
 
 /**
- * Send interest registration to another server. Since interest registration performs a state-flush operation this message must not be transmitted on an ordered socket.
+ * Send interest registration to another server. Since interest registration performs a state-flush
+ * operation this message must not be transmitted on an ordered socket.
  * <p>
  * Extracted from CacheClientNotifier
  */
@@ -42,7 +41,8 @@ public class ServerInterestRegistrationMessage extends HighPriorityDistributionM
   private ClientInterestMessageImpl clientMessage;
   private int processorId;
 
-  ServerInterestRegistrationMessage(ClientProxyMembershipID clientId, ClientInterestMessageImpl clientInterestMessage) {
+  ServerInterestRegistrationMessage(ClientProxyMembershipID clientId,
+      ClientInterestMessageImpl clientInterestMessage) {
     this.clientId = clientId;
     this.clientMessage = clientInterestMessage;
   }
@@ -51,7 +51,8 @@ public class ServerInterestRegistrationMessage extends HighPriorityDistributionM
     // deserializing in fromData
   }
 
-  static void sendInterestChange(DM dm, ClientProxyMembershipID clientId, ClientInterestMessageImpl clientInterestMessage) {
+  static void sendInterestChange(DM dm, ClientProxyMembershipID clientId,
+      ClientInterestMessageImpl clientInterestMessage) {
     ServerInterestRegistrationMessage registrationMessage =
         new ServerInterestRegistrationMessage(clientId, clientInterestMessage);
 

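The reflowed javadoc above records two constraints: interest registration performs a state flush, so the message must not be transmitted on an ordered socket, and the class keeps a no-arg constructor solely for "deserializing in fromData". A minimal sketch of that constructor-for-deserialization pattern using Geode's public DataSerializable interface (the InterestChangeSketch class and its clientId field are invented; this is not the internal ServerInterestRegistrationMessage implementation):

// Illustrative sketch, not part of the commit above.
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.geode.DataSerializable;
import org.apache.geode.DataSerializer;

public class InterestChangeSketch implements DataSerializable {
  private String clientId;

  // Used by senders that know the payload up front.
  public InterestChangeSketch(String clientId) {
    this.clientId = clientId;
  }

  // Required for deserialization: the framework creates an empty instance reflectively,
  // then populates it through fromData().
  public InterestChangeSketch() {}

  @Override
  public void toData(DataOutput out) throws IOException {
    DataSerializer.writeString(clientId, out);
  }

  @Override
  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
    this.clientId = DataSerializer.readString(in);
  }
}
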
http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxEnum.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxEnum.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxEnum.java
index fb0bd50..2cb36cd 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxEnum.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxEnum.java
@@ -44,7 +44,8 @@ public class AddPdxEnum extends BaseCommand {
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received get pdx id for enum request ({} parts) from {}",
-          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(),
+          serverConnection.getSocketString());
     }
     int noOfParts = clientMessage.getNumberOfParts();
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxType.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxType.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxType.java
index 10a065c..3feba0d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxType.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxType.java
@@ -44,7 +44,8 @@ public class AddPdxType extends BaseCommand {
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received get pdx id for type request ({} parts) from {}",
-          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(),
+          serverConnection.getSocketString());
     }
     int noOfParts = clientMessage.getNumberOfParts();
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClearRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClearRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClearRegion.java
index c9c5a9d..ab19954 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClearRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClearRegion.java
@@ -78,8 +78,9 @@ public class ClearRegion extends BaseCommand {
     }
     regionName = regionNamePart.getString();
     if (logger.isDebugEnabled()) {
-      logger.debug(serverConnection.getName() + ": Received clear region request (" + clientMessage.getPayloadLength()
-                   + " bytes) from " + serverConnection.getSocketString() + " for region " + regionName);
+      logger.debug(serverConnection.getName() + ": Received clear region request ("
+          + clientMessage.getPayloadLength() + " bytes) from " + serverConnection.getSocketString()
+          + " for region " + regionName);
     }
 
     // Process the clear region request
@@ -91,7 +92,8 @@ public class ClearRegion extends BaseCommand {
           LocalizedStrings.ClearRegion_THE_INPUT_REGION_NAME_FOR_THE_CLEAR_REGION_REQUEST_IS_NULL
               .toLocalizedString();
 
-      writeErrorResponse(clientMessage, MessageType.CLEAR_REGION_DATA_ERROR, errMessage, serverConnection);
+      writeErrorResponse(clientMessage, MessageType.CLEAR_REGION_DATA_ERROR, errMessage,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -108,7 +110,8 @@ public class ClearRegion extends BaseCommand {
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId =
+        new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     try {
       // Clear the region
@@ -141,7 +144,8 @@ public class ClearRegion extends BaseCommand {
     writeReply(clientMessage, serverConnection);
     serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug(serverConnection.getName() + ": Sent clear region response for region " + regionName);
+      logger.debug(
+          serverConnection.getName() + ": Sent clear region response for region " + regionName);
     }
     stats.incWriteClearRegionResponseTime(DistributionStats.getStatTime() - start);
   }

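Most hunks in this commit only reflow guarded debug statements, but they do expose two logging styles: ClearRegion builds its message with string concatenation, while the other commands use Log4j2's parameterized form, which substitutes arguments only when the level is enabled. A small sketch contrasting the two styles (the class name and message text are invented for illustration):

// Illustrative sketch, not part of the commit above.
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class DebugLoggingSketch {
  private static final Logger logger = LogManager.getLogger(DebugLoggingSketch.class);

  public static void main(String[] args) {
    String name = "server-1";
    String regionName = "exampleRegion";

    if (logger.isDebugEnabled()) {
      // Concatenation builds the full string before the call is made.
      logger.debug(name + ": Received clear region request for region " + regionName);

      // The parameterized form defers substitution of {} placeholders until Log4j2
      // knows the DEBUG level is enabled, which is why it is the prevailing style here.
      logger.debug("{}: Received clear region request for region {}", name, regionName);
    }
  }
}
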
http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClientReady.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClientReady.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClientReady.java
index 053ef8a..cf9c470 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClientReady.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClientReady.java
@@ -35,7 +35,8 @@ public class ClientReady extends BaseCommand {
   private ClientReady() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     CacheServerStats stats = serverConnection.getCacheServerStats();
     {
       long oldStart = start;
@@ -47,11 +48,12 @@ public class ClientReady extends BaseCommand {
       int clientPort = serverConnection.getSocketPort();
       if (logger.isDebugEnabled()) {
         logger.debug("{}: Received client ready request ({} bytes) from {} on {}:{}",
-            serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getProxyID(), clientHost,
-            clientPort);
+            serverConnection.getName(), clientMessage.getPayloadLength(),
+            serverConnection.getProxyID(), clientHost, clientPort);
       }
 
-      serverConnection.getAcceptor().getCacheClientNotifier().readyForEvents(serverConnection.getProxyID());
+      serverConnection.getAcceptor().getCacheClientNotifier()
+          .readyForEvents(serverConnection.getProxyID());
 
       long oldStart = start;
       start = DistributionStats.getStatTime();
@@ -62,7 +64,7 @@ public class ClientReady extends BaseCommand {
 
       if (logger.isDebugEnabled()) {
         logger.debug(serverConnection.getName() + ": Processed client ready request from "
-                     + serverConnection.getProxyID() + " on " + clientHost + ":" + clientPort);
+            + serverConnection.getProxyID() + " on " + clientHost + ":" + clientPort);
       }
     } finally {
       stats.incWriteClientReadyResponseTime(DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseConnection.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseConnection.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseConnection.java
index 378a322..21f0cad 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseConnection.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseConnection.java
@@ -39,7 +39,8 @@ public class CloseConnection extends BaseCommand {
   private CloseConnection() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     CacheServerStats stats = serverConnection.getCacheServerStats();
     long oldStart = start;
     boolean respondToClient = serverConnection.getClientVersion().compareTo(Version.GFE_90) >= 0;
@@ -64,12 +65,12 @@ public class CloseConnection extends BaseCommand {
       byte[] keepaliveByte = keepalivePart.getSerializedForm();
       boolean keepalive = (keepaliveByte == null || keepaliveByte[0] == 0) ? false : true;
 
-      serverConnection.getAcceptor().getCacheClientNotifier().setKeepAlive(serverConnection.getProxyID(),
-          keepalive);
+      serverConnection.getAcceptor().getCacheClientNotifier()
+          .setKeepAlive(serverConnection.getProxyID(), keepalive);
 
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Processed close request from {}:{}, keepAlive: {}", serverConnection.getName(),
-            clientHost, clientPort, keepalive);
+        logger.debug("{}: Processed close request from {}:{}, keepAlive: {}",
+            serverConnection.getName(), clientHost, clientPort, keepalive);
       }
     } finally {
       if (respondToClient) {

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CommitCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CommitCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CommitCommand.java
index b2bba4f..366d77c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CommitCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CommitCommand.java
@@ -50,7 +50,8 @@ public class CommitCommand extends BaseCommand {
   private CommitCommand() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
     TXManagerImpl txMgr = (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
     InternalDistributedMember client =

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey.java
index 50d1197..9cb2528 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey.java
@@ -51,7 +51,8 @@ public class ContainsKey extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     Part regionNamePart = null;
     Part keyPart = null;
     String regionName = null;
@@ -78,7 +79,8 @@ public class ContainsKey extends BaseCommand {
     }
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received containsKey request ({} bytes) from {} for region {} key {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName, key);
     }
 
     // Process the containsKey request
@@ -99,7 +101,8 @@ public class ContainsKey extends BaseCommand {
             LocalizedStrings.ContainsKey_THE_INPUT_REGION_NAME_FOR_THE_CONTAINSKEY_REQUEST_IS_NULL
                 .toLocalizedString();
       }
-      writeErrorResponse(clientMessage, MessageType.CONTAINS_KEY_DATA_ERROR, errMessage, serverConnection);
+      writeErrorResponse(clientMessage, MessageType.CONTAINS_KEY_DATA_ERROR, errMessage,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey66.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey66.java
index 53bb414..b2ce055 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey66.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey66.java
@@ -55,7 +55,8 @@ public class ContainsKey66 extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     Part regionNamePart = null, keyPart = null;
     String regionName = null;
     Object key = null;
@@ -82,7 +83,8 @@ public class ContainsKey66 extends BaseCommand {
     }
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received containsKey request ({} bytes) from {} for region {} key {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName, key);
     }
 
     // Process the containsKey request
@@ -103,7 +105,8 @@ public class ContainsKey66 extends BaseCommand {
             LocalizedStrings.ContainsKey_THE_INPUT_REGION_NAME_FOR_THE_CONTAINSKEY_REQUEST_IS_NULL
                 .toLocalizedString();
       }
-      writeErrorResponse(clientMessage, MessageType.CONTAINS_KEY_DATA_ERROR, errMessage, serverConnection);
+      writeErrorResponse(clientMessage, MessageType.CONTAINS_KEY_DATA_ERROR, errMessage,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CreateRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CreateRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CreateRegion.java
index b7ab01b..0dec596 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CreateRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CreateRegion.java
@@ -41,7 +41,8 @@ public class CreateRegion extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     Part regionNamePart = null;
     String regionName = null;
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
@@ -59,8 +60,8 @@ public class CreateRegion extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received create region request ({} bytes) from {} for parent region {} region {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), parentRegionName,
-          regionName);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), parentRegionName, regionName);
     }
 
     // Process the create region request
@@ -82,7 +83,8 @@ public class CreateRegion extends BaseCommand {
             LocalizedStrings.CreateRegion_THE_INPUT_REGION_NAME_FOR_THE_CREATE_REGION_REQUEST_IS_NULL
                 .toLocalizedString();
       }
-      writeErrorResponse(clientMessage, MessageType.CREATE_REGION_DATA_ERROR, errMessage, serverConnection);
+      writeErrorResponse(clientMessage, MessageType.CREATE_REGION_DATA_ERROR, errMessage,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Default.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Default.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Default.java
index 359e1b4..3daaec5 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Default.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Default.java
@@ -37,14 +37,16 @@ public class Default extends BaseCommand {
   private Default() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     // requiresResponse = true; NOT NEEDED... ALWAYS SEND ERROR RESPONSE
 
     logger.fatal(
         LocalizedMessage.create(LocalizedStrings.Default_0_UNKNOWN_MESSAGE_TYPE_1_WITH_TX_2_FROM_3,
-            new Object[] {
-              serverConnection.getName(), MessageType.getString(clientMessage.getMessageType()),
-                Integer.valueOf(clientMessage.getTransactionId()), serverConnection.getSocketString()}));
+            new Object[] {serverConnection.getName(),
+                MessageType.getString(clientMessage.getMessageType()),
+                Integer.valueOf(clientMessage.getTransactionId()),
+                serverConnection.getSocketString()}));
     writeErrorResponse(clientMessage, MessageType.UNKNOWN_MESSAGE_TYPE_ERROR, serverConnection);
     // responded = true; NOT NEEDED... ALWAYS SEND ERROR RESPONSE
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy.java
index 0699c8b..83ab394 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy.java
@@ -90,7 +90,8 @@ public class Destroy extends BaseCommand {
     }
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received destroy request ({} bytes) from {} for region {} key {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName, key);
     }
 
     // Process the destroy request
@@ -110,7 +111,8 @@ public class Destroy extends BaseCommand {
             .append(LocalizedStrings.Destroy__THE_INPUT_REGION_NAME_FOR_THE_DESTROY_REQUEST_IS_NULL
                 .toLocalizedString());
       }
-      writeErrorResponse(clientMessage, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.DESTROY_DATA_ERROR, errMessage.toString(),
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -128,7 +130,8 @@ public class Destroy extends BaseCommand {
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId =
+        new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     try {
       // for integrated security
@@ -154,7 +157,7 @@ public class Destroy extends BaseCommand {
       // exception happens. Just log it and continue.
       logger.info(LocalizedMessage.create(
           LocalizedStrings.Destroy_0_DURING_ENTRY_DESTROY_NO_ENTRY_WAS_FOUND_FOR_KEY_1,
-          new Object[] { serverConnection.getName(), key}));
+          new Object[] {serverConnection.getName(), key}));
     } catch (RegionDestroyedException rde) {
       writeException(clientMessage, rde, false, serverConnection);
       serverConnection.setAsTrue(RESPONDED);
@@ -198,8 +201,8 @@ public class Destroy extends BaseCommand {
     }
     serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent destroy response for region {} key {}", serverConnection.getName(), regionName,
-          key);
+      logger.debug("{}: Sent destroy response for region {} key {}", serverConnection.getName(),
+          regionName, key);
     }
     stats.incWriteDestroyResponseTime(DistributionStats.getStatTime() - start);
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy65.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy65.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy65.java
index 0ee0fc4..3aebbb5 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy65.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy65.java
@@ -158,8 +158,9 @@ public class Destroy65 extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received destroy65 request ({} bytes; op={}) from {} for region {} key {}{} txId {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), operation, serverConnection.getSocketString(),
-          regionName, key, (operation == Operation.REMOVE ? " value=" + expectedOldValue : ""),
+          serverConnection.getName(), clientMessage.getPayloadLength(), operation,
+          serverConnection.getSocketString(), regionName, key,
+          (operation == Operation.REMOVE ? " value=" + expectedOldValue : ""),
           clientMessage.getTransactionId());
     }
     boolean entryNotFoundForRemove = false;
@@ -181,7 +182,8 @@ public class Destroy65 extends BaseCommand {
             .append(LocalizedStrings.Destroy__THE_INPUT_REGION_NAME_FOR_THE_DESTROY_REQUEST_IS_NULL
                 .toLocalizedString());
       }
-      writeErrorResponse(clientMessage, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.DESTROY_DATA_ERROR, errMessage.toString(),
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -199,7 +201,8 @@ public class Destroy65 extends BaseCommand {
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId =
+        new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
     EventIDHolder clientEvent = new EventIDHolder(eventId);
 
     Breadcrumbs.setEventId(eventId);
@@ -236,7 +239,8 @@ public class Destroy65 extends BaseCommand {
         }
       }
       if (operation == null || operation == Operation.DESTROY) {
-        region.basicBridgeDestroy(key, callbackArg, serverConnection.getProxyID(), true, clientEvent);
+        region.basicBridgeDestroy(key, callbackArg, serverConnection.getProxyID(), true,
+            clientEvent);
       } else {
         // this throws exceptions if expectedOldValue checks fail
         try {
@@ -254,15 +258,15 @@ public class Destroy65 extends BaseCommand {
             }
             // try the operation anyway to ensure that it's been distributed to all servers
             try {
-              region.basicBridgeRemove(key, expectedOldValue, callbackArg, serverConnection.getProxyID(),
-                  true, clientEvent);
+              region.basicBridgeRemove(key, expectedOldValue, callbackArg,
+                  serverConnection.getProxyID(), true, clientEvent);
             } catch (EntryNotFoundException e) {
               // ignore, and don't set entryNotFoundForRemove because this was a successful
               // operation - bug #51664
             }
           } else {
-            region.basicBridgeRemove(key, expectedOldValue, callbackArg, serverConnection.getProxyID(),
-                true, clientEvent);
+            region.basicBridgeRemove(key, expectedOldValue, callbackArg,
+                serverConnection.getProxyID(), true, clientEvent);
             if (logger.isDebugEnabled()) {
               logger.debug("region.remove succeeded");
             }
@@ -281,7 +285,7 @@ public class Destroy65 extends BaseCommand {
       // exception happens. Just log it and continue.
       logger.info(LocalizedMessage.create(
           LocalizedStrings.Destroy_0_DURING_ENTRY_DESTROY_NO_ENTRY_WAS_FOUND_FOR_KEY_1,
-          new Object[] { serverConnection.getName(), key}));
+          new Object[] {serverConnection.getName(), key}));
       entryNotFoundForRemove = true;
     } catch (RegionDestroyedException rde) {
       writeException(clientMessage, rde, false, serverConnection);
@@ -318,17 +322,19 @@ public class Destroy65 extends BaseCommand {
             pr.getNetworkHopType(), clientEvent.getVersionTag());
         pr.clearNetworkHopData();
       } else {
-        writeReply(clientMessage, serverConnection, entryNotFoundForRemove | clientEvent.getIsRedestroyedEntry(),
+        writeReply(clientMessage, serverConnection,
+            entryNotFoundForRemove | clientEvent.getIsRedestroyedEntry(),
             clientEvent.getVersionTag());
       }
     } else {
-      writeReply(clientMessage, serverConnection, entryNotFoundForRemove | clientEvent.getIsRedestroyedEntry(),
+      writeReply(clientMessage, serverConnection,
+          entryNotFoundForRemove | clientEvent.getIsRedestroyedEntry(),
           clientEvent.getVersionTag());
     }
     serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent destroy response for region {} key {}", serverConnection.getName(), regionName,
-          key);
+      logger.debug("{}: Sent destroy response for region {} key {}", serverConnection.getName(),
+          regionName, key);
     }
     stats.incWriteDestroyResponseTime(DistributionStats.getStatTime() - start);
 

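One detail of the Destroy65 reply path that the reflow leaves intact is the non-short-circuit OR in entryNotFoundForRemove | clientEvent.getIsRedestroyedEntry(): with |, both operands are always evaluated. A tiny sketch of the difference between | and || on booleans (the class and method names are invented for illustration):

// Illustrative sketch, not part of the commit above.
public class BooleanOrSketch {
  private static boolean sideEffect() {
    System.out.println("sideEffect() evaluated");
    return true;
  }

  public static void main(String[] args) {
    boolean alreadyTrue = true;

    // '|' evaluates both operands, so sideEffect() runs even though the result is already known.
    boolean eager = alreadyTrue | sideEffect();

    // '||' short-circuits, so sideEffect() is skipped here.
    boolean lazy = alreadyTrue || sideEffect();

    System.out.println(eager + " " + lazy);
  }
}
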
http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/DestroyRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/DestroyRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/DestroyRegion.java
index de37040..886994e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/DestroyRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/DestroyRegion.java
@@ -89,7 +89,8 @@ public class DestroyRegion extends BaseCommand {
     regionName = regionNamePart.getString();
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received destroy region request ({} bytes) from {} for region {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName);
     }
 
     // Process the destroy region request
@@ -101,7 +102,8 @@ public class DestroyRegion extends BaseCommand {
           LocalizedStrings.DestroyRegion__THE_INPUT_REGION_NAME_FOR_THE_DESTROY_REGION_REQUEST_IS_NULL
               .toLocalizedString());
 
-      writeErrorResponse(clientMessage, MessageType.DESTROY_REGION_DATA_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.DESTROY_REGION_DATA_ERROR,
+          errMessage.toString(), serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -120,7 +122,8 @@ public class DestroyRegion extends BaseCommand {
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId =
+        new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     try {
      // user needs to have data:manage on all regions in order to destroy a particular region
@@ -138,7 +141,7 @@ public class DestroyRegion extends BaseCommand {
     } catch (DistributedSystemDisconnectedException e) {
       // FIXME better exception hierarchy would avoid this check
       if (serverConnection.getCachedRegionHelper().getCache().getCancelCriterion()
-                          .cancelInProgress() != null) {
+          .cancelInProgress() != null) {
         if (logger.isDebugEnabled()) {
           logger.debug(
               "{} ignoring message of type {} from client {} because shutdown occurred during message processing.",

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction.java
index 1d0fc8c..c146d27 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction.java
@@ -64,7 +64,8 @@ public class ExecuteFunction extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start)
+      throws IOException {
     Object function = null;
     Object args = null;
     MemberMappedArgument memberMappedArg = null;

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction65.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction65.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction65.java
index 4a0a993..23d5b18 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction65.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction65.java
@@ -63,7 +63,8 @@ public class ExecuteFunction65 extends BaseCommand {
   ExecuteFunction65() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start)
+      throws IOException {
     Object function = null;
     Object args = null;
     MemberMappedArgument memberMappedArg = null;

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction66.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction66.java
index f4aac7a..46302bc 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction66.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction66.java
@@ -87,7 +87,8 @@ public class ExecuteFunction66 extends BaseCommand {
   ExecuteFunction66() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start)
+      throws IOException {
     Object function = null;
     Object args = null;
     MemberMappedArgument memberMappedArg = null;

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction70.java
index bf5f70b..09fe20e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction70.java
@@ -51,7 +51,8 @@ public class ExecuteFunction70 extends ExecuteFunction66 {
   private ExecuteFunction70() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     super.cmdExecute(clientMessage, serverConnection, start);
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction.java
index 3147fe5..82a378d 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction.java
@@ -63,7 +63,8 @@ public class ExecuteRegionFunction extends BaseCommand {
   private ExecuteRegionFunction() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start)
+      throws IOException {
     String regionName = null;
     Object function = null;
     Object args = null;

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction61.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction61.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction61.java
index 73ab621..12919d0 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction61.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction61.java
@@ -63,7 +63,8 @@ public class ExecuteRegionFunction61 extends BaseCommand {
   private ExecuteRegionFunction61() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start)
+      throws IOException {
     String regionName = null;
     Object function = null;
     Object args = null;

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction65.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction65.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction65.java
index f1540f9..3be907b 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction65.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction65.java
@@ -60,7 +60,8 @@ public class ExecuteRegionFunction65 extends BaseCommand {
   private ExecuteRegionFunction65() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start)
+      throws IOException {
     String regionName = null;
     Object function = null;
     Object args = null;

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java
index 25d69d6..674082c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java
@@ -64,7 +64,8 @@ public class ExecuteRegionFunction66 extends BaseCommand {
   private ExecuteRegionFunction66() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start)
+      throws IOException {
     String regionName = null;
     Object function = null;
     Object args = null;

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java
index 725f03b..cf96137 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java
@@ -62,7 +62,8 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
   private ExecuteRegionFunctionSingleHop() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start)
+      throws IOException {
 
     String regionName = null;
     Object function = null;


[24/43] geode git commit: Cleanup BaseCommand

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/DestroyRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/DestroyRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/DestroyRegion.java
index 13fef02..de37040 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/DestroyRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/DestroyRegion.java
@@ -45,15 +45,15 @@ public class DestroyRegion extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, callbackArgPart = null;
     String regionName = null;
     Object callbackArg = null;
     Part eventPart = null;
     StringBuffer errMessage = new StringBuffer();
-    CacheServerStats stats = servConn.getCacheServerStats();
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    CacheServerStats stats = serverConnection.getCacheServerStats();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
 
     {
       long oldStart = start;
@@ -61,11 +61,11 @@ public class DestroyRegion extends BaseCommand {
       stats.incReadDestroyRegionRequestTime(start - oldStart);
     }
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
-    eventPart = msg.getPart(1);
+    regionNamePart = clientMessage.getPart(0);
+    eventPart = clientMessage.getPart(1);
     // callbackArgPart = null; (redundant assignment)
-    if (msg.getNumberOfParts() > 2) {
-      callbackArgPart = msg.getPart(2);
+    if (clientMessage.getNumberOfParts() > 2) {
+      callbackArgPart = clientMessage.getPart(2);
       try {
         callbackArg = callbackArgPart.getObject();
       } catch (DistributedSystemDisconnectedException se) {
@@ -73,47 +73,46 @@ public class DestroyRegion extends BaseCommand {
         if (logger.isDebugEnabled()) {
           logger.debug(
               "{} ignoring message of type {} from client {} because shutdown occurred during message processing.",
-              servConn.getName(), MessageType.getString(msg.getMessageType()),
-              servConn.getProxyID());
+              serverConnection.getName(), MessageType.getString(clientMessage.getMessageType()),
+              serverConnection.getProxyID());
         }
 
-        servConn.setFlagProcessMessagesAsFalse();
-        servConn.setClientDisconnectedException(se);
+        serverConnection.setFlagProcessMessagesAsFalse();
+        serverConnection.setClientDisconnectedException(se);
         return;
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
     regionName = regionNamePart.getString();
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received destroy region request ({} bytes) from {} for region {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName);
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName);
     }
 
     // Process the destroy region request
     if (regionName == null) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.DestroyRegion_0_THE_INPUT_REGION_NAME_FOR_THE_DESTROY_REGION_REQUEST_IS_NULL,
-          servConn.getName()));
+          serverConnection.getName()));
       errMessage.append(
           LocalizedStrings.DestroyRegion__THE_INPUT_REGION_NAME_FOR_THE_DESTROY_REGION_REQUEST_IS_NULL
               .toLocalizedString());
 
-      writeErrorResponse(msg, MessageType.DESTROY_REGION_DATA_ERROR, errMessage.toString(),
-          servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.DESTROY_REGION_DATA_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason =
           LocalizedStrings.DestroyRegion_REGION_WAS_NOT_FOUND_DURING_DESTROY_REGION_REQUEST
               .toLocalizedString();
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -121,45 +120,45 @@ public class DestroyRegion extends BaseCommand {
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     try {
      // user needs to have data:manage on all regions in order to destroy a particular region
       this.securityService.authorizeDataManage();
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         RegionDestroyOperationContext destroyContext =
             authzRequest.destroyRegionAuthorize(regionName, callbackArg);
         callbackArg = destroyContext.getCallbackArg();
       }
       // region.destroyRegion(callbackArg);
-      region.basicBridgeDestroyRegion(callbackArg, servConn.getProxyID(),
+      region.basicBridgeDestroyRegion(callbackArg, serverConnection.getProxyID(),
           true /* boolean from cache Client */, eventId);
     } catch (DistributedSystemDisconnectedException e) {
       // FIXME better exception hierarchy would avoid this check
-      if (servConn.getCachedRegionHelper().getCache().getCancelCriterion()
-          .cancelInProgress() != null) {
+      if (serverConnection.getCachedRegionHelper().getCache().getCancelCriterion()
+                          .cancelInProgress() != null) {
         if (logger.isDebugEnabled()) {
           logger.debug(
               "{} ignoring message of type {} from client {} because shutdown occurred during message processing.",
-              servConn.getName(), MessageType.getString(msg.getMessageType()),
-              servConn.getProxyID());
+              serverConnection.getName(), MessageType.getString(clientMessage.getMessageType()),
+              serverConnection.getProxyID());
         }
-        servConn.setFlagProcessMessagesAsFalse();
-        servConn.setClientDisconnectedException(e);
+        serverConnection.setFlagProcessMessagesAsFalse();
+        serverConnection.setClientDisconnectedException(e);
       } else {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       }
       return;
     } catch (Exception e) {
      // If an interrupted exception is thrown, rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
 
       // Otherwise, write an exception message and continue
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -169,10 +168,10 @@ public class DestroyRegion extends BaseCommand {
       start = DistributionStats.getStatTime();
       stats.incProcessDestroyRegionTime(start - oldStart);
     }
-    writeReply(msg, servConn);
-    servConn.setAsTrue(RESPONDED);
+    writeReply(clientMessage, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent destroy region response for region {}", servConn.getName(),
+      logger.debug("{}: Sent destroy region response for region {}", serverConnection.getName(),
           regionName);
     }
     stats.incWriteDestroyRegionResponseTime(DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction.java
index e63ac22..1d0fc8c 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction.java
@@ -64,21 +64,21 @@ public class ExecuteFunction extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
     Object function = null;
     Object args = null;
     MemberMappedArgument memberMappedArg = null;
     byte hasResult = 0;
     try {
-      hasResult = msg.getPart(0).getSerializedForm()[0];
+      hasResult = clientMessage.getPart(0).getSerializedForm()[0];
       if (hasResult == 1) {
         servConn.setAsTrue(REQUIRES_RESPONSE);
         servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
       }
-      function = msg.getPart(1).getStringOrObject();
-      args = msg.getPart(2).getObject();
+      function = clientMessage.getPart(1).getStringOrObject();
+      args = clientMessage.getPart(2).getObject();
 
-      Part part = msg.getPart(3);
+      Part part = clientMessage.getPart(3);
       if (part != null) {
         memberMappedArg = (MemberMappedArgument) part.getObject();
       }
@@ -87,7 +87,7 @@ public class ExecuteFunction extends BaseCommand {
           LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), exception);
       if (hasResult == 1) {
-        writeChunkedException(msg, exception, false, servConn);
+        writeChunkedException(clientMessage, exception, servConn);
         servConn.setAsTrue(RESPONDED);
         return;
       }
@@ -97,7 +97,7 @@ public class ExecuteFunction extends BaseCommand {
           LocalizedStrings.ExecuteFunction_THE_INPUT_FUNCTION_FOR_THE_EXECUTE_FUNCTION_REQUEST_IS_NULL
               .toLocalizedString();
       logger.warn("{}: {}", servConn.getName(), message);
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     }
 
@@ -110,7 +110,7 @@ public class ExecuteFunction extends BaseCommand {
           final String message = LocalizedStrings.ExecuteFunction_FUNCTION_NAMED_0_IS_NOT_REGISTERED
               .toLocalizedString(function);
           logger.warn("{}: {}", servConn.getName(), message);
-          sendError(hasResult, msg, message, servConn);
+          sendError(hasResult, clientMessage, message, servConn);
           return;
         }
       } else {
@@ -129,7 +129,7 @@ public class ExecuteFunction extends BaseCommand {
             args, functionObject.optimizeForWrite());
       }
       ChunkedMessage m = servConn.getFunctionResponseMessage();
-      m.setTransactionId(msg.getTransactionId());
+      m.setTransactionId(clientMessage.getTransactionId());
       ResultSender resultSender = new ServerToClientFunctionResultSender(m,
           MessageType.EXECUTE_FUNCTION_RESULT, servConn, functionObject, executeContext);
 
@@ -182,7 +182,7 @@ public class ExecuteFunction extends BaseCommand {
           function), ioException);
       String message =
           LocalizedStrings.ExecuteFunction_SERVER_COULD_NOT_SEND_THE_REPLY.toLocalizedString();
-      sendException(hasResult, msg, message, servConn, ioException);
+      sendException(hasResult, clientMessage, message, servConn, ioException);
     } catch (InternalFunctionInvocationTargetException internalfunctionException) {
       // Fix for #44709: User should not be aware of
       // InternalFunctionInvocationTargetException. No instance of
@@ -200,20 +200,20 @@ public class ExecuteFunction extends BaseCommand {
             new Object[] {function}), internalfunctionException);
       }
       final String message = internalfunctionException.getMessage();
-      sendException(hasResult, msg, message, servConn, internalfunctionException);
+      sendException(hasResult, clientMessage, message, servConn, internalfunctionException);
     } catch (Exception e) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), e);
       final String message = e.getMessage();
-      sendException(hasResult, msg, message, servConn, e);
+      sendException(hasResult, clientMessage, message, servConn, e);
     }
   }
 
   private void sendException(byte hasResult, Message msg, String message, ServerConnection servConn,
       Throwable e) throws IOException {
     if (hasResult == 1) {
-      writeFunctionResponseException(msg, MessageType.EXCEPTION, message, servConn, e);
+      writeFunctionResponseException(msg, MessageType.EXCEPTION, servConn, e);
       servConn.setAsTrue(RESPONDED);
     }
   }
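
For readers following the rename pattern in the hunk above (and repeated in the files below), here is a minimal sketch, not part of the commit, of how a BaseCommand subclass reads once the cmdExecute parameters are named clientMessage and serverConnection. The class name ExampleCommand, the single request part it reads, and the import locations are illustrative assumptions; the calls themselves (getPart, writeReply, writeException, checkForInterrupt, setAsTrue(RESPONDED)) are the ones that appear in the diffs in this message.

import java.io.IOException;

import org.apache.geode.internal.cache.tier.sockets.BaseCommand;
import org.apache.geode.internal.cache.tier.sockets.Message;
import org.apache.geode.internal.cache.tier.sockets.ServerConnection;

// Hypothetical command, shown only to illustrate the clientMessage/serverConnection naming.
public class ExampleCommand extends BaseCommand {

  @Override
  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
      throws IOException {
    serverConnection.setAsTrue(REQUIRES_RESPONSE);
    try {
      // Request data arrives as numbered parts on the client message.
      Object args = clientMessage.getPart(0).getObject();
      // ... operate on args ...
      writeReply(clientMessage, serverConnection);          // acknowledge the request
      serverConnection.setAsTrue(RESPONDED);
    } catch (Exception e) {
      checkForInterrupt(serverConnection, e);               // rethrow if the thread was interrupted
      writeException(clientMessage, e, false, serverConnection);
      serverConnection.setAsTrue(RESPONDED);
    }
  }
}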

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction65.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction65.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction65.java
index 8fafd10..4a0a993 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction65.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction65.java
@@ -63,7 +63,7 @@ public class ExecuteFunction65 extends BaseCommand {
   ExecuteFunction65() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
     Object function = null;
     Object args = null;
     MemberMappedArgument memberMappedArg = null;
@@ -71,7 +71,7 @@ public class ExecuteFunction65 extends BaseCommand {
     byte functionState = 0;
     boolean isReexecute = false;
     try {
-      functionState = msg.getPart(0).getSerializedForm()[0];
+      functionState = clientMessage.getPart(0).getSerializedForm()[0];
 
       if (functionState == AbstractExecution.HA_HASRESULT_NO_OPTIMIZEFORWRITE_REEXECUTE) {
         functionState = AbstractExecution.HA_HASRESULT_NO_OPTIMIZEFORWRITE;
@@ -90,10 +90,10 @@ public class ExecuteFunction65 extends BaseCommand {
         servConn.setAsTrue(REQUIRES_RESPONSE);
         servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
       }
-      function = msg.getPart(1).getStringOrObject();
-      args = msg.getPart(2).getObject();
+      function = clientMessage.getPart(1).getStringOrObject();
+      args = clientMessage.getPart(2).getObject();
 
-      Part part = msg.getPart(3);
+      Part part = clientMessage.getPart(3);
       if (part != null) {
         memberMappedArg = (MemberMappedArgument) part.getObject();
       }
@@ -102,7 +102,7 @@ public class ExecuteFunction65 extends BaseCommand {
           LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), exception);
       if (hasResult == 1) {
-        writeChunkedException(msg, exception, false, servConn);
+        writeChunkedException(clientMessage, exception, servConn);
         servConn.setAsTrue(RESPONDED);
         return;
       }
@@ -112,7 +112,7 @@ public class ExecuteFunction65 extends BaseCommand {
           LocalizedStrings.ExecuteFunction_THE_INPUT_FUNCTION_FOR_THE_EXECUTE_FUNCTION_REQUEST_IS_NULL
               .toLocalizedString();
       logger.warn("{}: {}", servConn.getName(), message);
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     }
 
@@ -125,7 +125,7 @@ public class ExecuteFunction65 extends BaseCommand {
           final String message = LocalizedStrings.ExecuteFunction_FUNCTION_NAMED_0_IS_NOT_REGISTERED
               .toLocalizedString(function);
           logger.warn("{}: {}", servConn.getName(), message);
-          sendError(hasResult, msg, message, servConn);
+          sendError(hasResult, clientMessage, message, servConn);
           return;
         } else {
           byte functionStateOnServerSide = AbstractExecution.getFunctionState(functionObject.isHA(),
@@ -139,7 +139,7 @@ public class ExecuteFunction65 extends BaseCommand {
                 LocalizedStrings.FunctionService_FUNCTION_ATTRIBUTE_MISMATCH_CLIENT_SERVER
                     .toLocalizedString(function);
             logger.warn("{}: {}", servConn.getName(), message);
-            sendError(hasResult, msg, message, servConn);
+            sendError(hasResult, clientMessage, message, servConn);
             return;
           }
         }
@@ -159,7 +159,7 @@ public class ExecuteFunction65 extends BaseCommand {
             args, functionObject.optimizeForWrite());
       }
       ChunkedMessage m = servConn.getFunctionResponseMessage();
-      m.setTransactionId(msg.getTransactionId());
+      m.setTransactionId(clientMessage.getTransactionId());
       ResultSender resultSender = new ServerToClientFunctionResultSender65(m,
           MessageType.EXECUTE_FUNCTION_RESULT, servConn, functionObject, executeContext);
 
@@ -193,7 +193,7 @@ public class ExecuteFunction65 extends BaseCommand {
                   .toLocalizedString(new Object[] {functionObject.getId(), sm}),
               sm);
 
-          sendException(hasResult, msg, e.getMessage(), servConn, e);
+          sendException(hasResult, clientMessage, e.getMessage(), servConn, e);
           return;
         }
         functionObject.execute(context);
@@ -220,7 +220,7 @@ public class ExecuteFunction65 extends BaseCommand {
           function), ioException);
       String message =
           LocalizedStrings.ExecuteFunction_SERVER_COULD_NOT_SEND_THE_REPLY.toLocalizedString();
-      sendException(hasResult, msg, message, servConn, ioException);
+      sendException(hasResult, clientMessage, message, servConn, ioException);
     } catch (InternalFunctionInvocationTargetException internalfunctionException) {
       // Fix for #44709: User should not be aware of
       // InternalFunctionInvocationTargetException. No instance of
@@ -236,20 +236,20 @@ public class ExecuteFunction65 extends BaseCommand {
             new Object[] {function}), internalfunctionException);
       }
       final String message = internalfunctionException.getMessage();
-      sendException(hasResult, msg, message, servConn, internalfunctionException);
+      sendException(hasResult, clientMessage, message, servConn, internalfunctionException);
     } catch (Exception e) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), e);
       final String message = e.getMessage();
-      sendException(hasResult, msg, message, servConn, e);
+      sendException(hasResult, clientMessage, message, servConn, e);
     }
   }
 
   private void sendException(byte hasResult, Message msg, String message, ServerConnection servConn,
       Throwable e) throws IOException {
     if (hasResult == 1) {
-      writeFunctionResponseException(msg, MessageType.EXCEPTION, message, servConn, e);
+      writeFunctionResponseException(msg, MessageType.EXCEPTION, servConn, e);
       servConn.setAsTrue(RESPONDED);
     }
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction66.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction66.java
index d007777..f4aac7a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction66.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction66.java
@@ -87,7 +87,7 @@ public class ExecuteFunction66 extends BaseCommand {
   ExecuteFunction66() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
     Object function = null;
     Object args = null;
     MemberMappedArgument memberMappedArg = null;
@@ -99,7 +99,7 @@ public class ExecuteFunction66 extends BaseCommand {
     boolean ignoreFailedMembers = false;
     int functionTimeout = ConnectionImpl.DEFAULT_CLIENT_FUNCTION_TIMEOUT;
     try {
-      byte[] bytes = msg.getPart(0).getSerializedForm();
+      byte[] bytes = clientMessage.getPart(0).getSerializedForm();
       functionState = bytes[0];
       if (bytes.length >= 5
           && servConn.getClientVersion().ordinal() >= Version.GFE_8009.ordinal()) {
@@ -123,25 +123,25 @@ public class ExecuteFunction66 extends BaseCommand {
         servConn.setAsTrue(REQUIRES_RESPONSE);
         servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
       }
-      function = msg.getPart(1).getStringOrObject();
-      args = msg.getPart(2).getObject();
+      function = clientMessage.getPart(1).getStringOrObject();
+      args = clientMessage.getPart(2).getObject();
 
-      Part part = msg.getPart(3);
+      Part part = clientMessage.getPart(3);
       if (part != null) {
         memberMappedArg = (MemberMappedArgument) part.getObject();
       }
 
-      groups = getGroups(msg);
-      allMembers = getAllMembers(msg);
-      ignoreFailedMembers = getIgnoreFailedMembers(msg);
+      groups = getGroups(clientMessage);
+      allMembers = getAllMembers(clientMessage);
+      ignoreFailedMembers = getIgnoreFailedMembers(clientMessage);
     } catch (ClassNotFoundException exception) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), exception);
       if (hasResult == 1) {
-        writeChunkedException(msg, exception, false, servConn);
+        writeChunkedException(clientMessage, exception, servConn);
       } else {
-        writeException(msg, exception, false, servConn);
+        writeException(clientMessage, exception, false, servConn);
       }
       servConn.setAsTrue(RESPONDED);
       return;
@@ -153,7 +153,7 @@ public class ExecuteFunction66 extends BaseCommand {
               .toLocalizedString();
       logger.warn(LocalizedMessage.create(LocalizedStrings.TWO_ARG_COLON,
           new Object[] {servConn.getName(), message}));
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     }
 
@@ -166,7 +166,7 @@ public class ExecuteFunction66 extends BaseCommand {
           final String message = LocalizedStrings.ExecuteFunction_FUNCTION_NAMED_0_IS_NOT_REGISTERED
               .toLocalizedString(function);
           logger.warn("{}: {}", servConn.getName(), message);
-          sendError(hasResult, msg, message, servConn);
+          sendError(hasResult, clientMessage, message, servConn);
           return;
         } else {
           byte functionStateOnServerSide = AbstractExecution.getFunctionState(functionObject.isHA(),
@@ -180,7 +180,7 @@ public class ExecuteFunction66 extends BaseCommand {
                 LocalizedStrings.FunctionService_FUNCTION_ATTRIBUTE_MISMATCH_CLIENT_SERVER
                     .toLocalizedString(function);
             logger.warn("{}: {}", servConn.getName(), message);
-            sendError(hasResult, msg, message, servConn);
+            sendError(hasResult, clientMessage, message, servConn);
             return;
           }
         }
@@ -200,7 +200,7 @@ public class ExecuteFunction66 extends BaseCommand {
             args, functionObject.optimizeForWrite());
       }
       ChunkedMessage m = servConn.getFunctionResponseMessage();
-      m.setTransactionId(msg.getTransactionId());
+      m.setTransactionId(clientMessage.getTransactionId());
       ServerToClientFunctionResultSender resultSender = new ServerToClientFunctionResultSender65(m,
           MessageType.EXECUTE_FUNCTION_RESULT, servConn, functionObject, executeContext);
 
@@ -232,7 +232,7 @@ public class ExecuteFunction66 extends BaseCommand {
                   .toLocalizedString(new Object[] {functionObject.getId(), sm}),
               sm);
 
-          sendException(hasResult, msg, e.getMessage(), servConn, e);
+          sendException(hasResult, clientMessage, e.getMessage(), servConn, e);
           return;
         }
         /**
@@ -252,7 +252,7 @@ public class ExecuteFunction66 extends BaseCommand {
         }
 
         if (!functionObject.hasResult()) {
-          writeReply(msg, servConn);
+          writeReply(clientMessage, servConn);
         }
       } catch (FunctionException functionException) {
         stats.endFunctionExecutionWithException(functionObject.hasResult());
@@ -269,7 +269,7 @@ public class ExecuteFunction66 extends BaseCommand {
           function), ioException);
       String message =
           LocalizedStrings.ExecuteFunction_SERVER_COULD_NOT_SEND_THE_REPLY.toLocalizedString();
-      sendException(hasResult, msg, message, servConn, ioException);
+      sendException(hasResult, clientMessage, message, servConn, ioException);
     } catch (InternalFunctionInvocationTargetException internalfunctionException) {
       // Fix for #44709: User should not be aware of
       // InternalFunctionInvocationTargetException. No instance of
@@ -287,13 +287,13 @@ public class ExecuteFunction66 extends BaseCommand {
             new Object[] {function}), internalfunctionException);
       }
       final String message = internalfunctionException.getMessage();
-      sendException(hasResult, msg, message, servConn, internalfunctionException);
+      sendException(hasResult, clientMessage, message, servConn, internalfunctionException);
     } catch (Exception e) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), e);
       final String message = e.getMessage();
-      sendException(hasResult, msg, message, servConn, e);
+      sendException(hasResult, clientMessage, message, servConn, e);
     }
   }
 
@@ -399,7 +399,7 @@ public class ExecuteFunction66 extends BaseCommand {
   private void sendException(byte hasResult, Message msg, String message, ServerConnection servConn,
       Throwable e) throws IOException {
     if (hasResult == 1) {
-      writeFunctionResponseException(msg, MessageType.EXCEPTION, message, servConn, e);
+      writeFunctionResponseException(msg, MessageType.EXCEPTION, servConn, e);
     } else {
       writeException(msg, e, false, servConn);
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction70.java
index 957b423..bf5f70b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteFunction70.java
@@ -51,8 +51,8 @@ public class ExecuteFunction70 extends ExecuteFunction66 {
   private ExecuteFunction70() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
-    super.cmdExecute(msg, servConn, start);
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+    super.cmdExecute(clientMessage, serverConnection, start);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction.java
index 0007dab..3147fe5 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction.java
@@ -63,7 +63,7 @@ public class ExecuteRegionFunction extends BaseCommand {
   private ExecuteRegionFunction() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
     String regionName = null;
     Object function = null;
     Object args = null;
@@ -73,27 +73,27 @@ public class ExecuteRegionFunction extends BaseCommand {
     int filterSize = 0, partNumber = 0;
     CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
     try {
-      hasResult = msg.getPart(0).getSerializedForm()[0];
+      hasResult = clientMessage.getPart(0).getSerializedForm()[0];
       if (hasResult == 1) {
         servConn.setAsTrue(REQUIRES_RESPONSE);
         servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
       }
-      regionName = msg.getPart(1).getString();
-      function = msg.getPart(2).getStringOrObject();
-      args = msg.getPart(3).getObject();
-      Part part = msg.getPart(4);
+      regionName = clientMessage.getPart(1).getString();
+      function = clientMessage.getPart(2).getStringOrObject();
+      args = clientMessage.getPart(3).getObject();
+      Part part = clientMessage.getPart(4);
       if (part != null) {
         Object obj = part.getObject();
         if (obj instanceof MemberMappedArgument) {
           memberMappedArg = (MemberMappedArgument) obj;
         }
       }
-      filterSize = msg.getPart(5).getInt();
+      filterSize = clientMessage.getPart(5).getInt();
       if (filterSize != 0) {
         filter = new HashSet();
         partNumber = 6;
         for (int i = 0; i < filterSize; i++) {
-          filter.add(msg.getPart(partNumber + i).getStringOrObject());
+          filter.add(clientMessage.getPart(partNumber + i).getStringOrObject());
         }
       }
 
@@ -102,7 +102,7 @@ public class ExecuteRegionFunction extends BaseCommand {
           LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), exception);
       if (hasResult == 1) {
-        writeChunkedException(msg, exception, false, servConn);
+        writeChunkedException(clientMessage, exception, servConn);
         servConn.setAsTrue(RESPONDED);
         return;
       }
@@ -120,7 +120,7 @@ public class ExecuteRegionFunction extends BaseCommand {
                 .toLocalizedString("region");
       }
       logger.warn("{}: {}", servConn.getName(), message);
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     }
 
@@ -130,7 +130,7 @@ public class ExecuteRegionFunction extends BaseCommand {
           LocalizedStrings.ExecuteRegionFunction_THE_REGION_NAMED_0_WAS_NOT_FOUND_DURING_EXECUTE_FUNCTION_REQUEST
               .toLocalizedString(regionName);
       logger.warn("{}: {}", servConn.getName(), message);
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     }
 
@@ -147,7 +147,7 @@ public class ExecuteRegionFunction extends BaseCommand {
               LocalizedStrings.ExecuteRegionFunction_THE_FUNCTION_0_HAS_NOT_BEEN_REGISTERED
                   .toLocalizedString(function);
           logger.warn("{}: {}", servConn.getName(), message);
-          sendError(hasResult, msg, message, servConn);
+          sendError(hasResult, clientMessage, message, servConn);
           return;
         }
       } else {
@@ -169,7 +169,7 @@ public class ExecuteRegionFunction extends BaseCommand {
       // Construct execution
       AbstractExecution execution = (AbstractExecution) FunctionService.onRegion(region);
       ChunkedMessage m = servConn.getFunctionResponseMessage();
-      m.setTransactionId(msg.getTransactionId());
+      m.setTransactionId(clientMessage.getTransactionId());
       resultSender = new ServerToClientFunctionResultSender(m,
           MessageType.EXECUTE_REGION_FUNCTION_RESULT, servConn, functionObject, executeContext);
 
@@ -204,7 +204,7 @@ public class ExecuteRegionFunction extends BaseCommand {
           function), ioe);
       final String message = LocalizedStrings.ExecuteRegionFunction_SERVER_COULD_NOT_SEND_THE_REPLY
           .toLocalizedString();
-      sendException(hasResult, msg, message, servConn, ioe);
+      sendException(hasResult, clientMessage, message, servConn, ioe);
     } catch (InternalFunctionInvocationTargetException internalfunctionException) {
       // Fix for #44709: User should not be aware of
       // InternalFunctionInvocationTargetException. No instance of
@@ -222,20 +222,20 @@ public class ExecuteRegionFunction extends BaseCommand {
             new Object[] {function}), internalfunctionException);
       }
       final String message = internalfunctionException.getMessage();
-      sendException(hasResult, msg, message, servConn, internalfunctionException);
+      sendException(hasResult, clientMessage, message, servConn, internalfunctionException);
     } catch (FunctionException fe) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), fe);
       String message = fe.getMessage();
 
-      sendException(hasResult, msg, message, servConn, fe);
+      sendException(hasResult, clientMessage, message, servConn, fe);
     } catch (Exception e) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), e);
       String message = e.getMessage();
-      sendException(hasResult, msg, message, servConn, e);
+      sendException(hasResult, clientMessage, message, servConn, e);
     } finally {
       handShake.setClientReadTimeout(earlierClientReadTimeout);
     }
@@ -245,7 +245,7 @@ public class ExecuteRegionFunction extends BaseCommand {
       Throwable e) throws IOException {
     synchronized (msg) {
       if (hasResult == 1) {
-        writeFunctionResponseException(msg, MessageType.EXCEPTION, message, servConn, e);
+        writeFunctionResponseException(msg, MessageType.EXCEPTION, servConn, e);
         servConn.setAsTrue(RESPONDED);
       }
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction61.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction61.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction61.java
index b1d3d4e..73ab621 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction61.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction61.java
@@ -63,7 +63,7 @@ public class ExecuteRegionFunction61 extends BaseCommand {
   private ExecuteRegionFunction61() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
     String regionName = null;
     Object function = null;
     Object args = null;
@@ -76,40 +76,40 @@ public class ExecuteRegionFunction61 extends BaseCommand {
     int filterSize = 0, partNumber = 0;
     CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
     try {
-      hasResult = msg.getPart(0).getSerializedForm()[0];
+      hasResult = clientMessage.getPart(0).getSerializedForm()[0];
       if (hasResult == 1) {
         servConn.setAsTrue(REQUIRES_RESPONSE);
         servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
       }
-      regionName = msg.getPart(1).getString();
-      function = msg.getPart(2).getStringOrObject();
-      args = msg.getPart(3).getObject();
-      Part part = msg.getPart(4);
+      regionName = clientMessage.getPart(1).getString();
+      function = clientMessage.getPart(2).getStringOrObject();
+      args = clientMessage.getPart(3).getObject();
+      Part part = clientMessage.getPart(4);
       if (part != null) {
         Object obj = part.getObject();
         if (obj instanceof MemberMappedArgument) {
           memberMappedArg = (MemberMappedArgument) obj;
         }
       }
-      isReExecute = msg.getPart(5).getSerializedForm()[0];
-      filterSize = msg.getPart(6).getInt();
+      isReExecute = clientMessage.getPart(5).getSerializedForm()[0];
+      filterSize = clientMessage.getPart(6).getInt();
       if (filterSize != 0) {
         filter = new HashSet();
         partNumber = 7;
         for (int i = 0; i < filterSize; i++) {
-          filter.add(msg.getPart(partNumber + i).getStringOrObject());
+          filter.add(clientMessage.getPart(partNumber + i).getStringOrObject());
         }
       }
 
       partNumber = 7 + filterSize;
-      removedNodesSize = msg.getPart(partNumber).getInt();
+      removedNodesSize = clientMessage.getPart(partNumber).getInt();
 
       if (removedNodesSize != 0) {
         removedNodesSet = new HashSet();
         partNumber = partNumber + 1;
 
         for (int i = 0; i < removedNodesSize; i++) {
-          removedNodesSet.add(msg.getPart(partNumber + i).getStringOrObject());
+          removedNodesSet.add(clientMessage.getPart(partNumber + i).getStringOrObject());
         }
       }
 
@@ -118,7 +118,7 @@ public class ExecuteRegionFunction61 extends BaseCommand {
           LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), exception);
       if (hasResult == 1) {
-        writeChunkedException(msg, exception, false, servConn);
+        writeChunkedException(clientMessage, exception, servConn);
         servConn.setAsTrue(RESPONDED);
         return;
       }
@@ -136,7 +136,7 @@ public class ExecuteRegionFunction61 extends BaseCommand {
                 .toLocalizedString("region");
       }
       logger.warn("{}: {}", servConn.getName(), message);
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     } else {
       Region region = crHelper.getRegion(regionName);
@@ -145,7 +145,7 @@ public class ExecuteRegionFunction61 extends BaseCommand {
             LocalizedStrings.ExecuteRegionFunction_THE_REGION_NAMED_0_WAS_NOT_FOUND_DURING_EXECUTE_FUNCTION_REQUEST
                 .toLocalizedString(regionName);
         logger.warn("{}: {}", servConn.getName(), message);
-        sendError(hasResult, msg, message, servConn);
+        sendError(hasResult, clientMessage, message, servConn);
         return;
       }
       HandShake handShake = (HandShake) servConn.getHandshake();
@@ -161,7 +161,7 @@ public class ExecuteRegionFunction61 extends BaseCommand {
                 LocalizedStrings.ExecuteRegionFunction_THE_FUNCTION_0_HAS_NOT_BEEN_REGISTERED
                     .toLocalizedString(function);
             logger.warn("{}: {}", servConn.getName(), message);
-            sendError(hasResult, msg, message, servConn);
+            sendError(hasResult, clientMessage, message, servConn);
             return;
           }
         } else {
@@ -180,7 +180,7 @@ public class ExecuteRegionFunction61 extends BaseCommand {
         // Construct execution
         AbstractExecution execution = (AbstractExecution) FunctionService.onRegion(region);
         ChunkedMessage m = servConn.getFunctionResponseMessage();
-        m.setTransactionId(msg.getTransactionId());
+        m.setTransactionId(clientMessage.getTransactionId());
         resultSender = new ServerToClientFunctionResultSender(m,
             MessageType.EXECUTE_REGION_FUNCTION_RESULT, servConn, functionObject, executeContext);
 
@@ -220,7 +220,7 @@ public class ExecuteRegionFunction61 extends BaseCommand {
         final String message =
             LocalizedStrings.ExecuteRegionFunction_SERVER_COULD_NOT_SEND_THE_REPLY
                 .toLocalizedString();
-        sendException(hasResult, msg, message, servConn, ioe);
+        sendException(hasResult, clientMessage, message, servConn, ioe);
       } catch (FunctionException fe) {
         String message = fe.getMessage();
 
@@ -255,7 +255,7 @@ public class ExecuteRegionFunction61 extends BaseCommand {
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
               function), fe);
-          sendException(hasResult, msg, message, servConn, fe);
+          sendException(hasResult, clientMessage, message, servConn, fe);
         }
 
       } catch (Exception e) {
@@ -263,7 +263,7 @@ public class ExecuteRegionFunction61 extends BaseCommand {
             LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
             function), e);
         String message = e.getMessage();
-        sendException(hasResult, msg, message, servConn, e);
+        sendException(hasResult, clientMessage, message, servConn, e);
       }
 
       finally {
@@ -275,7 +275,7 @@ public class ExecuteRegionFunction61 extends BaseCommand {
   private void sendException(byte hasResult, Message msg, String message, ServerConnection servConn,
       Throwable e) throws IOException {
     if (hasResult == 1) {
-      writeFunctionResponseException(msg, MessageType.EXCEPTION, message, servConn, e);
+      writeFunctionResponseException(msg, MessageType.EXCEPTION, servConn, e);
       servConn.setAsTrue(RESPONDED);
     }
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction65.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction65.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction65.java
index 57bc869..f1540f9 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction65.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction65.java
@@ -60,7 +60,7 @@ public class ExecuteRegionFunction65 extends BaseCommand {
   private ExecuteRegionFunction65() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
     String regionName = null;
     Object function = null;
     Object args = null;
@@ -74,7 +74,7 @@ public class ExecuteRegionFunction65 extends BaseCommand {
     CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
     byte functionState = 0;
     try {
-      functionState = msg.getPart(0).getSerializedForm()[0];
+      functionState = clientMessage.getPart(0).getSerializedForm()[0];
       if (functionState != 1) {
         hasResult = (byte) ((functionState & 2) - 1);
       } else {
@@ -84,35 +84,35 @@ public class ExecuteRegionFunction65 extends BaseCommand {
         servConn.setAsTrue(REQUIRES_RESPONSE);
         servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
       }
-      regionName = msg.getPart(1).getString();
-      function = msg.getPart(2).getStringOrObject();
-      args = msg.getPart(3).getObject();
-      Part part = msg.getPart(4);
+      regionName = clientMessage.getPart(1).getString();
+      function = clientMessage.getPart(2).getStringOrObject();
+      args = clientMessage.getPart(3).getObject();
+      Part part = clientMessage.getPart(4);
       if (part != null) {
         Object obj = part.getObject();
         if (obj instanceof MemberMappedArgument) {
           memberMappedArg = (MemberMappedArgument) obj;
         }
       }
-      isReExecute = msg.getPart(5).getSerializedForm()[0];
-      filterSize = msg.getPart(6).getInt();
+      isReExecute = clientMessage.getPart(5).getSerializedForm()[0];
+      filterSize = clientMessage.getPart(6).getInt();
       if (filterSize != 0) {
         filter = new HashSet<Object>();
         partNumber = 7;
         for (int i = 0; i < filterSize; i++) {
-          filter.add(msg.getPart(partNumber + i).getStringOrObject());
+          filter.add(clientMessage.getPart(partNumber + i).getStringOrObject());
         }
       }
 
       partNumber = 7 + filterSize;
-      removedNodesSize = msg.getPart(partNumber).getInt();
+      removedNodesSize = clientMessage.getPart(partNumber).getInt();
 
       if (removedNodesSize != 0) {
         removedNodesSet = new HashSet<Object>();
         partNumber = partNumber + 1;
 
         for (int i = 0; i < removedNodesSize; i++) {
-          removedNodesSet.add(msg.getPart(partNumber + i).getStringOrObject());
+          removedNodesSet.add(clientMessage.getPart(partNumber + i).getStringOrObject());
         }
       }
 
@@ -121,7 +121,7 @@ public class ExecuteRegionFunction65 extends BaseCommand {
           LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), exception);
       if (hasResult == 1) {
-        writeChunkedException(msg, exception, false, servConn);
+        writeChunkedException(clientMessage, exception, servConn);
         servConn.setAsTrue(RESPONDED);
         return;
       }
@@ -139,7 +139,7 @@ public class ExecuteRegionFunction65 extends BaseCommand {
                 .toLocalizedString("region");
       }
       logger.warn("{}: {}", servConn.getName(), message);
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     }
 
@@ -149,7 +149,7 @@ public class ExecuteRegionFunction65 extends BaseCommand {
           LocalizedStrings.ExecuteRegionFunction_THE_REGION_NAMED_0_WAS_NOT_FOUND_DURING_EXECUTE_FUNCTION_REQUEST
               .toLocalizedString(regionName);
       logger.warn("{}: {}", servConn.getName(), message);
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     }
 
@@ -166,7 +166,7 @@ public class ExecuteRegionFunction65 extends BaseCommand {
               LocalizedStrings.ExecuteRegionFunction_THE_FUNCTION_0_HAS_NOT_BEEN_REGISTERED
                   .toLocalizedString(function);
           logger.warn("{}: {}", servConn.getName(), message);
-          sendError(hasResult, msg, message, servConn);
+          sendError(hasResult, clientMessage, message, servConn);
           return;
         } else {
           byte functionStateOnServerSide = AbstractExecution.getFunctionState(functionObject.isHA(),
@@ -180,7 +180,7 @@ public class ExecuteRegionFunction65 extends BaseCommand {
                 LocalizedStrings.FunctionService_FUNCTION_ATTRIBUTE_MISMATCH_CLIENT_SERVER
                     .toLocalizedString(function);
             logger.warn("{}: {}", servConn.getName(), message);
-            sendError(hasResult, msg, message, servConn);
+            sendError(hasResult, clientMessage, message, servConn);
             return;
           }
         }
@@ -203,7 +203,7 @@ public class ExecuteRegionFunction65 extends BaseCommand {
       // Construct execution
       AbstractExecution execution = (AbstractExecution) FunctionService.onRegion(region);
       ChunkedMessage m = servConn.getFunctionResponseMessage();
-      m.setTransactionId(msg.getTransactionId());
+      m.setTransactionId(clientMessage.getTransactionId());
       resultSender = new ServerToClientFunctionResultSender65(m,
           MessageType.EXECUTE_REGION_FUNCTION_RESULT, servConn, functionObject, executeContext);
 
@@ -262,7 +262,7 @@ public class ExecuteRegionFunction65 extends BaseCommand {
           function), ioe);
       final String message = LocalizedStrings.ExecuteRegionFunction_SERVER_COULD_NOT_SEND_THE_REPLY
           .toLocalizedString();
-      sendException(hasResult, msg, message, servConn, ioe);
+      sendException(hasResult, clientMessage, message, servConn, ioe);
     } catch (FunctionException fe) {
       String message = fe.getMessage();
 
@@ -298,7 +298,7 @@ public class ExecuteRegionFunction65 extends BaseCommand {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
             function), fe);
-        sendException(hasResult, msg, message, servConn, fe);
+        sendException(hasResult, clientMessage, message, servConn, fe);
       }
 
     } catch (Exception e) {
@@ -306,7 +306,7 @@ public class ExecuteRegionFunction65 extends BaseCommand {
           LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), e);
       String message = e.getMessage();
-      sendException(hasResult, msg, message, servConn, e);
+      sendException(hasResult, clientMessage, message, servConn, e);
     } finally {
       handShake.setClientReadTimeout(earlierClientReadTimeout);
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java
index 0ed7235..25d69d6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java
@@ -64,7 +64,7 @@ public class ExecuteRegionFunction66 extends BaseCommand {
   private ExecuteRegionFunction66() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
     String regionName = null;
     Object function = null;
     Object args = null;
@@ -80,7 +80,7 @@ public class ExecuteRegionFunction66 extends BaseCommand {
     byte functionState = 0;
     int functionTimeout = ConnectionImpl.DEFAULT_CLIENT_FUNCTION_TIMEOUT;
     try {
-      byte[] bytes = msg.getPart(0).getSerializedForm();
+      byte[] bytes = clientMessage.getPart(0).getSerializedForm();
       functionState = bytes[0];
       if (bytes.length >= 5
           && servConn.getClientVersion().ordinal() >= Version.GFE_8009.ordinal()) {
@@ -95,17 +95,17 @@ public class ExecuteRegionFunction66 extends BaseCommand {
         servConn.setAsTrue(REQUIRES_RESPONSE);
         servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
       }
-      regionName = msg.getPart(1).getString();
-      function = msg.getPart(2).getStringOrObject();
-      args = msg.getPart(3).getObject();
-      Part part = msg.getPart(4);
+      regionName = clientMessage.getPart(1).getString();
+      function = clientMessage.getPart(2).getStringOrObject();
+      args = clientMessage.getPart(3).getObject();
+      Part part = clientMessage.getPart(4);
       if (part != null) {
         Object obj = part.getObject();
         if (obj instanceof MemberMappedArgument) {
           memberMappedArg = (MemberMappedArgument) obj;
         }
       }
-      byte[] flags = msg.getPart(5).getSerializedForm();
+      byte[] flags = clientMessage.getPart(5).getSerializedForm();
       if (servConn.getClientVersion().ordinal() > Version.GFE_81.ordinal()) {
         isBucketsAsFilter = (flags[0] & ExecuteFunctionHelper.BUCKETS_AS_FILTER_MASK) != 0;
         isReExecute = (flags[0] & ExecuteFunctionHelper.IS_REXECUTE_MASK) != 0 ? (byte) 1 : 0;
@@ -113,24 +113,24 @@ public class ExecuteRegionFunction66 extends BaseCommand {
         isReExecute = flags[0];
         isBucketsAsFilter = false;
       }
-      filterSize = msg.getPart(6).getInt();
+      filterSize = clientMessage.getPart(6).getInt();
       if (filterSize != 0) {
         filter = new HashSet<Object>();
         partNumber = 7;
         for (int i = 0; i < filterSize; i++) {
-          filter.add(msg.getPart(partNumber + i).getStringOrObject());
+          filter.add(clientMessage.getPart(partNumber + i).getStringOrObject());
         }
       }
 
       partNumber = 7 + filterSize;
-      removedNodesSize = msg.getPart(partNumber).getInt();
+      removedNodesSize = clientMessage.getPart(partNumber).getInt();
 
       if (removedNodesSize != 0) {
         removedNodesSet = new HashSet<Object>();
         partNumber = partNumber + 1;
 
         for (int i = 0; i < removedNodesSize; i++) {
-          removedNodesSet.add(msg.getPart(partNumber + i).getStringOrObject());
+          removedNodesSet.add(clientMessage.getPart(partNumber + i).getStringOrObject());
         }
       }
 
@@ -139,9 +139,9 @@ public class ExecuteRegionFunction66 extends BaseCommand {
           LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), exception);
       if (hasResult == 1) {
-        writeChunkedException(msg, exception, false, servConn);
+        writeChunkedException(clientMessage, exception, servConn);
       } else {
-        writeException(msg, exception, false, servConn);
+        writeException(clientMessage, exception, false, servConn);
       }
       servConn.setAsTrue(RESPONDED);
       return;
@@ -159,7 +159,7 @@ public class ExecuteRegionFunction66 extends BaseCommand {
                 .toLocalizedString("region");
       }
       logger.warn("{}: {}", servConn.getName(), message);
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     }
 
@@ -169,7 +169,7 @@ public class ExecuteRegionFunction66 extends BaseCommand {
           LocalizedStrings.ExecuteRegionFunction_THE_REGION_NAMED_0_WAS_NOT_FOUND_DURING_EXECUTE_FUNCTION_REQUEST
               .toLocalizedString(regionName);
       logger.warn("{}: {}", servConn.getName(), message);
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     }
     HandShake handShake = (HandShake) servConn.getHandshake();
@@ -185,7 +185,7 @@ public class ExecuteRegionFunction66 extends BaseCommand {
               LocalizedStrings.ExecuteRegionFunction_THE_FUNCTION_0_HAS_NOT_BEEN_REGISTERED
                   .toLocalizedString(function);
           logger.warn("{}: {}", servConn.getName(), message);
-          sendError(hasResult, msg, message, servConn);
+          sendError(hasResult, clientMessage, message, servConn);
           return;
         } else {
           byte functionStateOnServerSide = AbstractExecution.getFunctionState(functionObject.isHA(),
@@ -199,7 +199,7 @@ public class ExecuteRegionFunction66 extends BaseCommand {
                 LocalizedStrings.FunctionService_FUNCTION_ATTRIBUTE_MISMATCH_CLIENT_SERVER
                     .toLocalizedString(function);
             logger.warn("{}: {}", servConn.getName(), message);
-            sendError(hasResult, msg, message, servConn);
+            sendError(hasResult, clientMessage, message, servConn);
             return;
           }
         }
@@ -222,7 +222,7 @@ public class ExecuteRegionFunction66 extends BaseCommand {
       // Construct execution
       AbstractExecution execution = (AbstractExecution) FunctionService.onRegion(region);
       ChunkedMessage m = servConn.getFunctionResponseMessage();
-      m.setTransactionId(msg.getTransactionId());
+      m.setTransactionId(clientMessage.getTransactionId());
       resultSender = new ServerToClientFunctionResultSender65(m,
           MessageType.EXECUTE_REGION_FUNCTION_RESULT, servConn, functionObject, executeContext);
 
@@ -276,7 +276,7 @@ public class ExecuteRegionFunction66 extends BaseCommand {
         } else {
           execution.execute(functionObject);
         }
-        writeReply(msg, servConn);
+        writeReply(clientMessage, servConn);
       }
     } catch (IOException ioe) {
       logger.warn(LocalizedMessage.create(
@@ -284,7 +284,7 @@ public class ExecuteRegionFunction66 extends BaseCommand {
           function), ioe);
       final String message = LocalizedStrings.ExecuteRegionFunction_SERVER_COULD_NOT_SEND_THE_REPLY
           .toLocalizedString();
-      sendException(hasResult, msg, message, servConn, ioe);
+      sendException(hasResult, clientMessage, message, servConn, ioe);
     } catch (FunctionException fe) {
       String message = fe.getMessage();
       Object cause = fe.getCause();
@@ -321,7 +321,7 @@ public class ExecuteRegionFunction66 extends BaseCommand {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
             function), fe);
-        sendException(hasResult, msg, message, servConn, fe);
+        sendException(hasResult, clientMessage, message, servConn, fe);
       }
 
     } catch (Exception e) {
@@ -329,7 +329,7 @@ public class ExecuteRegionFunction66 extends BaseCommand {
           LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), e);
       String message = e.getMessage();
-      sendException(hasResult, msg, message, servConn, e);
+      sendException(hasResult, clientMessage, message, servConn, e);
     } finally {
       handShake.setClientReadTimeout(earlierClientReadTimeout);
       ServerConnection.executeFunctionOnLocalNodeOnly((byte) 0);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java
index 8b2cf75..725f03b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java
@@ -62,7 +62,7 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
   private ExecuteRegionFunctionSingleHop() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
 
     String regionName = null;
     Object function = null;
@@ -79,7 +79,7 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
     CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
     int functionTimeout = ConnectionImpl.DEFAULT_CLIENT_FUNCTION_TIMEOUT;
     try {
-      byte[] bytes = msg.getPart(0).getSerializedForm();
+      byte[] bytes = clientMessage.getPart(0).getSerializedForm();
       functionState = bytes[0];
       if (bytes.length >= 5
           && servConn.getClientVersion().ordinal() >= Version.GFE_8009.ordinal()) {
@@ -94,49 +94,49 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
         servConn.setAsTrue(REQUIRES_RESPONSE);
         servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
       }
-      regionName = msg.getPart(1).getString();
-      function = msg.getPart(2).getStringOrObject();
-      args = msg.getPart(3).getObject();
-      Part part = msg.getPart(4);
+      regionName = clientMessage.getPart(1).getString();
+      function = clientMessage.getPart(2).getStringOrObject();
+      args = clientMessage.getPart(3).getObject();
+      Part part = clientMessage.getPart(4);
       if (part != null) {
         Object obj = part.getObject();
         if (obj instanceof MemberMappedArgument) {
           memberMappedArg = (MemberMappedArgument) obj;
         }
       }
-      isExecuteOnAllBuckets = msg.getPart(5).getSerializedForm()[0];
+      isExecuteOnAllBuckets = clientMessage.getPart(5).getSerializedForm()[0];
       if (isExecuteOnAllBuckets == 1) {
         filter = new HashSet();
-        bucketIdsSize = msg.getPart(6).getInt();
+        bucketIdsSize = clientMessage.getPart(6).getInt();
         if (bucketIdsSize != 0) {
           buckets = new HashSet<Integer>();
           partNumber = 7;
           for (int i = 0; i < bucketIdsSize; i++) {
-            buckets.add(msg.getPart(partNumber + i).getInt());
+            buckets.add(clientMessage.getPart(partNumber + i).getInt());
           }
         }
         partNumber = 7 + bucketIdsSize;
       } else {
-        filterSize = msg.getPart(6).getInt();
+        filterSize = clientMessage.getPart(6).getInt();
         if (filterSize != 0) {
           filter = new HashSet<Object>();
           partNumber = 7;
           for (int i = 0; i < filterSize; i++) {
-            filter.add(msg.getPart(partNumber + i).getStringOrObject());
+            filter.add(clientMessage.getPart(partNumber + i).getStringOrObject());
           }
         }
         partNumber = 7 + filterSize;
       }
 
 
-      removedNodesSize = msg.getPart(partNumber).getInt();
+      removedNodesSize = clientMessage.getPart(partNumber).getInt();
 
       if (removedNodesSize != 0) {
         removedNodesSet = new HashSet<Object>();
         partNumber = partNumber + 1;
 
         for (int i = 0; i < removedNodesSize; i++) {
-          removedNodesSet.add(msg.getPart(partNumber + i).getStringOrObject());
+          removedNodesSet.add(clientMessage.getPart(partNumber + i).getStringOrObject());
         }
       }
 
@@ -145,7 +145,7 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
           LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), exception);
       if (hasResult == 1) {
-        writeChunkedException(msg, exception, false, servConn);
+        writeChunkedException(clientMessage, exception, servConn);
         servConn.setAsTrue(RESPONDED);
         return;
       }
@@ -163,7 +163,7 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
                 .toLocalizedString("region");
       }
       logger.warn("{}: {}", servConn.getName(), message);
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     }
 
@@ -173,7 +173,7 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
           LocalizedStrings.ExecuteRegionFunction_THE_REGION_NAMED_0_WAS_NOT_FOUND_DURING_EXECUTE_FUNCTION_REQUEST
               .toLocalizedString(regionName);
       logger.warn("{}: {}", servConn.getName(), message);
-      sendError(hasResult, msg, message, servConn);
+      sendError(hasResult, clientMessage, message, servConn);
       return;
     }
     HandShake handShake = (HandShake) servConn.getHandshake();
@@ -189,7 +189,7 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
               LocalizedStrings.ExecuteRegionFunction_THE_FUNCTION_0_HAS_NOT_BEEN_REGISTERED
                   .toLocalizedString(function);
           logger.warn("{}: {}", servConn.getName(), message);
-          sendError(hasResult, msg, message, servConn);
+          sendError(hasResult, clientMessage, message, servConn);
           return;
         } else {
           byte functionStateOnServer = AbstractExecution.getFunctionState(functionObject.isHA(),
@@ -199,7 +199,7 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
                 LocalizedStrings.FunctionService_FUNCTION_ATTRIBUTE_MISMATCH_CLIENT_SERVER
                     .toLocalizedString(function);
             logger.warn("{}: {}", servConn.getName(), message);
-            sendError(hasResult, msg, message, servConn);
+            sendError(hasResult, clientMessage, message, servConn);
             return;
           }
         }
@@ -222,7 +222,7 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
       // Construct execution
       AbstractExecution execution = (AbstractExecution) FunctionService.onRegion(region);
       ChunkedMessage m = servConn.getFunctionResponseMessage();
-      m.setTransactionId(msg.getTransactionId());
+      m.setTransactionId(clientMessage.getTransactionId());
       resultSender = new ServerToClientFunctionResultSender65(m,
           MessageType.EXECUTE_REGION_FUNCTION_RESULT, servConn, functionObject, executeContext);
 
@@ -290,7 +290,7 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
           function), ioe);
       final String message = LocalizedStrings.ExecuteRegionFunction_SERVER_COULD_NOT_SEND_THE_REPLY
           .toLocalizedString();
-      sendException(hasResult, msg, message, servConn, ioe);
+      sendException(hasResult, clientMessage, message, servConn, ioe);
     } catch (FunctionException fe) {
       String message = fe.getMessage();
 
@@ -301,21 +301,21 @@ public class ExecuteRegionFunctionSingleHop extends BaseCommand {
           logger.debug("Exception on server while executing function: {}: {}", function, message,
               fe);
         }
-        synchronized (msg) {
+        synchronized (clientMessage) {
           resultSender.setException(fe);
         }
       } else {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
             function), fe);
-        sendException(hasResult, msg, message, servConn, fe);
+        sendException(hasResult, clientMessage, message, servConn, fe);
       }
     } catch (Exception e) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0,
           function), e);
       String message = e.getMessage();
-      sendException(hasResult, msg, message, servConn, e);
+      sendException(hasResult, clientMessage, message, servConn, e);
     } finally {
       handShake.setClientReadTimeout(earlierClientReadTimeout);
       ServerConnection.executeFunctionOnLocalNodeOnly((byte) 0);


[42/43] geode git commit: Resolve conflict

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/283215f9/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
index 88df942..f54ff74 100644
--- a/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
@@ -1,289 +1,289 @@
 org/apache/geode/admin/RegionSubRegionSnapshot,2
 fromData,62,2a2bb80023b500082a2bb900240100b5000b2a2bb80025b500052ab40005b9002601004d2cb9002701009900132cb900280100c000292ab6001ba7ffeab1
 toData,30,2ab400082bb800202b2ab4000bb9002102002ab40005c000032bb80022b1
-
+
 org/apache/geode/admin/internal/FinishBackupRequest,2
 fromData,33,2a2bb700292a2bb8002ab500022a2bb8002ab500032a2bb8002bb6002cb50004b1
 toData,33,2a2bb7002d2ab400022bb8002e2ab400032bb8002e2ab40004b8002f2bb80030b1
-
+
 org/apache/geode/admin/internal/FinishBackupResponse,2
 fromData,14,2a2bb700042a2bb80005b50003b1
 toData,14,2a2bb700062ab400032bb80007b1
-
+
 org/apache/geode/admin/internal/PrepareBackupResponse,2
 fromData,14,2a2bb700042a2bb80005b50003b1
 toData,14,2a2bb700062ab400032bb80007b1
-
+
 org/apache/geode/admin/internal/SystemMemberCacheEventProcessor$SystemMemberCacheMessage,2
 fromData,27,2a2bb7001a2a2bb8001bb5000c2a2bb9001c0100b8001db5000fb1
 toData,27,2a2bb7001e2ab4000c2bb8001f2b2ab4000fb40020b900210200b1
-
+
 org/apache/geode/admin/jmx/internal/StatAlertNotification,2
 fromData,39,2a2bb8002ab600032a2bb8002bb600072a2bb8002cc0002dc0002db600052a2bb8002eb50008b1
 toData,33,2ab600162bb800262ab600202bb800272ab6000e2bb800282ab400082bb80029b1
-
+
 org/apache/geode/cache/ExpirationAttributes,2
 fromData,22,2a2bb900120100b500022a2bb80013c00014b50004b1
 toData,19,2b2ab40002b9001502002ab400042bb80016b1
-
+
 org/apache/geode/cache/MembershipAttributes,2
 fromData,39,2a2a2bb80032b7000bb500032a2bb900330100b80034b500052a2bb900330100b80035b50007b1
 toData,89,2ab40003b9002c0100bd002d4d2ab40003b9002101004e03360415042cbea2001b2c15042db900240100c00025b90026010053840401a7ffe42c2bb8002e2b2ab40005b4002fb9003002002b2ab40007b40031b900300200b1
-
+
 org/apache/geode/cache/SubscriptionAttributes,2
 fromData,14,2a2bb9000f0100b80010b50003b1
 toData,14,2b2ab40003b4000db9000e0200b1
-
+
 org/apache/geode/cache/client/internal/CacheServerLoadMessage,2
 fromData,52,2a2bb7000d2abb000e59b7000fb500022ab400022bb800102abb001159b70012b500032ab400032bb800102a2bb80013b50004b1
 toData,30,2a2bb700142ab400022bb800152ab400032bb800152ab400042bb80016b1
-
+
 org/apache/geode/cache/client/internal/locator/ClientConnectionRequest,2
 fromData,14,2a2bb700042a2bb80005b50003b1
 toData,14,2a2bb700062ab400032bb80007b1
-
+
 org/apache/geode/cache/client/internal/locator/ClientConnectionResponse,2
 fromData,35,2a2bb80004b500022ab400029900162abb000559b70006b500032ab400032bb60007b1
 toData,31,2ab40003c6000704a70004033d1c2bb800081c99000b2ab400032bb60009b1
-
+
 org/apache/geode/cache/client/internal/locator/ClientReplacementRequest,2
 fromData,25,2a2bb700042abb000559b70006b500032ab400032bb80007b1
 toData,14,2a2bb700082ab400032bb80009b1
-
+
 org/apache/geode/cache/client/internal/locator/GetAllServersRequest,2
 fromData,6,2a2bb70003b1
 toData,6,2a2bb70004b1
-
+
 org/apache/geode/cache/client/internal/locator/GetAllServersResponse,2
 fromData,31,2a2bb80005b500032ab40003c600122ab40003b600049a00082a04b50002b1
 toData,9,2ab400032bb80006b1
-
+
 org/apache/geode/cache/client/internal/locator/LocatorListResponse,2
 fromData,43,2a2bb80006b500032a2bb900070100b500052ab40003c600142ab40003b9000401009a00082a04b50002b1
 toData,19,2ab400032bb800082b2ab40005b900090200b1
-
+
 org/apache/geode/cache/client/internal/locator/LocatorStatusResponse,2
 fromData,56,2a2bb6001f2a2bb600202a2bb600212a2bb600222a2bb600232a2bb600242a2bb600252a2bb600262a2bb600272a2bb600282a2bb60029b1
-toData,56,2a2bb600322a2bb600332a2bb600342a2bb600352a2bb600362a2bb600372a2bb600382a2bb600392a2bb6003a2a2bb6003b2a2bb6003cb1
-
+toData,56,2a2bb600312a2bb600322a2bb600332a2bb600342a2bb600352a2bb600362a2bb600372a2bb600382a2bb600392a2bb6003a2a2bb6003bb1
+
 org/apache/geode/cache/client/internal/locator/QueueConnectionRequest,2
 fromData,40,2a2bb700072a2bb80008b500042a2bb80009b500062a2bb8000ab500052a2bb9000b0100b50002b1
 toData,40,2a2bb7000c2ab400042bb8000d2ab400062bb8000e2ab400052bb8000f2b2ab40002b900100200b1
-
+
 org/apache/geode/cache/client/internal/locator/QueueConnectionResponse,2
 fromData,41,2a2bb80006b500032a2bb80007b500042ab40004c600142ab40004b9000501009a00082a04b50002b1
 toData,17,2ab400032bb800082ab400042bb80009b1
-
+
 org/apache/geode/cache/client/internal/locator/ServerLocationRequest,2
 fromData,9,2a2bb80003b50002b1
 toData,9,2ab400022bb80004b1
-
+
 org/apache/geode/cache/query/internal/CqEntry,2
 fromData,17,2a2bb80009b500022a2bb80009b50003b1
 toData,17,2ab400022bb8000b2ab400032bb8000bb1
-
+
 org/apache/geode/cache/query/internal/CumulativeNonDistinctResults,2
 fromData,126,2bb8001dc0001e4d2abb00035912042cb70005b500012cb9001f01003e2bb90020010037042abb001a59160488b70021b5000816043706160609949e00421d9900232bb800223a082ab40008bb0023592cc000241908b70025b90026020057a700152bb8001d3a082ab400081908b9002602005716060a653706a7ffbdb1
 toData,125,2ab40001b900280100b9001f01003d2ab40001b9002801002bb80029bb002a5911040001b7002b4e2db6002c3a042ab600143a050336061905b9001501009900311905b9001601003a071c9900181907c0002db9002e01003a0819082bb8002fa7000919072db80029840601a7ffcb1904150685b600302d2bb60031b1
-
+
 org/apache/geode/cache/query/internal/LinkedResultSet,2
 fromData,40,2bb9001501003d2a2bb80016c00017b500071c3e1d9e00122a2bb80016b60018578403ffa7fff0b1
 toData,46,2b2ab60019b9001a02002ab400072bb8001b2ab6001c4d2cb9001d01009900102cb9001e01002bb8001ba7ffedb1
-
+
 org/apache/geode/cache/query/internal/LinkedStructSet,2
 fromData,68,2a2bb900300100b500022bb9003101003d2a2bb80032c00020b500071c3e1d9e00242bb80032c000333a042abb000c592ab400071904b70034b60035578403ffa7ffdeb1
 toData,66,2b2ab40002b9003702002b2ab60038b9003902002ab400072bb8003a2ab600284d2cb90029010099001a2cb9002a0100c000124e2db9003b01002bb8003aa7ffe3b1
-
+
 org/apache/geode/cache/query/internal/NWayMergeResults,2
 fromData,134,2bb8001ec0001f4d2abb00045912202cb70006b500022cb9002101003e2a2bb80022b500012bb90023010037042abb001b59160488b70024b5000916043706160609949e00421d9900232bb800253a082ab40009bb0026592cc000271908b70028b90029020057a700152bb8001e3a082ab400091908b9002902005716060a653706a7ffbdb1
 toData,133,2ab40002b9002b0100b9002101003d2ab40002b9002b01002bb8002c2ab400012bb8002dbb002e5911040001b7002f4e2db600303a042ab600153a050336061905b9001601009900311905b9001701003a071c9900181907c00031b9003201003a0819082bb80033a7000919072db8002c840601a7ffcb1904150685b600342d2bb60035b1
-
+
 org/apache/geode/cache/query/internal/NullToken,2
 fromData,1,b1
 toData,1,b1
-
+
 org/apache/geode/cache/query/internal/PRQueryTraceInfo,2
 fromData,29,2a2bb900090100b500042a2bb9000a0100b500062a2bb8000bb50003b1
 toData,29,2b2ab40004b9000502002b2ab40006b9000702002ab400032bb80008b1
-
+
 org/apache/geode/cache/query/internal/ResultsBag,2
 fromData,106,2a2bb80018c00019b5001a2a2bb9001b0100b50017b2001c9a00162ab400179c000fbb001d592ab40017b7001ebf2a2ab6001fb500042a2bb600202ab400172ab40021643d1c9e00232bb800184e2bb9001b010036042ab400042d1504b60022571c1504643da7ffdfb1
 toData,116,2ab4001a2bb800242b2ab60025b9002602002a2bb600272ab600252ab40021643d2ab40004b60028b9002901004e2db9000a010099003f1c9e003b2db9000b0100c0002a3a041904b6002b3a0519052bb800241904b6002c36061c1506a200061c36062b1506b9002602001c1506643da7ffbeb1
-
+
 org/apache/geode/cache/query/internal/ResultsCollectionWrapper,2
 fromData,55,2bb9005801003d1c99000e2a2bb80059b50002a7000e2a2bb8005ac0005bb500022a2bb8005ac0005cb5000d2a2bb900580100b50004b1
 toData,60,2ab40002c100543d2b1cb9005502001c9900112ab40002c0004e2bb80056a7000b2ab400022bb800572ab4000d2bb800572b2ab40004b900550200b1
-
+
 org/apache/geode/cache/query/internal/ResultsSet,2
 fromData,49,2bb9001801003dbb001959b7001a4e2d2bb8001b2a2db600051c360415049e00122a2bb8001cb6001d578404ffa7ffefb1
 toData,70,2b2ab6001eb9001f02002ab60020b900040100c000194d2cc6000704a70004031221b800222c2bb800232ab600244e2db9002501009900102db9002601002bb80027a7ffedb1
-
+
 org/apache/geode/cache/query/internal/SortedResultSet,2
 fromData,40,2bb9001601003d2a2bb80017c00018b500081c3e1d9e00122a2bb80017b60019578403ffa7fff0b1
 toData,46,2b2ab6001ab9001b02002ab400082bb8001c2ab6001d4d2cb9001e01009900102cb9001f01002bb8001ca7ffedb1
-
+
 org/apache/geode/cache/query/internal/SortedStructSet,2
 fromData,57,2a2bb900440100b500022bb9004501003d2a2bb80046c00034b500091c3e1d9e00192bb80046c000283a042a1904b60013578403ffa7ffe9b1
 toData,64,2b2ab40002b9004702002b2ab6002bb9004802002ab400092bb800492ab6002c4d2cb9002601009900182cb900270100c00028c000284e2d2bb8004aa7ffe5b1
-
+
 org/apache/geode/cache/query/internal/StructBag,2
 fromData,16,2a2bb700482a2bb900490100b50002b1
 toData,16,2a2bb7004a2b2ab40002b9004b0200b1
-
+
 org/apache/geode/cache/query/internal/StructImpl,2
 fromData,72,2a2bb80026c00027b500072a2bb80028b500082ab40008c600302ab400084d2cbe3e03360415041da2001f2c1504323a051905c1000999000b2a04b50002a70009840401a7ffe1b1
 toData,17,2ab400072bb800292ab400082bb8002ab1
-
+
 org/apache/geode/cache/query/internal/StructSet,2
 fromData,58,2abb000359bb000459b70005b70006b500072bb9005101003d2a2bb80052c00041b5000c1c3e1d9e00122a2bb80052b60031578403ffa7fff0b1
 toData,46,2b2ab6003ab9005302002ab4000c2bb800542ab6004a4d2cb9001201009900102cb9001301002bb80054a7ffedb1
-
+
 org/apache/geode/cache/query/internal/Undefined,2
 fromData,1,b1
 toData,1,b1
-
+
 org/apache/geode/cache/query/internal/index/IndexCreationData,2
 fromData,106,2a2bb9000f0100b500022a2bb9000f0100b500052a2bb9000f0100b500042a2bb900100100b500082bb9001101003d031ca0000d2ab2000cb50003a70019041ca0000d2ab2000eb50003a7000a2ab20012b500032bb9001001003e1d99000d2a2bb9000f0100b50006b1
 toData,122,2b2ab40002b9000a02002b2ab40005b9000a02002b2ab40004b9000a02002b2ab40008b9000b0200b2000c2ab40003a6000d2b03b9000d0200a7001eb2000e2ab40003a6000d2b04b9000d0200a7000a2b05b9000d02002ab40006c600172b04b9000b02002b2ab40006b9000a0200a7000a2b03b9000b0200b1
-
+
 org/apache/geode/cache/query/internal/types/CollectionTypeImpl,2
 fromData,17,2a2bb700232a2bb80024c00025b50003b1
 toData,14,2a2bb700262ab400032bb80027b1
-
+
 org/apache/geode/cache/query/internal/types/MapTypeImpl,2
 fromData,17,2a2bb7001c2a2bb8001dc00015b50003b1
 toData,14,2a2bb7001e2ab400032bb8001fb1
-
+
 org/apache/geode/cache/query/internal/types/ObjectTypeImpl,2
 fromData,9,2a2bb8000ab50002b1
 toData,9,2ab400022bb8000bb1
-
+
 org/apache/geode/cache/query/internal/types/StructTypeImpl,2
 fromData,28,2a2bb700292a2bb8002ab5000b2a2bb8002bc0002cc0002cb5000db1
 toData,22,2a2bb7002d2ab4000b2bb8002e2ab4000d2bb8002fb1
-
+
 org/apache/geode/cache/server/ServerLoad,2
 fromData,41,2a2bb900070100b500022a2bb900070100b500042a2bb900070100b500032a2bb900070100b50005b1
 toData,41,2b2ab40002b9000602002b2ab40004b9000602002b2ab40003b9000602002b2ab40005b900060200b1
-
+
 org/apache/geode/cache/server/internal/ConnectionCountProbe,2
 fromData,1,b1
 toData,1,b1
-
+
 org/apache/geode/distributed/internal/DistributionAdvisor$Profile,2
 fromData,40,2abb001259b70013b500082ab400082bb800142a2bb900150100b500092a2bb900150100b50003b1
 toData,29,2ab400082bb800102b2ab40009b9001102002b2ab40003b900110200b1
-
+
 org/apache/geode/distributed/internal/DistributionMessage,2
 fromData,1,b1
 toData,1,b1
-
+
 org/apache/geode/distributed/internal/HighPriorityAckedMessage,2
 fromData,51,2a2bb700442a2bb900450100b500122ab800462bb90045010032b500092a2bb900470100b5000a2a2bb80048c00049b50007b1
 toData,47,2a2bb700402b2ab40012b9004102002b2ab40009b60024b9004102002b2ab4000ab9004202002ab400072bb80043b1
-
+
 org/apache/geode/distributed/internal/ReplyMessage,2
 fromData,101,2a2bb7003d2bb9003e01003d2a1c05b8003fb500022a1c1008b8003fb500031c04b8003f99000d2a2bb900400100b500041c07b8003f9900132a2bb80041b500062a04b50007a700141c1040b8003f99000b2a2bb80041b500062a1c1080b8003fb50039b1
 toData,132,2a2bb70038033d2ab400029900081c0580913d2ab4000799000b1c0780913da700102ab40006c600091c104080913d2ab400049900081c0480913d2ab400039900091c100880913d2ab400399900091c108080913d2b1cb9003a02002ab4000499000d2b2ab40004b9003b02002ab400079a000a2ab40006c6000b2ab400062bb8003cb1
-
+
 org/apache/geode/distributed/internal/SerialAckedMessage,2
 fromData,27,2a2bb7002c2a2bb9002d0100b500172a2bb8002ec0001fb50007b1
 toData,24,2a2bb700292b2ab40017b9002a02002ab400072bb8002bb1
-
+
 org/apache/geode/distributed/internal/ServerLocation,2
 fromData,19,2a2bb8000bb500092a2bb9000c0100b5000ab1
 toData,19,2ab400092bb8000d2b2ab4000ab9000e0200b1
-
+
 org/apache/geode/distributed/internal/ShutdownMessage,2
 fromData,27,2a2bb7000c2a2bb9000d0100b500032a2bb8000ec0000fb50002b1
 toData,24,2a2bb700092b2ab40003b9000a02002ab400022bb8000bb1
-
+
 org/apache/geode/distributed/internal/StartupMessage,3
 fromDataProblem,38,2ab40037c7000e2abb006559b70066b500372ab400372bb60067572ab400371268b6006757b1
 fromData,293,2a2bb700692a2bb8006ab500092a2bb9006b0100b5000c2a2bb9006c0100b5000d2a2bb9006c0100b500112bb9006b01003d033e1d1ca2003e2bb8006d3a042bb9006b010036051904c6000d19040301011505b8006ea700183a062ab2006f04bd00205903190653b60024b70070840301a7ffc32bb9006b01003e03360415041da200492bb8006d3a052bb8006d3a062bb9006b010036071905c600121906c6000d19051906150703b80071a700183a082ab2007204bd00205903190853b60024b70070840401a7ffb72a2bb80073c00074b500122a2bb9006b0100b500172a2bb8006ab500182a2bb9006c0100b50019bb005e59b7005f3a0419042bb600752a1904b60076b5000a2a1904b60077b5000b2a1904b60078b5000e2a1904b60079b5000fb1
 toData,318,2a2bb7004b2ab400092bb8004c2b2ab4000cb9004d02002b2ab4000db9004e02002b2ab40011b9004e0200b8004f4d2b2cbeb9004d0200033e1d2cbea2001f2c1d32b600502bb800512b2c1d32b60052b9004d0200840301a7ffe1b800534e2b2dbeb9004d020003360415042dbea200782d150432c100549900302d150432c00054b60055b600563a052d150432c00054b60057b600563a062d150432c00054b600583607a700272d150432c00059b6005a3a052d150432c00059b6005b3a062d150432c00059b6005c360719052bb8005119062bb800512b1507b9004d0200840401a7ff872ab400122bb8005d2b2ab40017b9004d02002ab400182bb8004c2b2ab40019b9004e0200bb005e59b7005f3a0419042ab4000ab6006019042ab4000bb6006119042ab4000eb6006219042ab4000fb6006319042bb60064b1
-
+
 org/apache/geode/distributed/internal/StartupResponseMessage,3
 fromDataProblem,43,2ab40026c7000e2abb003859b70039b500262ab400262bb6003a572ab40026123b123cb8003db6003a57b1
 fromData,220,2a2bb700442a2bb900450100b500202a2bb80046b500092a2bb900470100b5000a2bb9004501003d2a1cbd0013b500032a1cbc0ab50002033e1d1ca200332ab400031d2bb80048532ab400021d2bb9004501004fa700143a042ab400021d2bb9004501004f1904bf840301a7ffce2bb9004501003e2a1dbd0013b500042a1dbd0013b500052a1dbc0ab5000603360415041da2002c2ab4000415042bb80048532ab4000515042bb80048532ab4000615042bb9004501004f840401a7ffd42a2bb80049c0004ab5000c2a2bb900450100b5000e2a2bb80046b50011b1
 toData,170,2a2bb7003e2b2ab40020b9003f02002ab400092bb800402b2ab4000ab9004102002b2ab40002beb9003f0200033d1c2ab40002bea2001f2ab400031c322bb800422b2ab400021c2eb9003f0200840201a7ffde2b2ab40006beb9003f0200033d1c2ab40006bea200292ab400041c322bb800422ab400051c322bb800422b2ab400061c2eb9003f0200840201a7ffd42ab4000c2bb800432b2ab4000eb9003f02002ab400112bb80040b1
-
+
 org/apache/geode/distributed/internal/StartupResponseWithVersionMessage,2
 fromData,43,2a2bb7001f2a2bb80020b50004bb001a59b7001b4d2c2bb600212a2cb60022b500062a2cb60023b50009b1
 toData,43,2a2bb700182ab400042bb80019bb001a59b7001b4d2c2ab40006b6001c2c2ab40009b6001d2c2bb6001eb1
-
+
 org/apache/geode/distributed/internal/WaitForViewInstallation,2
 fromData,26,2a2bb7001c2a2bb9001d0100b500162a2bb9001e0100b50017b1
 toData,26,2a2bb700192b2ab40016b9001a03002b2ab40017b9001b0200b1
-
+
 org/apache/geode/distributed/internal/locks/DLockQueryProcessor$DLockQueryMessage,2
 fromData,42,2a2bb700472a2bb80048b500032a2bb80049b500432a2bb9004a0100b500322a2bb9004b0100b50002b1
 toData,42,2a2bb700412ab400032bb800422ab400432bb800442b2ab40032b9004502002b2ab40002b900460200b1
-
+
 org/apache/geode/distributed/internal/locks/DLockQueryProcessor$DLockQueryReplyMessage,2
 fromData,74,2a2bb700092a2bb9000a0100b500032ab4000304a000352bb8000bc0000c4d2cc600152abb000d592c2bb9000a0100b7000eb500042a2bb9000a0100b500062a2bb9000f0100b50007b1
 toData,83,2a2bb700102b2ab40003b9001102002ab4000304a0003e2ab40004c7000b012bb80012a7001b2ab40004b600052bb800122b2ab40004b60013b9001102002b2ab40006b9001102002b2ab40007b900140300b1
-
+
 org/apache/geode/distributed/internal/locks/DLockRecoverGrantorProcessor$DLockRecoverGrantorMessage,2
 fromData,55,2a2bb700122a2bb80013b500022a2bb900140100b500032a2bb900140100b500052a2bb900150100b500042a2bb80016c00017b50006b1
 toData,52,2a2bb700182ab400022bb800192b2ab40003b9001a02002b2ab40005b9001a02002b2ab40004b9001b03002ab400062bb8001cb1
-
+
 org/apache/geode/distributed/internal/locks/DLockRecoverGrantorProcessor$DLockRecoverGrantorReplyMessage,2
 fromData,30,2a2bb700052a2bb900060100b500022a2bb80007c00008c00008b50003b1
 toData,24,2a2bb700092b2ab40002b9000a02002ab400032bb8000bb1
-
+
 org/apache/geode/distributed/internal/locks/DLockReleaseProcessor$DLockReleaseMessage,2
 fromData,52,2a2bb7004e2a2bb8004fb500032a2bb80050b5002b2a2bb900510100b5002a2a2bb900520100b500022a2bb900520100b50041b1
 toData,52,2a2bb700492ab400032bb8004a2ab4002b2bb8004b2b2ab4002ab9004c02002b2ab40002b9004d02002b2ab40041b9004d0200b1
-
+
 org/apache/geode/distributed/internal/locks/DLockReleaseProcessor$DLockReleaseReplyMessage,2
 fromData,24,2a2bb700052a2bb80006b500072a2bb900080100b50003b1
 toData,24,2a2bb700092ab400072bb8000a2b2ab40003b9000b0200b1
-
+
 org/apache/geode/distributed/internal/locks/DLockRemoteToken,2
 fromData,10,bb000e59120fb70010bf
 toData,53,2ab400112bb800252ab40012b600152bb800252b2ab40012b60026b9002702002b2ab40013b9002702002b2ab40014b900280300b1
-
+
 org/apache/geode/distributed/internal/locks/DLockRequestProcessor$DLockRequestMessage,2
 fromData,134,2a2bb700a62a2bb900a70100b500242a2bb800a8b500102a2bb900a90100b500112a2bb900a90100b500122a2bb900a90100b500132a2bb900aa0100b500552a2bb900aa0100b5000e2a2bb900ab0100b5000f2a2bb900ab0100b500142a2bb900ab0100b500152a2bb900a90100b500162a2bb900ab0100b500172a2bb900ab0100b5006cb1
 toData,134,2a2bb700a02b2ab40024b900a102002ab400102bb800a22b2ab40011b900a303002b2ab40012b900a303002b2ab40013b900a303002b2ab40055b900a402002b2ab4000eb900a402002b2ab4000fb900a502002b2ab40014b900a502002b2ab40015b900a502002b2ab40016b900a303002b2ab40017b900a502002b2ab4006cb900a50200b1
-
+
 org/apache/geode/distributed/internal/locks/DLockRequestProcessor$DLockResponseMessage,2
 fromData,72,2a2bb700442a2bb900450100b500032a2bb900460100b500112a2bb80047b500122a2bb900480100b5003f2a2bb80047b500412a2bb900490100b500132a2bb900490100b50043b1
 toData,72,2a2bb7003b2b2ab40003b9003c02002b2ab40011b9003d02002ab400122bb8003e2b2ab4003fb9004003002ab400412bb8003e2b2ab40013b9004202002b2ab40043b900420200b1
-
+
 org/apache/geode/distributed/internal/locks/DLockService$SuspendLockingToken,2
 fromData,1,b1
 toData,1,b1
-
+
 org/apache/geode/distributed/internal/locks/DeposeGrantorProcessor$DeposeGrantorMessage,2
 fromData,55,2a2bb700162a2bb900170100b500092a2bb80018b500042a2bb80019c0001ab500052a2bb9001b0100b500062a2bb900170100b50007b1
 toData,52,2a2bb7001c2b2ab40009b9001d02002ab400042bb8001e2ab400052bb8001f2b2ab40006b9002003002b2ab40007b9001d0200b1
-
+
 org/apache/geode/distributed/internal/locks/ElderInitProcessor$ElderInitMessage,2
 fromData,16,2a2bb7001d2a2bb9001e0100b50005b1
 toData,16,2a2bb7001f2b2ab40005b900200200b1
-
+
 org/apache/geode/distributed/internal/locks/ElderInitProcessor$ElderInitReplyMessage,2
 fromData,38,2a2bb7000e2a2bb8000fb500042a2bb8000fb500052a2bb8000fb500062a2bb8000fb50007b1
 toData,38,2a2bb700102ab400042bb800112ab400052bb800112ab400062bb800112ab400072bb80011b1
-
+
 org/apache/geode/distributed/internal/locks/GrantorRequestProcessor$GrantorInfoReplyMessage,2
 fromData,47,2a2bb700142a2bb80015c00016b500052a2bb900170100b500092a2bb900180100b5000b2a2bb900190100b50007b1
 toData,44,2a2bb7001a2ab400052bb8001b2b2ab40009b9001c03002b2ab4000bb9001d02002b2ab40007b9001e0200b1
-
+
 org/apache/geode/distributed/internal/locks/GrantorRequestProcessor$GrantorRequestMessage,2
 fromData,73,2a2bb700292a2bb9002a0100b500052a2bb9002b0100b500062a2bb8002cb500072a2bb9002b0100b5000b2a2bb9002d0100b500092ab4000904a0000e2a2bb8002ec0002fb50008b1
 toData,70,2a2bb700302b2ab40005b9003103002b2ab40006b9003202002ab400072bb800332b2ab4000bb9003202002b2ab40009b9003402002ab4000904a0000b2ab400082bb80035b1
-
+
 org/apache/geode/distributed/internal/locks/NonGrantorDestroyedProcessor$NonGrantorDestroyedMessage,2
 fromData,24,2a2bb700282a2bb900290100b500082a2bb8002ab50006b1
 toData,24,2a2bb7002b2b2ab40008b9002c02002ab400062bb8002db1
-
+
 org/apache/geode/distributed/internal/locks/NonGrantorDestroyedProcessor$NonGrantorDestroyedReplyMessage,2
 fromData,16,2a2bb700172a2bb900180100b50008b1
 toData,16,2a2bb700192b2ab40008b9001a0200b1
-
+
 org/apache/geode/distributed/internal/membership/InternalDistributedMember,6
 fromData,38,2a2bb6009f2ab6009ab900190100b20095b60078a100112ab400012bb900960200a700044db1
 fromDataPre_GFE_7_1_0_0,282,2bb800a04d2bb900a101003e2a2bb8008fb500072ab2000499000e2c2ab40007b800a2a700072cb60008b500072bb900a3010036041504047e99000704a700040336051504057e99000704a700040336062a1504077e99000704a7000403b5003a2bb900a1010036072bb900a1010036082bb900a3010036092bb800913a0a02360b2bb8008f3a0c1509100da0000e2a2bb8008fb50014a700152bb8008f3a0d190dc6000a190db800a4360b2bb8008f3a0d2bb80092b60093360e190db600779e0011bb004f59190d150eb70050a70004013a0f2a15042bb700943610bb003159150715081509150b190c190a190fb700323a112a2c1d1505150615101911b80018b500012ab40001b9003e01009e000704a7000403b80047b1
@@ -291,1848 +291,1844 @@ fromDataPre_GFE_9_0_0_0,282,2bb800a04d2bb900a101003e2a2bb8008fb500072ab200049900
 toData,34,2a2bb600992ab40001b900190100b20095b60078a1000d2ab6009a2bb9008b0200b1
 toDataPre_GFE_7_1_0_0,271,2ab40001b9003e01009e000704a7000403b800472ab600592bb8009b2b2ab60058b9009c02002ab400072bb80082033d2ab40001b9008301009900071c04803d2ab40001b9007301009900071c05803d2ab4003a9900071c07803d2b1c1100ff7e91b9009d02002b2ab40001b9003d0100b9009c02002b2ab40001b9004b0100b9009c02002b2ab40001b9003e0100b9009d02002ab40001b9004001002bb800852ab40001b9004c01002bb800822ab40001b9003e01003e1d100da0000e2ab400142bb80082a700132ab40001b9003f0100b8009e2bb800822ab40001b9004e01003a041904c70008124da700081904b600862bb800821904c7000911012ca700081904b60087b800882bb80089b1
 toDataPre_GFE_9_0_0_0,266,2ab600592bb8009b2b2ab60058b9009c02002ab400072bb80082033d2ab40001b9008301009900071c04803d2ab40001b9007301009900071c05803d2ab4003a9900071c07803d1c1008803d2b1c1100ff7e91b9009d02002b2ab40001b9003d0100b9009c02002b2ab40001b9004b0100b9009c02002ab40001b9003e01003e2b1db9009d02002ab40001b9004001002bb800852ab40001b9004c01002bb800821d100da0000e2ab400142bb80082a700132ab40001b9003f0100b8009e2bb800822ab40001b9004e01003a041904c70008124da700081904b600862bb800821904c7000911012ca700081904b60087b800882bb800892ab40001b90019010036052b150504b8008ab1
-
+
 org/apache/geode/distributed/internal/membership/NetView,2
 fromData,109,2a2bb8006ec0002bb500132a2bb9006f0100b500082a2bb80070b5000bb200409a00122ab4000bc7000bbb004159b70042bf2abb000c592ab4000bb7000db5000e2a2bb80071b500102a2bb80071b500122a2bb80072b500052bb800734d2cc6000d2ab400042cb900180200b1
 toData,60,2ab400132bb800682b2ab40008b9006902002a2ab4000b2bb7006a2ab400102bb8006b2ab400122bb8006b2ab400052bb8006c2ab400042bb8006db1
-
+
 org/apache/geode/distributed/internal/membership/gms/GMSMember,2
 fromData,62,2a2bb6004b2a2bb9004c0100b500072a2bb9004d0100b5003c2a2bb9004d0100b500092a2bb9004c0100b500082a2bb8004eb5000a2a2bb8004fb5000bb1
 toData,62,2a2bb6003f2b2ab40007b9004002002b2ab4003cb9004102002b2ab40009b9004102002b2ab40008b9004002002ab4000a2bb800422ab4000b2bb80043b1
-
+
 org/apache/geode/distributed/internal/membership/gms/locator/FindCoordinatorRequest,2
 fromData,97,2a2bb8001dc0001ab500022bb9001e01003d2abb001f591cb70020b50005033e1d1ca2001a2ab400052bb8001dc0001ab90021020057840301a7ffe72a2bb9001e0100b500062a2bb9001e0100b500082a2bb80022b500042a2bb80023b50007b1
 toData,114,2ab400022bb800142ab40005c6003a2b2ab40005b900150100b9001602002ab40005b9001701004d2cb9001801009900152cb900190100c0001a4e2d2bb80014a7ffe8a7000a2b03b9001602002b2ab40006b9001602002b2ab40008b9001602002ab400042bb8001b2ab400072bb8001cb1
-
+
 org/apache/geode/distributed/internal/membership/gms/locator/FindCoordinatorResponse,2
 fromData,105,2a2bb80026c00027b500022a2bb80026c00027b500032a2bb80028b5000a2a2bb80029b5000c2a2bb9002a0100b500092ab400099a00342a2bb9002a0100b500042a2bb9002a0100b500072a2bb9002a0100b500082a2bb80026c0002bb500052a2bb8002cb50006b1
 toData,89,2ab400022bb800212ab400032bb800212ab4000a2bb800222ab4000c2bb800232b2ab40009b9002402002b2ab40004b9002402002b2ab40007b9002402002b2ab40008b9002402002ab400052bb800212ab400062bb80025b1
-
+
 org/apache/geode/distributed/internal/membership/gms/locator/GetViewRequest,2
 fromData,1,b1
 toData,1,b1
-
+
 org/apache/geode/distributed/internal/membership/gms/locator/GetViewResponse,2
 fromData,12,2a2bb80005c00006b50002b1
 toData,9,2ab400022bb80004b1
-
+
 org/apache/geode/distributed/internal/membership/gms/messages/HeartbeatMessage,2
 fromData,11,2a2bb900110100b50002b1
 toData,11,2b2ab40002b900100200b1
-
+
 org/apache/geode/distributed/internal/membership/gms/messages/HeartbeatRequestMessage,2
 fromData,22,2a2bb900130100b500022a2bb80014c00015b50003b1
 toData,19,2b2ab40002b9001102002ab400032bb80012b1
-
+
 org/apache/geode/distributed/internal/membership/gms/messages/InstallViewMessage,2
 fromData,49,2a2bb700112a2bb900120100b500072ab800132bb90012010032b500052a2bb80014c00015b500022a2bb80014b50006b1
 toData,45,2a2bb7000d2b2ab40007b9000e02002b2ab40005b6000fb9000e02002ab400022bb800102ab400062bb80010b1
-
+
 org/apache/geode/distributed/internal/membership/gms/messages/JoinRequestMessage,2
 fromData,48,2a2bb8001bc0001cb500042a2bb8001bb500052a2bb8001db500022a2bb9001e0100b6001f2a2bb900200100b50006b1
 toData,45,2ab400042bb800162ab400052bb800162ab400022bb800172b2ab60018b9001902002b2ab40006b9001a0200b1
-
+
 org/apache/geode/distributed/internal/membership/gms/messages/JoinResponseMessage,2
 fromData,47,2a2bb8001bc0001cb500022a2bb8001bc0001db500032a2bb8001eb500072a2bb8001fb500082a2bb8001fb50006b1
 toData,41,2ab400022bb800182ab400032bb800182ab400072bb800192ab400082bb8001a2ab400062bb8001ab1
-
+
 org/apache/geode/distributed/internal/membership/gms/messages/LeaveRequestMessage,2
 fromData,20,2a2bb8000cc0000db500032a2bb8000eb50004b1
 toData,17,2ab400032bb8000a2ab400042bb8000bb1
-
+
 org/apache/geode/distributed/internal/membership/gms/messages/RemoveMemberMessage,2
 fromData,20,2a2bb80015c00016b500032a2bb80017b50004b1
 toData,17,2ab400032bb800132ab400042bb80014b1
-
+
 org/apache/geode/distributed/internal/membership/gms/messages/SuspectMembersMessage,2
 fromData,53,2bb9001b01003d033e1d1ca20029bb0016592bb8001cc0001d2bb8001eb7001f3a042ab400031904b90020020057840301a7ffd8b1
 toData,81,2ab40003c600452b2ab40003b900110100b9001202002ab40003b9001301004d2cb9001401009900202cb900150100c000164e2db600172bb800182db600192bb8001aa7ffdda7000a2b03b900120200b1
-
+
 org/apache/geode/distributed/internal/membership/gms/messages/ViewAckMessage,2
 fromData,37,2a2bb7000e2a2bb9000f0100b500032a2bb900100100b500042a2bb80011c00012b50005b1
 toData,34,2a2bb7000a2b2ab40003b9000b02002b2ab40004b9000c02002ab400052bb8000db1
-
+
 org/apache/geode/distributed/internal/membership/gms/mgr/LocalViewMessage,2
 fromData,8,bb000859b70009bf
 toData,8,bb000859b70009bf
-
+
 org/apache/geode/distributed/internal/streaming/StreamingOperation$RequestStreamingMessage,2
 fromData,16,2a2bb700202a2bb900210100b50003b1
 toData,16,2a2bb700222b2ab40003b900230200b1
-
+
 org/apache/geode/distributed/internal/streaming/StreamingOperation$StreamingReplyMessage,2
 fromData,339,2a2bb700142bb9001501003d2a2bb900150100b500102a2bb900160100b500112a2bb900160100b500032bb800174e2db20018b600199e000704a700040336041c02a0000b2a01b50002a701082a1cb5000e2abb001a591cb7001bb500022ab4000399000704b8001c2ab40008b8001d3a051905c1001e3606013a07150699000d1905c0001eb6001f3a0703360803360915091ca20087b20020c6000cb2002006b900210200150699000fb80022990009043608a700672bb800233a0a150699004a1907c600451907b90024010099003b1504360b150b99001715099a0012190ac100259a000704a7000403360b150b990019bb0026591907c00027190ac00028c00028b700293a0a2ab40002190ab9002a020057840901a7ff7915089900172a04b50004b20020c6000cb2002005b9002102002ab4000399001a03b8001ca700133a0c2ab4000399000703b8001c190cbfb1
 toData,85,2a2bb7002b2ab4000dc7000d2b02b9002c0200a7000d2b2ab4000eb9002c02002b2ab40010b9002c02002b2ab40011b9002d02002b2ab40003b9002d02002ab4000dc600122ab4000e9e000b2ab4000d2bb6002eb1
-
+
 org/apache/geode/distributed/internal/tcpserver/InfoRequest,2
 fromData,1,b1
 toData,1,b1
-
+
 org/apache/geode/distributed/internal/tcpserver/InfoResponse,2
 fromData,9,2a2bb80003b50002b1
 toData,9,2ab400022bb80004b1
-
+
 org/apache/geode/distributed/internal/tcpserver/ShutdownRequest,2
 fromData,1,b1
 toData,1,b1
-
+
 org/apache/geode/distributed/internal/tcpserver/ShutdownResponse,2
 fromData,1,b1
 toData,1,b1
-
+
 org/apache/geode/distributed/internal/tcpserver/VersionRequest,2
 fromData,1,b1
 toData,1,b1
-
+
 org/apache/geode/distributed/internal/tcpserver/VersionResponse,2
 fromData,11,2a2bb900060100b50004b1
 toData,11,2b2ab40004b900050200b1
-
+
 org/apache/geode/internal/DSFIDFactory,2
 fromData,8,bb000259b70003bf
 toData,8,bb000259b70003bf
-
+
 org/apache/geode/internal/InternalDataSerializer$RegistrationMessage,2
 fromData,39,2a2bb700282bb800292a2bb8002ab500042a2bb9002b0100b500062a2bb8002cc00008b50009b1
 toData,32,2a2bb700242ab400042bb800252b2ab40006b9002602002ab400092bb80027b1
-
+
 org/apache/geode/internal/InternalInstantiator$RegistrationContextMessage,2
 fromData,14,2a2bb7001a2a2bb8001bb5000db1
 toData,14,2a2bb7001c2ab4000d2bb8001db1
-
+
 org/apache/geode/internal/InternalInstantiator$RegistrationMessage,3
 fromDataProblem,38,2ab4000bc7000e2abb001b59b7001cb5000b2ab4000b2bb6001d572ab4000b121eb6001d57b1
 fromData,125,2a2bb7001f2a2bb80020b500122a2bb80020b50013b80021c6004f2a2ab40012b80022b50003a7001b4d2ab2002404bd002559032c53b60026b700272a01b500032a2ab40013b80022b50005a7001b4d2ab2002804bd002559032c53b60026b700272a01b500052a2bb900290100b500072a2bb8002ac00009b5000ab1
 toData,46,2a2bb700162ab40003b600172bb800182ab40005b600172bb800182b2ab40007b9001902002ab4000a2bb8001ab1
-
+
 org/apache/geode/internal/ManagerInfo,2
 fromData,61,2a2bb900540100b500182a2bb900540100b500192a2bb900540100b5001a2bb9005501003d1c9e00161cbc084e2b2db9005602002a2db80057b5001bb1
 toData,74,2b2ab40018b9005002002b2ab40019b9005002002b2ab4001ab9005002002ab4001bc7000d2b03b900510200a7001d2ab4001bb600524d2b2cbeb9005102002b2c032cbeb900530400b1
-
+
 org/apache/geode/internal/admin/ClientMembershipMessage,2
 fromData,32,2a2bb7000d2a2bb8000eb500022a2bb8000eb500032a2bb9000f0100b50004b1
 toData,32,2a2bb7000a2ab400022bb8000b2ab400032bb8000b2b2ab40004b9000c0200b1
-
+
 org/apache/geode/internal/admin/remote/AddHealthListenerRequest,2
 fromData,17,2a2bb700102a2bb80011c00012b50007b1
 toData,14,2a2bb7000e2ab400072bb8000fb1
-
+
 org/apache/geode/internal/admin/remote/AddHealthListenerResponse,2
 fromData,16,2a2bb7000c2a2bb9000d0100b50008b1
 toData,16,2a2bb7000a2b2ab40008b9000b0200b1
-
+
 org/apache/geode/internal/admin/remote/AddStatListenerRequest,2
 fromData,26,2a2bb700112a2bb900120100b500042a2bb900130100b50006b1
 toData,26,2a2bb7000e2b2ab40004b9000f03002b2ab40006b900100200b1
-
+
 org/apache/geode/internal/admin/remote/AddStatListenerResponse,2
 fromData,16,2a2bb7000c2a2bb9000d0100b50008b1
 toData,16,2a2bb7000a2b2ab40008b9000b0200b1
-
+
 org/apache/geode/internal/admin/remote/AdminConsoleDisconnectMessage,2
 fromData,34,2a2bb7001a2a2bb9001b0100b500052a2bb9001b0100b500042a2bb8001cb50007b1
 toData,34,2a2bb700172b2ab40005b9001802002b2ab40004b9001802002ab400072bb80019b1
-
+
 org/apache/geode/internal/admin/remote/AdminConsoleMessage,2
 fromData,16,2a2bb7000f2a2bb900100100b50005b1
 toData,16,2a2bb7000d2b2ab40005b9000e0200b1
-
+
 org/apache/geode/internal/admin/remote/AdminFailureResponse,2
 fromData,17,2a2bb700092a2bb8000ac0000bb50005b1
 toData,14,2a2bb700072ab400052bb80008b1
-
+
 org/apache/geode/internal/admin/remote/AdminRequest,2
 fromData,24,2a2bb7002b2a2bb9002c0100b500052a2bb8002db50003b1
 toData,24,2a2bb700282b2ab40005b9002902002ab400032bb8002ab1
-
+
 org/apache/geode/internal/admin/remote/AdminResponse,2
 fromData,16,2a2bb700062a2bb900070100b50002b1
 toData,16,2a2bb700042b2ab40002b900050200b1
-
+
 org/apache/geode/internal/admin/remote/AlertLevelChangeMessage,2
 fromData,16,2a2bb700132a2bb900140100b50004b1
 toData,16,2a2bb700112b2ab40004b900120200b1
-
+
 org/apache/geode/internal/admin/remote/AlertListenerMessage,2
 fromData,69,2a2bb7001f2a2bb900200100b500062a2bb80021c00022b500072a2bb80023b500082a2bb80023b5000a2a2bb900240100b5000b2a2bb80023b5000c2a2bb80023b5000db1
 toData,66,2a2bb7001a2b2ab40006b9001b02002ab400072bb8001c2ab400082bb8001d2ab4000a2bb8001d2b2ab4000bb9001e03002ab4000c2bb8001d2ab4000d2bb8001db1
-
+
 org/apache/geode/internal/admin/remote/AlertsNotificationMessage,2
 fromData,20,2a2bb700052a2bb80006c00007c00007b50003b1
 toData,14,2a2bb700022ab400032bb80004b1
-
+
 org/apache/geode/internal/admin/remote/AppCacheSnapshotMessage,2
 fromData,16,2a2bb700122a2bb900130100b50005b1
 toData,16,2a2bb700102b2ab40005b900110200b1
-
+
 org/apache/geode/internal/admin/remote/BridgeServerRequest,2
 fromData,47,2a2bb700182a2bb900190100b500052a2bb900190100b500062a2bb8001ac0001bb5000a2a2bb900190100b5000fb1
 toData,44,2a2bb700152b2ab40005b9001602002b2ab40006b9001602002ab4000a2bb800172b2ab4000fb900160200b1
-
+
 org/apache/geode/internal/admin/remote/BridgeServerResponse,2
 fromData,28,2a2bb700292a2bb8002ac0000fb5000b2a2bb8002ac00024b50025b1
 toData,22,2a2bb700272ab4000b2bb800282ab400252bb80028b1
-
+
 org/apache/geode/internal/admin/remote/CacheConfigRequest,2
 fromData,36,2a2bb700112a2bb900120100b500032a2bb900130100b500042a2bb900130100b50006b1
 toData,36,2a2bb7000e2b2ab40003b9000f02002b2ab40004b9001002002b2ab40006b900100200b1
-
+
 org/apache/geode/internal/admin/remote/CacheConfigResponse,2
 fromData,28,2a2bb7001c2a2bb8001dc00014b500092a2bb8001dc00017b50018b1
 toData,22,2a2bb7001a2ab400092bb8001b2ab400182bb8001bb1
-
+
 org/apache/geode/internal/admin/remote/CacheInfoRequest,2
 fromData,6,2a2bb7000bb1
 toData,6,2a2bb7000ab1
-
+
 org/apache/geode/internal/admin/remote/CacheInfoResponse,2
 fromData,17,2a2bb7000f2a2bb80010c00008b5000ab1
 toData,14,2a2bb7000d2ab4000a2bb8000eb1
-
+
 org/apache/geode/internal/admin/remote/CancelStatListenerRequest,2
 fromData,16,2a2bb7000d2a2bb9000e0100b50003b1
 toData,16,2a2bb7000b2b2ab40003b9000c0200b1
-
+
 org/apache/geode/internal/admin/remote/CancelStatListenerResponse,2
 fromData,6,2a2bb7000ab1
 toData,6,2a2bb70009b1
-
+
 org/apache/geode/internal/admin/remote/CancellationMessage,2
 fromData,16,2a2bb7000c2a2bb9000d0100b50004b1
 toData,16,2a2bb7000a2b2ab40004b9000b0200b1
-
+
 org/apache/geode/internal/admin/remote/ChangeRefreshIntervalMessage,2
 fromData,16,2a2bb700072a2bb900080100b50003b1
 toData,16,2a2bb700052b2ab40003b900060300b1
-
+
 org/apache/geode/internal/admin/remote/ClientHealthStats,4
 fromData,73,2a2bb80011b500052a2bb80011b500062a2bb80011b500072a2bb80011b500082a2bb80011b500092a2bb80011b5000b2a2bb80012b5000a2a2bb80013b5000c2a2bb80014b50004b1
 fromDataPre_GFE_8_0_0_0,65,2a2bb80011b500052a2bb80011b500062a2bb80011b500072a2bb80011b500082a2bb80011b500092a2bb80011b5000b2a2bb80012b5000a2a2bb80013b5000cb1
 toData,73,2ab400052bb8000d2ab400062bb8000d2ab400072bb8000d2ab400082bb8000d2ab400092bb8000d2ab4000b2bb8000d2ab4000a2bb8000e2ab4000c2bb8000f2ab400042bb80010b1
 toDataPre_GFE_8_0_0_0,65,2ab400052bb8000d2ab400062bb8000d2ab400072bb8000d2ab400082bb8000d2ab400092bb8000d2ab4000b2bb8000d2ab4000a2bb8000e2ab4000c2bb8000fb1
-
+
 org/apache/geode/internal/admin/remote/CompactRequest,2
 fromData,6,2a2bb70029b1
 toData,6,2a2bb7002ab1
-
+
 org/apache/geode/internal/admin/remote/CompactResponse,2
 fromData,14,2a2bb700042a2bb80005b50003b1
 toData,14,2a2bb700062ab400032bb80007b1
-
+
 org/apache/geode/internal/admin/remote/DestroyEntryMessage,2
 fromData,25,2a2bb7001b2a2bb8001cc0001db500042a2bb8001cb50005b1
 toData,22,2a2bb700192ab400042bb8001a2ab400052bb8001ab1
-
+
 org/apache/geode/internal/admin/remote/DestroyRegionMessage,2
 fromData,17,2a2bb7001a2a2bb8001bc0001cb50004b1
 toData,14,2a2bb700182ab400042bb80019b1
-
+
 org/apache/geode/internal/admin/remote/DurableClientInfoRequest,2
 fromData,28,2a2bb7000c2a2bb8000db500032a2bb9000e0100b500042ab80005b1
 toData,24,2a2bb700092ab400032bb8000a2b2ab40004b9000b0200b1
-
+
 org/apache/geode/internal/admin/remote/DurableClientInfoResponse,2
 fromData,16,2a2bb700172a2bb900180100b50002b1
 toData,16,2a2bb700152b2ab40002b900160200b1
-
+
 org/apache/geode/internal/admin/remote/FetchDistLockInfoRequest,2
 fromData,6,2a2bb7000bb1
 toData,6,2a2bb7000ab1
-
+
 org/apache/geode/internal/admin/remote/FetchDistLockInfoResponse,2
 fromData,20,2a2bb7001e2a2bb8001fc00018c00018b50019b1
 toData,14,2a2bb7001c2ab400192bb8001db1
-
+
 org/apache/geode/internal/admin/remote/FetchHealthDiagnosisRequest,2
 fromData,27,2a2bb7000d2bb9000e01003d2bb8000fc000104e2a1c2db70003b1
 toData,24,2a2bb7000a2b2ab40005b9000b02002ab400062bb8000cb1
-
+
 org/apache/geode/internal/admin/remote/FetchHealthDiagnosisResponse,2
 fromData,14,2a2bb7000c2a2bb8000db50008b1
 toData,14,2a2bb7000a2ab400082bb8000bb1
-
+
 org/apache/geode/internal/admin/remote/FetchHostRequest,2
 fromData,6,2a2bb7000bb1
 toData,6,2a2bb7000ab1
-
+
 org/apache/geode/internal/admin/remote/FetchHostResponse,2
 fromData,67,2a2bb700342a2bb80035b500132a2bb80036c00037b5000e2a2bb80036c00014b500262a2bb80036c00014b500192a2bb900380100b5002d2a2bb900390100b50002b1
 toData,58,2a2bb7002f2ab400132bb800302ab4000e2bb800312ab400262bb800312ab400192bb800312b2ab4002db9003203002b2ab40002b900330200b1
-
+
 org/apache/geode/internal/admin/remote/FetchResourceAttributesRequest,2
 fromData,16,2a2bb7000d2a2bb9000e0100b50003b1
 toData,16,2a2bb7000b2b2ab40003b9000c0300b1
-
+
 org/apache/geode/internal/admin/remote/FetchResourceAttributesResponse,2
 fromData,20,2a2bb7000f2a2bb80010c00011c00011b50009b1
 toData,14,2a2bb7000d2ab400092bb8000eb1
-
+
 org/apache/geode/internal/admin/remote/FetchStatsRequest,2
 fromData,14,2a2bb7000c2a2bb8000db50003b1
 toData,14,2a2bb7000a2ab400032bb8000bb1
-
+
 org/apache/geode/internal/admin/remote/FetchStatsResponse,2
 fromData,20,2a2bb700152a2bb80016c00011c00011b5000fb1
 toData,14,2a2bb700132ab4000f2bb80014b1
-
+
 org/apache/geode/internal/admin/remote/FetchSysCfgRequest,2
 fromData,6,2a2bb7000bb1
 toData,6,2a2bb7000ab1
-
+
 org/apache/geode/internal/admin/remote/FetchSysCfgResponse,2
 fromData,17,2a2bb7000d2a2bb8000ec0000fb50009b1
 toData,14,2a2bb7000b2ab400092bb8000cb1
-
+
 org/apache/geode/internal/admin/remote/FlushAppCacheSnapshotMessage,2
 fromData,6,2a2bb70006b1
 toData,6,2a2bb70005b1
-
+
 org/apache/geode/internal/admin/remote/HealthListenerMessage,2
 fromData,27,2a2bb7000e2a2bb9000f0100b500042a2bb80010c00011b50005b1
 toData,24,2a2bb7000b2b2ab40004b9000c02002ab400052bb8000db1
-
+
 org/apache/geode/internal/admin/remote/LicenseInfoRequest,2
 fromData,6,2a2bb7000bb1
 toData,6,2a2bb7000ab1
-
+
 org/apache/geode/internal/admin/remote/LicenseInfoResponse,2
 fromData,17,2a2bb7000b2a2bb8000cc00005b50007b1
 toData,14,2a2bb700092ab400072bb8000ab1
-
+
 org/apache/geode/internal/admin/remote/MissingPersistentIDsRequest,1
 fromData,6,2a2bb7002fb1
-
+
 org/apache/geode/internal/admin/remote/MissingPersistentIDsResponse,2
 fromData,124,2a2bb700072bb9000801003d2abb0009591cb7000ab50002033e1d1ca20024bb000b59b7000c3a0419042bb8000d2ab400021904b9000e020057840301a7ffdd2bb9000801003d2abb0009591cb7000ab50003033e1d1ca20024bb000b59b7000c3a0419042bb8000d2ab400031904b9000e020057840301a7ffddb1
 toData,110,2a2bb7000f2b2ab40002b900100100b9001102002ab40002b9001201004d2cb9001301009900152cb900140100c000154e2d2bb80016a7ffe82b2ab40003b900100100b9001102002ab40003b9001201004d2cb9001301009900152cb900140100c000154e2d2bb80016a7ffe8b1
-
+
 org/apache/geode/internal/admin/remote/ObjectDetailsRequest,2
 fromData,24,2a2bb700182a2bb80019b500032a2bb9001a0100b50004b1
 toData,24,2a2bb700152ab400032bb800162b2ab40004b900170200b1
-
+
 org/apache/geode/internal/admin/remote/ObjectDetailsResponse,2
 fromData,33,2a2bb700192a2bb8001ab5000b2a2bb8001ab5000d2a2bb8001ac0000eb50011b1
 toData,30,2a2bb700172ab4000b2bb800182ab4000d2bb800182ab400112bb80018b1
-
+
 org/apache/geode/internal/admin/remote/ObjectNamesRequest,2
 fromData,6,2a2bb70014b1
 toData,6,2a2bb70013b1
-
+
 org/apache/geode/internal/admin/remote/ObjectNamesResponse,2
 fromData,17,2a2bb700162a2bb80017c00008b5000ab1
 toData,14,2a2bb700142ab4000a2bb80015b1
-
+
 org/apache/geode/internal/admin/remote/PrepareRevokePersistentIDRequest,2
 fromData,35,2a2bb700292abb002a59b7002bb500022ab400022bb8002c2a2bb9002d0100b50003b1
 toData,24,2a2bb7002e2ab400022bb8002f2b2ab40003b900300200b1
-
+
 org/apache/geode/internal/admin/remote/RefreshMemberSnapshotRequest,2
 fromData,6,2a2bb70009b1
 toData,6,2a2bb70008b1
-
+
 org/apache/geode/internal/admin/remote/RefreshMemberSnapshotResponse,2
 fromData,17,2a2bb7000e2a2bb8000fc00008b5000ab1
 toData,14,2a2bb7000c2ab4000a2bb8000db1
-
+
 org/apache/geode/internal/admin/remote/RegionAdminMessage,2
 fromData,14,2a2bb700072a2bb80008b50002b1
 toData,14,2a2bb700052ab400022bb80006b1
-
+
 org/apache/geode/internal/admin/remote/RegionAdminRequest,2
 fromData,14,2a2bb7000d2a2bb8000eb50002b1
 toData,14,2a2bb7000b2ab400022bb8000cb1
-
+
 org/apache/geode/internal/admin/remote/RegionAttributesRequest,2
 fromData,6,2a2bb7000db1
 toData,6,2a2bb7000cb1
-
+
 org/apache/geode/internal/admin/remote/RegionAttributesResponse,2
 fromData,17,2a2bb7000c2a2bb8000dc00005b50008b1
 toData,14,2a2bb7000a2ab400082bb8000bb1
-
+
 org/apache/geode/internal/admin/remote/RegionRequest,2
 fromData,57,2a2bb700142a2bb900150100b500032a2bb900150100b500052a2bb80016b500062a2bb80016b500082a2bb80017c00018b5000b2ab80007b1
 toData,50,2a2bb700102b2ab40003b9001102002b2ab40005b9001102002ab400062bb800122ab400082bb800122ab4000b2bb80013b1
-
+
 org/apache/geode/internal/admin/remote/RegionResponse,2
 fromData,33,2a2bb700262a2bb80027b500162a2bb80027b5001b2a2bb80028c0001db5001eb1
 toData,30,2a2bb700232ab400162bb800242ab4001b2bb800242ab4001e2bb80025b1
-
+
 org/apache/geode/internal/admin/remote/RegionSizeRequest,2
 fromData,6,2a2bb70015b1
 toData,6,2a2bb70014b1
-
+
 org/apache/geode/internal/admin/remote/RegionSizeResponse,2
 fromData,26,2a2bb7000e2a2bb9000f0100b500082a2bb9000f0100b5000ab1
 toData,26,2a2bb7000c2b2ab40008b9000d02002b2ab4000ab9000d0200b1
-
+
 org/apache/geode/internal/admin/remote/RegionStatisticsRequest,2
 fromData,6,2a2bb7000db1
 toData,6,2a2bb7000cb1
-
+
 org/apache/geode/internal/admin/remote/RegionStatisticsResponse,2
 fromData,17,2a2bb7000c2a2bb8000dc00005b50008b1
 toData,14,2a2bb7000a2ab400082bb8000bb1
-
+
 org/apache/geode/internal/admin/remote/RegionSubRegionSizeRequest,2
 fromData,6,2a2bb70010b1
 toData,6,2a2bb7000fb1
-
+
 org/apache/geode/internal/admin/remote/RegionSubRegionsSizeResponse,2
 fromData,27,2a2bb700212a2bb900220100b500062a2bb80023c0000ab50002b1
 toData,24,2a2bb7001e2b2ab40006b9001f02002ab400022bb80020b1
-
+
 org/apache/geode/internal/admin/remote/RemoteBridgeServer,2
 fromData,217,2a2bb900420100b500032a2bb900430100b5000a2a2bb900430100b500102a2bb900420100b500122a2bb900420100b500162a2bb900420100b5000e2a2bb900420100b500182a2bb900420100b5001a2a2bb900420100b500142a2bb80044b600452a2bb80046b600472a2bb80044b600482a2bb80049c0004ab6004b2a2bb8004cb6004d2a2bb900420100b5000c2a2bb900430100b500232ab600412bb900420100b9002902002ab600412bb80044b9002702002bb800444d2cc600102ab600412cb9002b0200a700102ab600412bb80044b9002d0200b1
 toData,217,2b2ab40003b9003b02002b2ab4000ab9003c02002b2ab40010b9003c02002b2ab40012b9003b02002b2ab40016b9003b02002b2ab4000eb9003b02002b2ab40018b9003b02002b2ab4001ab9003b02002b2ab40014b9003b02002ab400052bb8003d2ab4001c2bb8003e2ab400072bb8003d2ab4001f2bb8003f2ab400212bb800402b2ab4000cb9003b02002b2ab40023b9003c02002b2ab60041b900280100b9003b02002ab60041b9002601002bb8003d2ab60041b9002a01002bb8003d2ab60041b9002a0100c700102ab60041b9002c01002bb8003db1
-
+
 org/apache/geode/internal/admin/remote/RemoteCacheInfo,2
 fromData,106,2a2bb80030b500032a2bb900310100b500052a2bb900320100b500072a2bb900310100b500092a2bb900310100b5000b2a2bb900310100b5000d2a2bb900310100b5000f2a2bb80033b500102a2bb80034c00020b500112a2bb80035b500122a2bb900320100b50029b1
 toData,103,2ab400032bb8002a2b2ab40005b9002b02002b2ab40007b9002c02002b2ab40009b9002b02002b2ab4000bb9002b02002b2ab4000db9002b02002b2ab4000fb9002b02002ab400102bb8002d2ab400112bb8002e2ab400122bb8002f2b2ab40029b9002c0200b1
-
+
 org/apache/geode/internal/admin/remote/RemoteCacheStatistics,2
 fromData,51,2a2bb900100100b500032a2bb900100100b500052a2bb900100100b500072a2bb900100100b500092a2bb900110100b5000bb1
 toData,51,2b2ab40003b9000e03002b2ab40005b9000e03002b2ab40007b9000e03002b2ab40009b9000e03002b2ab4000bb9000f0200b1
-
+
 org/apache/geode/internal/admin/remote/RemoteDLockInfo,2
 fromData,76,2a2bb80017b500022a2bb80017b500052a2bb80017b500032a2bb900180100b500062a2bb900190100b500092a2bb8001ac0001bb500072a2bb9001c0100b5000c2a2bb9001c0100b5000bb1
 toData,73,2ab400022bb800122ab400052bb800122ab400032bb800122b2ab40006b9001302002b2ab40009b9001402002ab400072bb800152b2ab4000cb9001603002b2ab4000bb900160300b1
-
+
 org/apache/geode/internal/admin/remote/RemoteEntrySnapshot,2
 fromData,36,2a2bb80023b500052a2bb80023b500112a2bb80023c00014b500172a2bb80023b50013b1
 toData,33,2ab400052bb800222ab400112bb800222ab400172bb800222ab400132bb80022b1
-
+
 org/apache/geode/internal/admin/remote/RemoteObjectName,2
 fromData,27,2a2bb80013b500042a2bb80013b500062a2bb900140100b50008b1
 toData,27,2ab400042bb800112ab400062bb800112b2ab40008b900120200b1
-
+
 org/apache/geode/internal/admin/remote/RemoteRegionAttributes,2
 fromData,404,2a2bb80080b500082a2bb80080b5000a2a2bb80081b5000d2a2bb80080b5007a2a2bb80082c00083b5000f2a2bb80082c00083b500112a2bb80082c00084b500132a2bb80082c00084b500152a2bb80082c00084b500172a2bb80080b500192a2bb80082c00084b5001b2a2bb80080b5001d2a2bb80082c00085b5001f2a2bb80082c00086b500212a2bb900870100b500232a2bb900870100b500252a2bb900880100b500272a2bb900890100b5002b2a2bb900880100b5002d2a2bb900870100b5002f2a2bb900870100b500312a2bb900870100b500332a2bb900870100b500352a2bb900870100b500372a2bb80082c0008ab5003b2a2bb80082c0008bc0008bb5003d2a2bb80082c0008cc0008cb5003f2a2bb900870100b5007f2a2bb80082c0008db500412a2bb80082c0008eb500432a2bb80082c0008fb500452a2bb80082c00002b500042a2bb900870100b500472a2bb80080b500392a2bb900870100b5004b2a2bb80081b5004e2a2bb900870100b500052a2bb900870100b500292a2bb80080b500522a2bb900870100b50054b1
 toData,353,2ab400082bb800782ab4000a2bb800782ab4000d2bb800792ab4007a2bb800782ab4000f2bb8007b2ab400112bb8007b2ab400132bb8007b2ab400152bb8007b2ab400172bb8007b2ab400192bb800782ab4001b2bb8007b2ab4001d2bb800782ab4001f2bb8007b2ab400212bb8007b2b2ab40023b9007c02002b2ab40025b9007c02002b2ab40027b9007d02002b2ab4002bb9007e02002b2ab4002db9007d02002b2ab4002fb9007c02002b2ab40031b9007c02002b2ab40033b9007c02002b2ab40035b9007c02002b2ab40037b9007c02002ab4003b2bb8007b2ab4003d2bb8007b2ab4003f2bb8007b2b2ab4007fb9007c02002ab400412bb8007b2ab400432bb8007b2ab400452bb8007b2ab400042bb8007b2b2ab40047b9007c02002ab400392bb800782b2ab4004bb9007c02002ab4004e2bb800792b2ab40005b9007c02002b2ab40029b9007c02002ab400522bb800782b2ab40054b9007c0200b1
-
+
 org/apache/geode/internal/admin/remote/RemoteRegionSnapshot,2
 fromData,59,2a2bb80029b500032a2bb8002ac00009b5000c2a2bb8002ac00005b500072a2bb9002b0100b500102a2bb9002b0100b500122a2bb8002ab5001cb1
 toData,53,2ab400032bb800262ab4000c2bb800272ab400072bb800272b2ab40010b9002802002b2ab40012b9002802002ab4001c2bb80027b1
-
+
 org/apache/geode/internal/admin/remote/RemoteStat,2
 fromData,66,2a2bb8002ab500052a2bb9002b0100b5000e2a2bb9002c0100b500032a2bb8002ab500072a2bb8002ab5000b2a2bb8002dc0002eb500102a2bb9002f0100b50009b1
 toData,63,2ab400052bb800252b2ab4000eb9002602002b2ab40003b9002702002ab400072bb800252ab4000b2bb800252ab400102bb800282b2ab40009b900290200b1
-
+
 org/apache/geode/internal/admin/remote/RemoteStatResource,2
 fromData,45,2a2bb9001a0100b500032a2bb9001a0100b500052a2bb8001bb500072a2bb8001bb5000a2a2bb8001bb5000cb1
 toData,45,2b2ab40003b9001803002b2ab40005b9001803002ab400072bb800192ab4000a2bb800192ab4000c2bb80019b1
-
+
 org/apache/geode/internal/admin/remote/RemoveHealthListenerRequest,2
 fromData,16,2a2bb7000d2a2bb9000e0100b50003b1
 toData,16,2a2bb7000b2b2ab40003b9000c0200b1
-
+
 org/apache/geode/internal/admin/remote/RemoveHealthListenerResponse,2
 fromData,6,2a2bb70008b1
 toData,6,2a2bb70007b1
-
+
 org/apache/geode/internal/admin/remote/ResetHealthStatusRequest,2
 fromData,16,2a2bb7000d2a2bb9000e0100b50003b1
 toData,16,2a2bb7000b2b2ab40003b9000c0200b1
-
+
 org/apache/geode/internal/admin/remote/ResetHealthStatusResponse,2
 fromData,6,2a2bb7000ab1
 toData,6,2a2bb70009b1
-
+
 org/apache/geode/internal/admin/remote/RevokePersistentIDRequest,2
 fromData,25,2a2bb7001d2abb001e59b7001fb500022ab400022bb80020b1
 toData,14,2a2bb700212ab400022bb80022b1
-
+
 org/apache/geode/internal/admin/remote/RootRegionRequest,2
 fromData,6,2a2bb7000bb1
 toData,6,2a2bb7000ab1
-
+
 org/apache/geode/internal/admin/remote/RootRegionResponse,2
 fromData,28,2a2bb700232a2bb80024c00019b5001a2a2bb80024c00019b5001bb1
 toData,22,2a2bb700212ab4001a2bb800222ab4001b2bb80022b1
-
+
 org/apache/geode/internal/admin/remote/ShutdownAllGatewayHubsRequest,2
 fromData,16,2a2bb700072a2bb900080100b50005b1
 toData,16,2a2bb700092b2ab40005b9000a0200b1
-
+
 org/apache/geode/internal/admin/remote/ShutdownAllRequest,2
 fromData,6,2a2bb70044b1
 toData,6,2a2bb70045b1
-
+
 org/apache/geode/internal/admin/remote/ShutdownAllResponse,2
 fromData,16,2a2bb700072a2bb900080100b50002b1
 toData,16,2a2bb700052b2ab40002b900060200b1
-
+
 org/apache/geode/internal/admin/remote/SnapshotResultMessage,2
 fromData,27,2a2bb7000e2a2bb8000fc00010b500062a2bb900110100b50007b1
 toData,24,2a2bb7000b2ab400062bb8000c2b2ab40007b9000d0200b1
-
+
 org/apache/geode/internal/admin/remote/StatAlertsManagerAssignMessage,2
 fromData,30,2a2bb700102a2bb900110100b500052a2bb80012c00013c00013b50002b1
 toData,24,2a2bb7000d2b2ab40005b9000e03002ab400022bb8000fb1
-
+
 org/apache/geode/internal/admin/remote/StatListenerMessage,2
 fromData,86,2a2bb700112a2bb900120100b500042a2bb900130100b500052a2ab40005bc0ab500062a2ab40005bc07b50007033d1c2ab40005a200212ab400061c2bb9001301004f2ab400071c2bb90014010052840201a7ffddb1
 toData,66,2a2bb7000d2b2ab40004b9000e03002b2ab40005b9000f0200033d1c2ab40005a200212b2ab400061c2eb9000f02002b2ab400071c31b900100300840201a7ffddb1
-
+
 org/apache/geode/internal/admin/remote/StoreSysCfgRequest,2
 fromData,17,2a2bb7000d2a2bb8000ec0000fb50003b1
 toData,14,2a2bb7000b2ab400032bb8000cb1
-
+
 org/apache/geode/internal/admin/remote/StoreSysCfgResponse,2
 fromData,6,2a2bb7000eb1
 toData,6,2a2bb7000db1
-
+
 org/apache/geode/internal/admin/remote/SubRegionRequest,2
 fromData,6,2a2bb7000db1
 toData,6,2a2bb7000cb1
-
+
 org/apache/geode/internal/admin/remote/SubRegionResponse,2
 fromData,34,2a2bb7001f2a2bb80020c00013c00013b500142a2bb80020c00013c00013b50015b1
 toData,22,2a2bb7001d2ab400142bb8001e2ab400152bb8001eb1
-
+
 org/apache/geode/internal/admin/remote/TailLogRequest,2
 fromData,6,2a2bb7000bb1
 toData,6,2a2bb7000ab1
-
+
 org/apache/geode/internal/admin/remote/TailLogResponse,2
 fromData,22,2a2bb7001a2a2bb8001bb5000d2a2bb8001bb5000ab1
 toData,22,2a2bb700182ab4000d2bb800192ab4000a2bb80019b1
-
+
 org/apache/geode/internal/admin/remote/UpdateAlertDefinitionMessage,2
 fromData,30,2a2bb700092a2bb9000a0100b500042a2bb8000bc0000cc0000cb50003b1
 toData,24,2a2bb700062b2ab40004b9000702002ab400032bb80008b1
-
+
 org/apache/geode/internal/admin/remote/VersionInfoRequest,2
 fromData,6,2a2bb7000bb1
 toData,6,2a2bb7000ab1
-
+
 org/apache/geode/internal/admin/remote/VersionInfoResponse,2
 fromData,14,2a2bb7000a2a2bb8000bb50006b1
 toData,14,2a2bb700082ab400062bb80009b1
-
+
 org/apache/geode/internal/admin/statalerts/BaseDecoratorImpl,2
 fromData,12,2a2bb8002bc0002cb50002b1
 toData,9,2ab400022bb8002ab1
-
+
 org/apache/geode/internal/admin/statalerts/DummyStatisticInfoImpl,2
 fromData,31,2a2bb900100100b500022a2bb900100100b500032a2bb900100100b50004b1
 toData,31,2b2ab40002b9000f02002b2ab40003b9000f02002b2ab40004b9000f0200b1
-
+
 org/apache/geode/internal/admin/statalerts/FunctionDecoratorImpl,2
 fromData,14,2a2bb700212a2bb80022b50002b1
 toData,14,2a2bb7001f2ab400022bb80020b1
-
+
 org/apache/geode/internal/admin/statalerts/GaugeThresholdDecoratorImpl,2
 fromData,28,2a2bb700252a2bb80026c00027b500032a2bb80026c00027b50004b1
 toData,22,2a2bb700232ab400032bb800242ab400042bb80024b1
-
+
 org/apache/geode/internal/admin/statalerts/MultiAttrDefinitionImpl,2
 fromData,31,2a2bb8002fb500032a2bb80030b500072a2bb80031c00032c00032b50009b1
 toData,25,2ab400032bb8002c2ab400072bb8002d2ab400092bb8002eb1
-
+
 org/apache/geode/internal/admin/statalerts/NumberThresholdDecoratorImpl,2
 fromData,25,2a2bb700262a2bb80027c00028b500032a2bb80029b50004b1
 toData,22,2a2bb700232ab400032bb800242ab400042bb80025b1
-
+
 org/apache/geode/internal/admin/statalerts/SingleAttrDefinitionImpl,2
 fromData,28,2a2bb80031b500032a2bb80032b500072a2bb80033c00020b50002b1
 toData,25,2ab400032bb8002e2ab400072bb8002f2ab400022bb80030b1
-
+
 org/apache/geode/internal/admin/statalerts/StatisticInfoImpl,2
 fromData,1,b1
 toData,1,b1
-
+
 org/apache/geode/internal/cache/AbstractRegion,2
 fromData,11,bb017359130174b70175bf
 toData,6,2a2bb80172b1
-
+
 org/apache/geode/internal/cache/AbstractUpdateOperation$AbstractUpdateMessage,2
 fromData,16,2a2bb700182a2bb900190100b5000cb1
 toData,16,2a2bb7001a2b2ab4000cb9001b0300b1
-
+
 org/apache/geode/internal/cache/AddCacheServerProfileMessage,2
 fromData,16,2a2bb7002f2a2bb900300100b50008b1
 toData,16,2a2bb7002d2b2ab40008b9002e0200b1
-
+
 org/apache/geode/internal/cache/BucketAdvisor$BucketProfile,2
 fromData,36,2a2bb700132a2bb900140100b500042a2bb900140100b500062a2bb900140100b50012b1
 toData,36,2a2bb700152b2ab40004b9001602002b2ab40006b9001602002b2ab40012b900160200b1
-
+
 org/apache/geode/internal/cache/BucketAdvisor$ServerBucketProfile,2
 fromData,22,2a2bb700162a2bb80017b500052a2bb80018b50004b1
 toData,22,2a2bb700192ab400052bb8001a2ab400042bb8001bb1
-
+
 org/apache/geode/internal/cache/BucketServerLocation,2
 fromData,39,2a2bb700062a2bb80007b60008b500032a2bb80009b6000ab500042a2bb8000bb6000cb50005b1
 toData,39,2a2bb7000d2ab40003b8000e2bb8000f2ab40004b800102bb800112ab40005b800122bb80013b1
-
+
 org/apache/geode/internal/cache/BucketServerLocation66,2
 fromData,93,2a2bb7000a2a2bb8000bb6000cb500032a2bb8000db6000eb500042a2bb8000fb60010b500052a2bb900110100b500092a2ab40009bd0006b500072ab400099e001d033d1c2ab40009a200132ab400071c2bb8001253840201a7ffebb1
 toData,91,2a2bb700132ab40003b800142bb800152ab40004b800162bb800172ab40005b800182bb800192b2ab40009b9001a02002ab400099e00262ab400074d2cbe3e03360415041da200152c1504323a0519052bb8001b840401a7ffebb1
-
+
 org/apache/geode/internal/cache/CacheDistributionAdvisor$CacheProfile,2
 fromData,113,2a2bb700692bb9006a01003d2a1cb6001e2a1cb7006b9900162abb006c59b7006db500282ab400282bb8006e2a1cb7006f99000e2a2bb80070c00071b5000e2a1cb7007299000e2a2bb80070c00071b5000f2a2bb80070c00073b500192a1cb7007499000e2a2bb80070c00075b50018b1
 toData,101,2a2bb700612b2ab6001db9006202002ab40028c6000b2ab400282bb800632ab4000eb9002e01009a000c2a2ab4000e2bb700642ab4000fb9002e01009a000c2a2ab4000f2bb700642ab400192bb800652ab40018b9003201009a000b2ab400182bb80065b1
-
+
 org/apache/geode/internal/cache/CacheServerAdvisor$CacheServerProfile,2
 fromData,53,2a2bb700112a2bb80012b500042a2bb900130100b500062abb001459b70015b500052ab400052bb800162a2bb900170100b60018b1
 toData,42,2a2bb7000b2ab400042bb8000c2b2ab40006b9000d02002ab400052bb8000e2b2ab6000fb900100300b1
-
+
 org/apache/geode/internal/cache/ClientRegionEventImpl,2
 fromData,14,2a2bb700142a2bb80015b60003b1
 toData,14,2a2bb700122ab6000d2bb80013b1
-
+
 org/apache/geode/internal/cache/CloseCacheMessage,2
 fromData,16,2a2bb700172a2bb900180100b50002b1
 toData,16,2a2bb700192b2ab40002b9001a0200b1
-
+
 org/apache/geode/internal/cache/ControllerAdvisor$ControllerProfile,2
 fromData,6,2a2bb70007b1
 toData,6,2a2bb70006b1
-
+
 org/apache/geode/internal/cache/CreateRegionProcessor$CreateRegionMessage,2
 fromData,45,2a2bb7009f2a2bb800a0b5000a2a2bb800a1c00055b500432a2bb900a20100b500032a2bb900a30100b50065b1
 toData,42,2a2bb700a52ab4000a2bb800a62ab400432bb800a72b2ab40003b900a802002b2ab40065b900a90200b1
-
+
 org/apache/geode/internal/cache/CreateRegionProcessor$CreateRegionReplyMessage,2
 fromData,161,2a2bb700062bb90007010099000e2a2bb80008c00009b5000a2bb9000b01003d1c9a000b2a01b5000ca700352abb000d591cb7000eb5000c033e1d1ca20022bb000f59b700103a0419042bb800112ab4000c1904b6001257840301a7ffdf2bb90007010099000c2a2b03b80013b500142bb9000701009900162abb001559b70016b500172ab400172bb800112a2bb900070100b500182a2bb900190100b50004b1
 toData,191,2a2bb7001a2b2ab4000ac6000704a7000403b9001b02002ab4000ac6000b2ab4000a2bb8001c2ab4000cc7000d2b03b9001d0200a700322ab4000cb6001e3d2b1cb9001d0200033e1d1ca2001c2ab4000c1db6001fc0000f3a0419042bb80020840301a7ffe52ab40014c600192b04b9001b02002b2ab40014c0002103b80022a7000a2b03b9001b02002ab40017c600152b04b9001b02002ab400172bb80020a7000a2b03b9001b02002b2ab40018b9001b02002b2ab40004b900230300b1
-
+
 org/apache/geode/internal/cache/DestroyOperation$DestroyMessage,2
 fromData,45,2a2bb700322a2bb80033c00034b500022a2bb80033b500272bb800354d2cb6003699000b2a2bb80037b50005b1
 toData,118,2a2bb700382ab400022bb800392ab400272bb800392ab40003b6003ac000084d2cc1003b99002e2cb6003c4e2db6003d990018b2003e2bb8003f2ab40003b600402bb80041a7000ab200422bb8003fa700262cb60043990018b2003e2bb8003f2ab40003b600402bb80041a7000ab200422bb8003fb1
-
+
 org/apache/geode/internal/cache/DestroyOperation$DestroyWithContextMessage,2
 fromData,14,2a2bb700102a2bb80011b50008b1
 toData,14,2a2bb700122ab400082bb80013b1
-
+
 org/apache/geode/internal/cache/DestroyPartitionedRegionMessage,2
 fromData,76,2a2bb700432a2bb80044b500062a2bb900450100b80046b500082a2bb900470100b5000a2bb9004701003d2a1cbc0ab5000d033e1d1ca200152ab4000d1d2bb9004701004f840301a7ffecb1
 toData,77,2a2bb700482ab400062bb800492b2ab40008b4004ab9004b02002b2ab4000ab9004c02002b2ab4000dbeb9004c0200033d1c2ab4000dbea200152b2ab4000d1c2eb9004c0200840201a7ffe8b1
-
+
 org/apache/geode/internal/cache/DestroyRegionOperation$DestroyRegionMessage,2
 fromData,41,2a2bb700582a2bb80059c0005ab500082a2bb8005bb500112a2bb8005cb5001f2a2bb8005db50029b1
 toData,38,2a2bb7005e2ab400082bb8005f2ab400112bb800602ab4001f2bb800612ab400292bb80062b1
-
+
 org/apache/geode/internal/cache/DestroyRegionOperation$DestroyRegionWithContextMessage,2
 fromData,14,2a2bb7000e2a2bb8000fb50006b1
 toData,14,2a2bb700102ab400062bb80011b1
-
+
 org/apache/geode/internal/cache/DistTXCommitMessage,2
 fromData,14,2a2bb700302a2bb80031b50003b1
 toData,14,2a2bb700322ab400032bb80033b1
-
+
 org/apache/geode/internal/cache/DistTXCommitMessage$DistTXCommitReplyMessage,2
 fromData,17,2a2bb700192a2bb8001ac0001bb50004b1
 toData,14,2a2bb700172ab400042bb80018b1
-
+
 org/apache/geode/internal/cache/DistTXPrecommitMessage,2
 fromData,14,2a2bb700332a2bb80034b5000ab1
 toData,14,2a2bb700312ab4000a2bb80032b1
-
+
 org/apache/geode/internal/cache/DistTXPrecommitMessage$DistTXPrecommitReplyMessage,2
 fromData,17,2a2bb700192a2bb8001ac0001bb50004b1
 toData,14,2a2bb700172ab400042bb80018b1
-
+
 org/apache/geode/internal/cache/DistTXPrecommitMessage$DistTxPrecommitResponse,2
 fromData,17,2a2bb80008b500032a2bb80009b50004b1
 toData,17,2ab400032bb800062ab400042bb80007b1
-
+
 org/apache/geode/internal/cache/DistTXRollbackMessage,2
 fromData,6,2a2bb70015b1
 toData,6,2a2bb70016b1
-
+
 org/apache/geode/internal/cache/DistTXRollbackMessage$DistTXRollbackReplyMessage,2
 fromData,14,2a2bb700192a2bb8001ab50004b1
 toData,14,2a2bb700172ab400042bb80018b1
-
+
 org/apache/geode/internal/cache/DistributedCacheOperation$CacheOperationMessage,2
 fromData,291,2bb9009601003d2bb9009601003e2a1cb500972a1c2bb600982a2bb80099b500102a2bb9009a0100b8009bb500092a1c1100807e99000704a7000403b500042a1c10087e99000704a7000403b500581c1102007e99000b2a2bb8009cb500892a1c1104007e99000704a7000403b500072a1c10407e99000704a7000403b5001e2ab4001e9900382bb9009a0100360415049a000b2a03b5001fa7001b150404a0000b2a04b5001fa7000dbb009d59129eb7009fbf2a2bb800a0b500201c1101007e99000704a700040336042a1c1108007e99000704a7000403b500a115049900162abb00a259b700a3b5000e2ab4000e2bb800a41c1110007e99001c1c1120007e99000704a700040336052a15052bb800a5b5000a1d1104007e9900082a04b5000fb1
 toData,202,033d033e2a1cb600a63d2a1db600a73e2b1cb900a802002b1db900a802002ab4000d9e000d2b2ab4000db900a902002ab400102bb800aa2b2ab40009b400abb900ac02002ab40089c6000b2ab400892bb800ad2ab4001e9900542b2ab4001f99000704a7000403b900ac02002ab4001fb800ae36042ab4001f9a001f2ab40020c10021990015013a052ab40020c00021c000213a06a7000c2ab400203a05013a061504190519062bb800af2ab4000ec6000b2ab4000e2bb800b02ab4000ac6000b2ab4000a2bb800b0b1
-
+
 org/apache/geode/internal/cache/DistributedClearOperation$ClearRegionMessage,2
 fromData,53,2a2bb700212ab800222bb90023010032b500022a2bb80024c00025b500062a2bb80024c00026b500172a2bb80024c00027b50011b1
 toData,43,2a2bb700282b2ab40002b60016b9002902002ab400062bb8002a2ab400172bb8002a2ab400112bb8002ab1
-
+
 org/apache/geode/internal/cache/DistributedClearOperation$ClearRegionWithContextMessage,2
 fromData,14,2a2bb7000e2a2bb8000fb50006b1
 toData,14,2a2bb700102ab400062bb80011b1
-
+
 org/apache/geode/internal/cache/DistributedPutAllOperation$EntryVersionsList,2
 fromData,268,2bb9002201003d1c077e07a0000704a70004033e1c10207e1020a0000704a70004033604b80012b20013b900140200990011b8001212231cb80016b9002403001d9900ca2bb80025883605b80012b20013b900140200990015b80012b2001312261505b80027b900280400bb0029591505b700023a0603360715071505a2008e2bb90022010036081508aa000000007b00000000000000030000001e0000002700000035000000542a01b6000757a700572a15042bb8002ab6000757a7004915042bb8002a3a0919061909b6001cb9002b0200572a1909b6000757a7002a15042bb8002a3a092bb8002588360a19091906150ab9002c0200c0002db600212a1909b6000757840701a7ff71b1
 toData,289,033d033e2ab600089e003d1c07803d043e2ab6000b3a041904b9000c01009900271904b9000d0100c0000a3a051905c600131905c1001199000e1c1020803da70006a7ffd5b80012b20013b900140200990015b80012b2001312152a1cb80016b9001705002b1cb9001802001d9900b32ab60008852bb80019bb001a592ab60008b7001b3a040336052ab6000b3a061906b9000c010099008a1906b9000d0100c0000a3a071907c7000d2b03b900180200a7006c1907b6001c3a081908c700132b04b90018020019072bb8001da7005019041908b6001e360915099a00242b05b90018020084050115053609190419081509b6001f5719072bb8001da700212b06b90018020019072b03b6002019071908b6002115090464852bb80019a7ff72b1
-
+
 org/apache/geode/internal/cache/DistributedPutAllOperation$PutAllEntryData,1
 toData,229,2ab4000a4d2ab4000c4e2c2bb8003d2dc1003e9a00072dc700182b03b9003f02002dc0003ec0003e2bb80040a700312dc1004199001e2dc000413a042b04b9003f02001904b9004201002bb80040a7000f2b04b9003f02002d2bb800432b2ab40012b40044b9003f02002ab4000636042ab40026c6000a150407809136042ab40017c6001d15041008809136042ab40017c1004599000b150410208091360415041080809136042b1504b9003f02002ab40026c6000b2ab400262bb8003d2ab40017c6000b2ab400172bb800462ab6002899000b2ab400142bb800462ab400082bb80047b1
-
+
 org/apache/geode/internal/cache/DistributedPutAllOperation$PutAllMessage,2
 fromData,197,2a2bb700392a2bb8003ac0003bb500052a2bb8003c88b500152a2ab40015bd003db500062ab400159e00722bb8003e4dbb003f59b700404e03360415042ab40015a200202ab400061504bb003d592b2ab4000515042c2db7004153840401a7ffdd2bb9004201003604150499002f2bb800433a0503360615062ab40015a2001d2ab4000615063219051506b60044c00045b5002b840601a7ffe02ab400461140007e99000e2a2bb8003ac00047b5000b2a2ab400461180007e99000704a7000403b5001ab1
 toData,165,2a2bb700482ab400052bb800492ab40015852bb8004a2ab400159e007bbb004b592ab40015b7004c4d033e03360415042ab40015a200511d9a00122ab40006150432b4002bc60005043e2ab40006150432b4002b3a052c1905b6004d572ab4000615043201b5002b2ab400061504322bb6004e2ab400061504321905b5002b840401a7ffac2b1db9004f02001d9900082c2bb800502ab4000bc6000b2ab4000b2bb80049b1
-
+
 org/apache/geode/internal/cache/DistributedRegionFunctionStreamingMessage,2
 fromData,171,2a2bb700632bb9006401003d1c047e9900142a2bb900650100b500092ab40009b800661c077e99000d2a2bb900650100b500061c057e99000e2a2bb80067c00068b500072bb800674e2dc100699900252a03b5000e2a2dc00069b8006ab500082ab40008c7001b2a2dc00069b5004ea700102a2dc0006bb500082a04b5000e2a2bb80067c0006cb5000a2a2bb8006db5000c2a2bb8006eb5000b2a1c10407e99000704a7000403b5000db1
 toData,173,2a2bb70070033d2ab400099900081c0480933d2ab40006029f00081c0780933d2ab40007c600081c0580933d2ab4000d9900091c104080933d2b1cb9007102002ab4000999000d2b2ab40009b9007202002ab40006029f000d2b2ab40006b9007202002ab40007c6000b2ab400072bb800732ab4000e99000e2ab400082bb80073a700102ab40008b9005901002bb800732ab4000a2bb800732ab4000cc000742bb800752ab4000b2bb80076b1
-
+
 org/apache/geode/internal/cache/DistributedRemoveAllOperation$RemoveAllEntryData,1
 toData,136,2ab4000a4d2c2bb8003f2b2ab40010b40040b9004102002ab400063e2ab40022c600081d0780913e2ab40015c600191d100880913e2ab40015c100429900091d102080913e1d108080913e2b1db9004102002ab40022c6000b2ab400222bb8003f2ab40015c6000b2ab400152bb800432ab6002499000b2ab400122bb800432ab400082bb80044b1
-
+
 org/apache/geode/internal/cache/DistributedRemoveAllOperation$RemoveAllMessage,2
 fromData,197,2a2bb700382a2bb80039c0003ab500052a2bb8003b88b500152a2ab40015bd003cb500062ab400159e00722bb8003d4dbb003e59b7003f4e03360415042ab40015a200202ab400061504bb003c592b2ab4000515042c2db7004053840401a7ffdd2bb9004101003604150499002f2bb800423a0503360615062ab40015a2001d2ab4000615063219051506b60043c00044b5002b840601a7ffe02ab400451140007e99000e2a2bb80039c00046b5000b2a2ab400451180007e99000704a7000403b5001ab1
 toData,165,2a2bb700472ab400052bb800482ab40015852bb800492ab400159e007bbb004a592ab40015b7004b4d033e03360415042ab40015a200511d9a00122ab40006150432b4002bc60005043e2ab40006150432b4002b3a052c1905b6004c572ab4000615043201b5002b2ab400061504322bb6004d2ab400061504321905b5002b840401a7ffac2b1db9004e02001d9900082c2bb8004f2ab4000bc6000b2ab4000b2bb80048b1
-
+
 org/apache/geode/internal/cache/DistributedTombstoneOperation$TombstoneMessage,2
 fromData,125,2a2bb700172ab800182bb90019010032b5001a2bb9001b01003d2abb001c591cb7001db500122bb9001e01003e03360415041ca2003e1d990019bb001f59b700203a0619062bb8002119063a05a700092bb800223a052ab4001219052bb900230100b80024b90025030057840401a7ffc22a2bb80026c00027b50004b1
 toData,227,2a2bb700282b2ab4001ab60029b9002a02002b2ab40012b9002b0100b9002c0200033d122d4e2ab40012b9002e01009a00252ab40012b9002f0100b900300100b900310100c000323a041904c1001f990005043d2b1cb9003302002ab40012b900340100b9003001003a041904b9003501009900681904b900310100c000363a051905b900370100c000323a061906c1001f99001c1c9a000cbb0038592db70039bf1906c0001f2bb8003aa700191c99000cbb0038592db70039bf1906c0003b2bb6003c2b1905b9003d0100c0003eb6003fb900400300a7ff942ab400042bb80041b1
-
+
 org/apache/geode/internal/cache/DynamicRegionAttributes,2
 fromData,17,2a2bb80005b500022a2bb80005b50003b1
 toData,17,2ab400022bb800042ab400032bb80004b1
-
+
 org/apache/geode/internal/cache/EntryEventImpl,2
 fromData,216,2a2bb80016c00017b500182bb800164d2bb800164e2abb0019592c2d01b7001ab5001b2a2bb9001c0100b8001db5001e2a2bb9001f0100b500082ab4001b2bb80016b600202a2bb80016c00021b500092bb900220100990013b200239a003cbb0024591225b70026bf2bb9002201009900212a2bb80027b500282a2ab40028b500062a2ab40028b80029b50005a7000b2a2bb80016b500052bb9002201009900192a2bb80027b5002a2a2ab4002ab80029b50007a7000b2a2bb80016b500072a2bb8002bb5002c2a2bb8002db5000a2a2bb8002eb50013b1
 toData,279,2ab400182bb801472ab600892bb801472ab4001bb601742bb801472b2ab4001eb40175b9017602002b2ab4000811c03f7eb9017702002ab600492bb801472ab400092bb801472b03b9017802002ab6003f4d2cc100803e1d99000d2cc00080b900a901003e2b1db9017802001d99003b2ab40028c6000e2ab400282bb80179a7002e2ab40006c6000e2ab400062bb80179a7001c2cc000803a041904b900b701002bb8017aa700082c2bb801472ab700414d2cc100803e1d99000d2cc00080b900a901003e2b1db9017802001d9900292ab4002ac6000e2ab4002a2bb80179a7001c2cc000803a041904b900b701002bb8017aa700082c2bb801472ab4002cc0017b2bb8017c2ab600572bb801472ab400132bb8017db1
-
+
 org/apache/geode/internal/cache/EntrySnapshot,2
 fromData,50,2a03b500052bb9004201003d1c9900112abb000759b70043b50004a7000e2abb000359b70044b500042ab400042bb60045b1
 toData,22,2b2ab40004c10007b9004002002ab400042bb60041b1
-
+
 org/apache/geode/internal/cache/EventID,4
 fromData,53,2a2bb8003db500042bb8003db8003e4d2a2cb8003fb500092a2cb8003fb5000b2a2bb900400100b5000c2a2bb900410100b50001b1
 fromDataPre_GFE_8_0_0_0,33,2a2bb8003db500042bb8003db8003e4d2a2cb8003fb500092a2cb8003fb5000bb1
 toData,92,2bb800354d2cb20036b600379d00242ab600384ebb0010592cb700393a042d1904b600151904b600162bb8003aa7000b2ab400042bb8003a2ab400092ab4000bb800332bb8003a2b2ab4000cb9003b02002b2ab40001b9003c0200b1
 toDataPre_GFE_8_0_0_0,24,2ab400042bb8003a2ab400092ab4000bb800332bb8003ab1
-
+
 org/apache/geode/internal/cache/EventTracker$EventSeqnoHolder,2
 fromData,22,2a2bb9000e0100b500042a2bb8000fc00010b50005b1
 toData,19,2b2ab40004b9001103002ab400052bb80012b1
-
+
 org/apache/geode/internal/cache/EvictionAttributesImpl,2
 fromData,33,2a2bb9001b0100b500072a2bb8001cc0001db500052a2bb8001cc0001eb50003b1
 toData,27,2b2ab40007b9001902002ab400052bb8001a2ab400032bb8001ab1
-
+
 org/apache/geode/internal/cache/FilterProfile,2
 fromData,210,bb013159b701324d2c2bb801332a2cb500202ab4000d2bb80134b900300200572ab400052bb80135b9007802002ab400072bb80135b9007802002ab400092bb80135b9007802002ab4000e2bb80134b900300200572ab400062bb80135b9007802002ab400082bb80135b9007802002ab4000a2bb80135b9007802002bb801363e1d9e004f05b80138360403360515051da2002c2bb801393a062bb8013a3a072a1906190703b6013b2ab4000f19061907b90054030057840501a7ffd41504b8013857a7000e3a081504b80138571908bfb1
 toData,181,2ab40020c001312bb8013d2ab4000d2ab40023b401182bb8013e2ab400052bb8013f2ab400072bb8013f2ab400092bb8013f2ab4000e2ab40023b401182bb8013e2ab400062bb8013f2ab400082bb8013f2ab4000a2bb8013f2ab4000f4d2cb900b901003e1d2bb801402cb900ae0100b900af01003a041904b900b001009900361904b900b10100c000b23a051905b9011b0100c0003c3a061905b900b30100c000843a0719062bb8014119072bb8013da7ffc6b1
-
+
 org/apache/geode/internal/cache/FilterProfile$OperationMessage,2
 fromData,129,2a2bb700522a2bb900530100b500092a2bb900540100b500412ab800552bb90056010032b500232a2bb900570100b500292a2bb900580100b5004a2ab40023b8004c99002c2a2bb900540100b500322ab40023b2004ea5000d2ab40023b2004fa600202a2bb80059b50033a700152a2bb900580100b500252a2bb8005ab50027b1
 toData,133,2a2bb700452b2ab40009b9004602002b2ab40041b9004702002b2ab40023b60024b9004802002b2ab40029b9004902002b2ab4004ab9004b03002ab40023b8004c9900312b2ab40033b9004d0100b9004702002ab40023b2004ea5000d2ab40023b2004fa600202ab400332bb80050a700152b2ab40025b9004b03002ab400272bb80051b1
-
+
 org/apache/geode/internal/cache/FilterRoutingInfo,4
 fromData,88,014db8001e4e2dc6000a2db9001f01004d2bb900200100360403360515051504a200372bb800213a06bb000859b700093a0719072bb800222cc6000c2c1906b6002399000f2ab4000619061907b6001057840501a7ffc8b1
 fromDataPre_GFE_7_1_0_0,97,014db8001e4e2dc6000a2db9001f01004d2bb900200100360403360515051504a20040bb002559b700293a0619062bb80022bb000859b700093a0719072bb800222cc6000c2c1906b6002399000f2ab4000619061907b6001057840501a7ffbfb1
 toData,88,2ab40006b600113d2b1cb9002402002ab40006b60016b9001701004e2db9001801009900352db900190100c0001a3a041904b9001b0100c000253a0519052bb600261904b9001c0100c000083a0619062bb80027a7ffc8b1
 toDataPre_GFE_7_1_0_0,88,2ab40006b600113d2b1cb9002402002ab40006b60016b9001701004e2db9001801009900352db900190100c0001a3a041904b9001b0100c000253a0519052bb800271904b9001c0100c000083a0619062bb80027a7ffc8b1
-
+
 org/apache/geode/internal/cache/FilterRoutingInfo$FilterInfo,4
 fromData,9,2a2bb80014b50015b1
 fromDataPre_GFE_8_0_0_0,50,b800279900162a2bb80028b500292a2bb80014b50015a7001b2a2bb8002ab500012a2bb8002bb5000e2a2bb8002bb50012b1
 toData,255,10093e1d2ab4000ec7000707a700112ab4000eb9001601001008680860603e1d2ab40012c7000707a700112ab40012b9001601001008680860603e1d2ab40001c7000703a7000d2ab40001b60017100c68603ebb0018591d01b700194d2ab40001c7000b2c03b6001aa7005d2c04b6001a2ab40001b600172cb8001b2ab40001b60006b9000701003a041904b9000801009900351904b900090100c0000a3a051905b9000b0100c0001cb6001d2cb8001e1905b9000c0100c0001fb60020852cb8001ea7ffc72ab4000e2ab400212cb800222ab400122ab400212cb800222bc100189900102bc000232cb900240200a7000f2cb600253a0419042bb80026b1
 toDataPre_GFE_8_0_0_0,213,b800279900b1bb0018591103e82bb8002cb700194d2ab40001c7000b2c03b6001aa7005a2c04b6001a2ab40001b600172cb8001b2ab40001b60006b9000701004e2db9000801009900342db900090100c0000a3a041904b9000b0100c0001cb6001d2cb8001e1904b9000c0100c0001fb60020852cb8001ea7ffc92ab4000e2ab400212cb800222ab400122ab400212cb800222bc100189900102bc000232cb900240200a7000d2cb600254e2d2bb80026a700232ab400012bb8002d2ab4000e2ab400212bb800222ab400122ab400212bb80022b1
-
+
 org/apache/geode/internal/cache/FindDurableQueueProcessor$FindDurableQueueMessage,2
 fromData,24,2a2bb7001e2a2bb9001f0100b500052a2bb80020b50006b1
 toData,24,2a2bb700212b2ab40005b9002202002ab400062bb80023b1
-
+
 org/apache/geode/internal/cache/FindDurableQueueProcessor$FindDurableQueueReply,2
 fromData,14,2a2bb700042a2bb80005b50002b1
 toData,14,2a2bb700062ab400022bb80007b1
-
+
 org/apache/geode/internal/cache/FindRemoteTXMessage,2
 fromData,27,2a2bb700452a2bb80046c00047b500032a2bb900480100b50004b1
 toData,24,2a2bb700422ab400032bb800432b2ab40004b900440200b1
-
+
 org/apache/geode/internal/cache/FindRemoteTXMessage$FindRemoteTXMessageReply,2
 fromData,46,2a2bb7000a2a2bb9000b0100b500042bb9000b01009900182a2bb9000b0100b500072a2bb8000cc0000db50006b1
 toData,66,2a2bb700032b2ab40004b9000502002ab40006c6000704a70004033d2b1cb9000502001c99001d2b2ab40007b9000502002ab4000601b600082ab400062bb80009b1
-
+
 org/apache/geode/internal/cache/FindVersionTagOperation$FindVersionTagMessage,2
 fromData,55,2a2bb700232a2bb900240100b500032a2bb900250100b500042abb002659b70027b500052ab400052bb800282a2bb900290100b50006b1
 toData,44,2a2bb7001e2b2ab40003b9001f02002b2ab40004b9002002002ab400052bb800212b2ab40006b900220200b1
-
+
 org/apache/geode/internal/cache/FindVersionTagOperation$VersionTagReply,2
 fromData,17,2a2bb7000c2a2bb8000dc0000eb50002b1
 toData,14,2a2bb7000a2ab400022bb8000bb1
-
+
 org/apache/geode/internal/cache/FixedPartitionAttributesImpl,2
 fromData,39,2a2bb80007b500062a2bb900080100b500022a2bb900090100b500032a2bb900090100b50005b1
 toData,39,2ab400062bb8000a2b2ab40002b9000b02002b2ab40003b9000c02002b2ab40005b9000c0200b1
-
+
 org/apache/geode/internal/cache/FunctionStreamingReplyMessage,2
 fromData,64,2a2bb700102a2bb900110100b5000b2a2bb900120100b5000c2a2bb900110100b500042a2bb80013b5000da700144dbb001559b700164e2d2cb60017572dbfb1
 toData,90,2a2bb700182b2ab4000bb9001902002b2ab4000cb9001a02002b2ab40004b9001902002ab4000d2bb8001ba7002e4d2cc1001c99000cbb001d592cb7001ebfbb0015592ab4000db6001fb60020b700214e2d2cb60017572dbfb1
-
+
 org/apache/geode/internal/cache/GridAdvisor$GridProfile,2
 fromData,26,2a2bb7001e2a2bb8001fb500052a2bb80020b500062ab60007b1
 toData,22,2a2bb7001b2ab400052bb8001c2ab400062bb8001db1
-
+
 org/apache/geode/internal/cache/HARegion$HARegionAdvisor$HAProfile,2
 fromData,47,2a2bb700032bb9000401003d2a1cb200057e99000704a7000403b500062a1cb200077e99000704a7000403b50008b1
 toData,45,2a2bb70009033d2ab400069900091cb20005803d2ab400089900091cb20007803d2b1c1100ff7eb9000a0200b1
-
+
 org/apache/geode/internal/cache/InitialImageFlowControl$FlowControlPermitMessage,2
 fromData,16,2a2bb7000d2a2bb9000e0100b50002b1
 toData,16,2a2bb7000f2b2ab40002b900100200b1
-
+
 org/apache/geode/internal/cache/InitialImageOperation$Entry,2
 fromData,89,2a2bb900150100b500032bb9001501003d2a2bb80016b5000f2ab40003b8001199000d2ab20017b50002a7000b2a2bb80018b500022a2bb900190100b500041c047e9900162a1c057e99000704a70004032bb8001ab5000ab1
 toData,101,2b2ab40003b9000c02002ab4000ac6000704a70004033d1c2ab4000ac1000e99000705a700040380913d2b1cb9000c02002ab4000f2bb800102ab40003b800119a000b2ab400022bb800122b2ab40004b9001303002ab4000ac6000b2ab4000a2bb80014b1
-
+
 org/apache/geode/internal/cache/InitialImageOperation$FilterInfoMessage,2
 fromData,230,2a2bb7008f2a2bb80090b5001f2ab4000403322bb80090b5003e2ab4000403322bb80090b500412ab4000403322bb80090b500432ab4000403322bb80090b500452ab4000403322bb80090b500472ab4000403322bb80090b500492ab4000403322bb80090b5004b2ab4000403322bb80090b5004d2ab4000404322bb80090b5003e2ab4000404322bb80090b500412ab4000404322bb80090b500432ab4000404322bb80090b500452ab4000404322bb80090b500472ab4000404322bb80090b500492ab4000404322bb80090b5004b2ab4000404322bb80090b5004d2a2bb80090b50033b1
 toData,284,2a2bb7008d2ab4001fc000312bb8008e2ab400040332b4003ec000312bb8008e2ab400040332b40041c000312bb8008e2ab400040332b40043c000312bb8008e2ab400040332b40045c000312bb8008e2ab400040332b40047c000312bb8008e2ab400040332b40049c000312bb8008e2ab400040332b4004bc000312bb8008e2ab400040332b4004dc000312bb8008e2ab400040432b4003ec000312bb8008e2ab400040432b40041c000312bb8008e2ab400040432b40043c000312bb8008e2ab400040432b40045c000312bb8008e2ab400040432b40047c000312bb8008e2ab400040432b40049c000312bb8008e2ab400040432b4004bc000312bb8008e2ab400040432b4004dc000312bb8008e2ab40033c000312bb8008eb1
-
+
 org/apache/geode/internal/cache/InitialImageOperation$ImageReplyMessage,2
 fromData,224,2a2bb7001c2bb8001d4d014e2cc600102cb6001e9e00092c03b6001f4e2dc1002099000e2a2dc00021b5000fa700082a2cb5000f2a2bb900220100b500102a2bb900220100b500112a2bb900220100b500122a2bb900230100b500132a2bb900220100b500142a2bb80024b500042a2bb900230100b500032a2bb900230100b500152ab4001599000f2abb0025592bb70026b500022bb900270100360415049b00102abb0028591504b70029b5000103360515051504a200292bb8002ac0002b3a062bb8002c37072ab4000119061607b8002db9002e030057840501a7ffd6b1
 toData,234,2a2bb7002f2ab4000fc1002099001dbb00305904b700314d2c2ab4000fb60032572c2bb80033a7000e2ab4000fc000302bb800332b2ab40010b9003402002b2ab40011b9003402002b2ab40012b9003402002b2ab40013b9003502002b2ab40014b9003402002b2ab40003b9003502002b2ab40015b9003502002ab4001599000b2ab400022bb800362b2ab40001c7000702a7000c2ab40001b900370100b9003802002ab40001c600422ab40001b900390100b9003a01004d2cb9003b010099002a2cb9003c0100c0003d4e2db9003e01002bb8003f2db900400100c00041b600422bb80043a7ffd3b1
-
+
 org/apache/geode/internal/cache/InitialImageOperation$InitialImageVersionedEntryList,2
 fromData,406,b80022b20023b9002402003d2bb9003601003e1d057e05a0000704a700040336041d077e07a0000704a700040336052a1d10087e1008a0000704a7000403b500021d10207e1020a0000704a700040336061c990014b80022b2002312371db80026b900380400150499003c2bb800398836071c990015b80022b20023123a1507b80031b90038040003360815081507a200152a2bb8003bc00014b7001257840801a7ffea15059900e42bb800398836071c990015b80022b20023123c1507b80031b9003804002abb0003591507b70005b50004bb0003591507b700053a0803360915091507a200a22bb900360100360a150aaa000000008f00000000000000030000001e0000002c0000003f000000632ab4000401b90011020057a700662ab4000415062bb8003db90011020057a7005315062bb8003d3a0b1908190bb6002db900110200572ab40004190bb90011020057a7002f15062bb8003d3a0b2bb8003988360c190b1908150cb900160200c0003eb600342ab40004190bb90011020057840901a7ff5da7000e2abb000359b70001b50004b1
 toData,396,033d033e0336042ab700209a00091c05803d043e2ab40004b9000d01009e00431c07803d0436042ab40004b9001a01003a051905b9001b01009900271905b9001c0100c000173a061906c600131906c1002199000e1c1020803da70006a7ffd52ab400029900081c1008803db80022b20023b900240200990015b80022b2002312252a1cb80026b9002705002b1cb9002802001d9900282ab7000e852bb8002903360515052ab7000ea200132a1505b700132bb8002a840501a7ffea15049900cd2ab40004b9000d0100852bb80029bb002b592ab40004b9000d0100b7002c3a050336062ab40004b9001a01003a071907b9001b01009900951907b9001c0100c000173a081908c7000d2b03b900280200a700771908b6002d3a091909c700132b04b90028020019082bb8002ea7005b19051909b9002f0200c000303a0a190ac700292b05b9002802001506840601b800313a0a19051909190ab9003203005719082bb8002ea700222b06b90028020019082b03b6003319081909b60034190ab60035852bb80029a7ff67b1
-
+
 org/apache/geode/internal/cache/InitialImageOperation$RVVReplyMessage,2
 fromData,33,2a2bb700162bb9001701003d1c9900132bb9001701003e2a1d2bb80018b50004b1
 toData,51,2a2bb700092ab40004c600222b04b9000a02002b2ab40004c1000bb9000a02002ab400042bb6000ca7000a2b03b9000a0200b1
-
+
 org/apache/geode/internal/cache/InitialImageOperation$RegionStateMessage,2
 fromData,66,2a2bb7001c2a2bb9001d0100b500052bb9001d01003d1c99000f2a2b2ab40005b8001eb500042bb9001d01003d1c9900132bb9001d01003e2a1d2bb8001fb50006b1
 toData,97,2a2bb7000b2b2ab40005b9000c02002ab40004c600192b04b9000c02002b2ab400042ab40005b8000da7000a2b03b9000c02002ab40006c600222b04b9000c02002b2ab40006c1000eb9000c02002ab400062bb8000fa7000a2b03b9000c0200b1
-
+
 org/apache/geode/internal/cache/InitialImageOperation$RequestFilterInfoMessage,2
 fromData,24,2a2bb7001e2a2bb8001fb500042a2bb900200100b50002b1
 toData,24,2a2bb700212ab400042bb800222b2ab40002b900230200b1
-
+
 org/apache/geode/internal/cache/InitialImageOperation$RequestImageMessage,2
 fromData,98,2a2bb700b42a2bb800b5b500172a2bb900b60100b500022a2bb900b70100b500552a2bb900b70100b500032a2bb900b70100b5004b2a2bb800b8c000b9b5003a2a2bb800b8c000bab500262a2bb800b8c000bbb5003b2a2bb800b8c000bcb50030b1
 toData,86,2a2bb700bd2ab400172bb800be2b2ab40002b900bf02002b2ab40055b900c002002b2ab40003b900c002002b2ab4004bb900c002002ab4003a2bb800c12ab400262bb800c12ab4003b2bb800c12ab400302bb800c1b1
-
+
 org/apache/geode/internal/cache/InitialImageOperation$RequestRVVMessage,2
 fromData,34,2a2bb7001e2a2bb8001fb500052a2bb900200100b500022a2bb900210100b50003b1
 toData,34,2a2bb700222ab400052bb800232b2ab40002b9002402002b2ab40003b900250200b1
-
+
 org/apache/geode/internal/cache/InitialImageOperation$RequestSyncMessage,2
 fromData,73,2a2bb700222a2bb80023b500022bb9002401003d2bb9002501003e2a1dbd0026b5000a03360415041da2001f2ab4000a15041c99000a2bb80027a700072bb8002853840401a7ffe1b1
 toData,77,2a2bb7001c2ab400022bb8001d2b2ab4000a0332c1001eb9001f02002b2ab4000abeb9002002002ab4000a4d2cbe3e03360415041da200172c1504323a0519052bb900210200840401a7ffe9b1
-
+
 org/apache/geode/internal/cache/InterestRegistrationEventImpl,2
 fromData,33,2a2bb80010b500052a2bb80011b500062a2bb80012b500072a2bb80013b50008b1
 toData,36,2ab400052bb8000b2ab40006c0000c2bb8000d2ab400072bb8000e2ab400082bb8000fb1
-
+
 org/apache/geode/internal/cache/InvalidateOperation$InvalidateMessage,2
 fromData,25,2a2bb700282a2bb80029c0002ab500022a2bb80029b50015b1
 toData,22,2a2bb7002b2ab400022bb8002c2ab400152bb8002cb1
-
+
 org/apache/geode/internal/cache/InvalidateOperation$InvalidateWithContextMessage,2
 fromData,14,2a2bb7000c2a2bb8000db50004b1
 toData,14,2a2bb7000e2ab400042bb8000fb1
-
+
 org/apache/geode/internal/cache/InvalidatePartitionedRegionMessage,2
 fromData,14,2a2bb700152a2bb80016b50004b1
 toData,14,2a2bb700172ab400042bb80018b1
-
+
 org/apache/geode/internal/cache/InvalidateRegionOperation$InvalidateRegionMessage,2
 fromData,17,2a2bb700122a2bb80013c00014b50007b1
 toData,14,2a2bb700152ab400072bb80016b1
-
+
 org/apache/geode/internal/cache/JtaAfterCompletionMessage,2
 fromData,26,2a2bb700232a2bb900240100b500042a2bb900240100b50002b1
 toData,26,2a2bb700212b2ab40004b9002202002b2ab40002b900220200b1
-
+
 org/apache/geode/internal/cache/MemberFunctionStreamingMessage,2
 fromData,163,2a2bb700512bb9005201003d1c047e9900142a2bb900530100b500082ab40008b800541c077e99000d2a2bb900530100b500051c057e99000e2a2bb80055c00056b500062bb800554e2dc1002a9900252a03b5000a2a2dc0002ab80057b500072ab40007c7001b2a2dc0002ab5001aa700102a2dc00058b500072a04b5000a2a2bb80055b500092a2bb80055c00059b500102a1c10407e99000704a7000403b5000bb1
 toData,162,2a2bb7005b033d2ab400089900081c0480933d2ab40005029f00081c0780933d2ab40006c600081c0580933d2ab4000b9900091c104080933d2b1cb9005c02002ab4000899000d2b2ab40008b9005d02002ab40005029f000d2b2ab40005b9005d02002ab40006c6000b2ab400062bb8005e2ab4000a99000e2ab400072bb8005ea700102ab40007b9001f01002bb8005e2ab400092bb8005e2ab400102bb8005eb1
-
+
 org/apache/geode/internal/cache/Node,2
 fromData,60,2abb001c59b7001db500052ab400052bb8001e2a2bb9001f0100b500032a2bb900200100b500042a2bb900210100b500152a2bb9001f0100b50006b1
 toData,49,2ab400052bb800182b2ab40003b9001902002b2ab40004b9001a02002b2ab40015b9001b02002b2ab40006b900190200b1
-
+
 org/apache/geode/internal/cache/NonLocalRegionEntry,2
 fromData,48,2a2bb80027b500032a2bb80027b500062a2bb900280100b5000c2a2bb900290100b5000e2a2bb80027c0002ab50011b1
 toData,45,2ab400032bb800242ab400062bb800242b2ab4000cb9002503002b2ab4000eb9002602002ab400112bb80024b1
-
+
 org/apache/geode/internal/cache/NonLocalRegionEntryWithStats,2
 fromData,36,2a2bb700132a2bb900140100b500032a2bb900140100b500052a2bb900140100b50007b1
 toData,36,2a2bb700112b2ab40003b9001203002b2ab40005b9001203002b2ab40007b900120300b1
-
+
 org/apache/geode/internal/cache/PRQueryProcessor$EndOfBucket,2
 fromData,11,2a2bb9000b0100b50002b1
 toData,11,2b2ab40002b9000c0200b1
-
+
 org/apache/geode/internal/cache/PartitionAttributesImpl,2
 fromData,102,2a2bb900580100b5001b2a2bb900590100b500082a2bb900580100b5000c2a2bb900580100b500012a2bb8005ab500142a2bb8005bc0001eb5000d2a2bb8005bc0001eb500022a2bb900590100b500162a2bb900590100b500182a2bb8005bc0005cb5002fb1
 toData,97,2ab7002b2b2ab4001bb9005402002b2ab40008b9005503002b2ab60047b9005402002b2ab40001b9005402002ab400142bb800562ab4000d2bb800572ab400022bb800572b2ab40016b9005503002b2ab40018b9005503002ab4002f2bb80057b1
-
+
 org/apache/geode/internal/cache/PartitionRegionConfig,2
 fromData,207,2a2bb900500100b500162a2bb900510100b80052b500032a2bb80053b500182a2bb900540100b500052a2bb900540100b5001f2abb001959b7001ab500042ab400042bb800552a2bb80056b500082a2bb80056b500092a2bb80056b500072a2bb80057b5000c2a2bb80058b5000e2a2bb80058b5000d2a2bb80058b500102a2bb80058b5000f2a2bb900540100b500062a2bb80059c0005ab500112ab40011c7000e2abb002159b70022b500112a2bb8005bb500142a2bb80059c0005ab500012ab40001c7000a2ab80015b50001b1
 toData,178,2b2ab40016b9004702002b2ab40003b40048b9004902002ab400182bb8004a2b2ab40005b9004b02002b2ab4001fb9004b02002ab400042bb8004a2ab400082bb8004c2ab400092bb8004c2ab400072bb8004c2ab4000c2bb8004a2ab4000e2bb8004a2ab4000d2bb8004a2ab400102bb8004a2ab4000f2bb8004a2b2ab40006b9004b02002ab400112bb8004d2ab400142bb8004e2ab40001b9004f010099000b012bb8004da7000b2ab400012bb8004db1
-
+
 org/apache/geode/internal/cache/PoolFactoryImpl$PoolAttributes,2
 fromData,145,2a2bb80032b500032a2bb80032b500052a2bb80032b500072a2bb80032b500092a2bb80032b5000a2a2bb80032b5000b2a2bb80032b5000f2a2bb80033b5000e2a2bb80033b500122a2bb80032b500162a2bb80032b500182a2bb80034b500082a2bb80034b500142a2bb80035b5001b2a2bb80036b5001f2a2bb80036b500202a2bb80032b500132a2bb80034b5001cb1
 toData,145,2ab400032bb8002d2ab400052bb8002d2ab400072bb8002d2ab400092bb8002d2ab4000a2bb8002d2ab4000b2bb8002d2ab4000f2bb8002d2ab4000e2bb8002e2ab400122bb8002e2ab400162bb8002d2ab400182bb8002d2ab400082bb8002f2ab400142bb8002f2ab4001b2bb800302ab4001f2bb800312ab400202bb800312ab400132bb8002d2ab4001c2bb8002fb1
-
+
 org/apache/geode/internal/cache/PreferBytesCachedDeserializable,2
 fromData,9,2a2bb8000fb50003b1
 toData,9,2ab400032bb80010b1
-
+
 org/apache/geode/internal/cache/QueuedOperation,1
 toData,78,2b2ab40002b40035b9003602002ab400072bb800372ab40002b600319900312ab400032bb800372ab40002b600169a000d2ab40002b600159900152b2ab40006b9003602002ab400042bb80038b1
-
+
 org/apache/geode/internal/cache/RegionEventImpl,2
 fromData,48,2a2bb80023b500092a2bb80024b500022a2bb900250100b80026b5000a2a2bb900270100b500032a2bb80028b5000bb1
 toData,51,2ab400092bb8001c2ab400022bb8001d2b2ab4000ab4001eb9001f02002b2ab40003b9002002002ab4000bc000212bb80022b1
-
+
 org/apache/geode/internal/cache/ReleaseClearLockMessage,2
 fromData,24,2a2bb700212a2bb80022b500022a2bb900230100b50003b1
 toData,24,2a2bb700242ab400022bb800252b2ab40003b900260200b1
-
+
 org/apache/geode/internal/cache/RemoteContainsKeyValueMessage,2
 fromData,33,2a2bb7002e2a2bb8002fb500062a2ab4003010407e99000704a7000403b50005b1
 toData,14,2a2bb700312ab400062bb80032b1
-
+
 org/apache/geode/internal/cache/RemoteContainsKeyValueMessage$RemoteContainsKeyValueReplyMessage,2
 fromData,16,2a2bb700152a2bb900160100b50003b1
 toData,16,2a2bb700172b2ab40003b900180200b1
-
+
 org/apache/geode/internal/cache/RemoteDestroyMessage,2
 fromData,131,2a2bb700892a2bb8008ab7008b2a2bb8008ab5000c2a2bb9008c0100b8008db5000e2ab4008e1102007e99000e2a2bb8008ac0008fb500102ab4008e1104007e99000e2a2bb8008ac00035b500672a2bb8008ac00090b500122ab400059900122bb9008c0100572a2bb80091b700222a2bb8008ab500082a2bb8008ac00092b50017b1
 toData,135,2a2bb700932ab6006a2bb800942ab4000c2bb800942b2ab4000eb40095b9009602002ab40010c6000b2ab400102bb800942ab40067c6000b2ab400672bb800942ab400122bb800942ab4000599002a2b2ab4000699000704a7000403b9009602002ab40006b800973d1c2ab700982ab6006e2bb800992ab400082bb800942ab400172bb80094b1
-
+
 org/apache/geode/internal/cache/RemoteDestroyMessage$DestroyReplyMessage,2
 fromData,52,2a2bb700232bb9002401003d1c047e99000704a70004033e1c057e99000704a700040336041d99000d2a15042bb80025b50009b1
 toData,57,2a2bb7001f033d2ab40009c600081c0480913d2ab40009c100209900081c0580913d2b1cb9002102002ab40009c6000b2ab400092bb80022b1
-
+
 org/apache/geode/internal/cache/RemoteFetchEntryMessage,2
 fromData,14,2a2bb700312a2bb80032b50004b1
 toData,14,2a2bb700332ab400042bb80034b1
-
+
 org/apache/geode/internal/cache/RemoteFetchEntryMessage$FetchEntryReplyMessage,2
 fromData,58,2a2bb7001e2bb9001f01003d1c9a002c2ab40002b80020

<TRUNCATED>

[40/43] geode git commit: Fix html in javadocs

Posted by kl...@apache.org.
Fix html in javadocs


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/9ea9f4ee
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/9ea9f4ee
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/9ea9f4ee

Branch: refs/heads/feature/GEODE-2632-17
Commit: 9ea9f4ee393d4377f8e159bcd8235ba3fd67bb51
Parents: b8e41f7
Author: Kirk Lund <kl...@apache.org>
Authored: Wed May 24 16:40:00 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:11 2017 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/geode/test/junit/categories/UnitTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/9ea9f4ee/geode-junit/src/main/java/org/apache/geode/test/junit/categories/UnitTest.java
----------------------------------------------------------------------
diff --git a/geode-junit/src/main/java/org/apache/geode/test/junit/categories/UnitTest.java b/geode-junit/src/main/java/org/apache/geode/test/junit/categories/UnitTest.java
index 5614212..58d12a0 100755
--- a/geode-junit/src/main/java/org/apache/geode/test/junit/categories/UnitTest.java
+++ b/geode-junit/src/main/java/org/apache/geode/test/junit/categories/UnitTest.java
@@ -20,7 +20,7 @@ package org.apache.geode.test.junit.categories;
  *
  * <p>
  * <ul>
- * A {@code UnitTest} should <bold>not<bold> do any of the following:
+ * A {@code UnitTest} should <strong>not</strong> do any of the following:
  * <li>communicate with a database
  * <li>communicate across the network
  * <li>access the file system


[09/43] geode git commit: GEODE-2993: Rethrow CacheClosedException from AbstractGatewaySender.distribute()

Posted by kl...@apache.org.
GEODE-2993: Rethrow CacheClosedException from AbstractGatewaySender.distribute()

- Rethrow CacheClosedException.
- Add test for cache close while enqueuing event in AEQ.
- Add cleanup of disk dirs created by test.
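
The one-line change below (in AbstractGatewaySender) keeps the existing debug log but no longer swallows the cancellation. A minimal sketch of the resulting pattern, for orientation only -- QueueTarget and its enqueueEvent signature are illustrative stand-ins, not Geode API; only CancelException, the log message, and the rethrow come from the diff:

    import org.apache.geode.CancelException;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    class EnqueueSketch {
      private static final Logger logger = LogManager.getLogger(EnqueueSketch.class);

      // Hypothetical stand-in for the event processor the sender enqueues into.
      interface QueueTarget {
        void enqueueEvent(Object event);
      }

      void distribute(QueueTarget target, Object event) {
        try {
          target.enqueueEvent(event);
        } catch (CancelException e) {
          // CacheClosedException is a CancelException: log it for debugging,
          // then rethrow so the caller observes the cache close instead of
          // the event being silently dropped.
          logger.debug("caught cancel exception", e);
          throw e;
        }
      }
    }

Because CancelException is unchecked, the rethrow requires no change to the method signature.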


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/0fe0a106
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/0fe0a106
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/0fe0a106

Branch: refs/heads/feature/GEODE-2632-17
Commit: 0fe0a1061065f07d4b734d7055f56ad1635f1a2a
Parents: c1ab3ff
Author: Lynn Hughes-Godfrey <lh...@pivotal.io>
Authored: Thu May 25 15:31:16 2017 -0700
Committer: Lynn Hughes-Godfrey <lh...@pivotal.io>
Committed: Thu May 25 15:31:16 2017 -0700

----------------------------------------------------------------------
 .../cache/wan/AbstractGatewaySender.java        |   1 +
 .../cache/wan/AsyncEventQueueTestBase.java      |   2 +
 .../asyncqueue/AsyncEventListenerDUnitTest.java | 102 +++++++++++++++++++
 3 files changed, 105 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/0fe0a106/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
index 7ed9b51..c38d547 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
@@ -973,6 +973,7 @@ public abstract class AbstractGatewaySender implements GatewaySender, Distributi
           ev.enqueueEvent(operation, clonedEvent, substituteValue);
         } catch (CancelException e) {
           logger.debug("caught cancel exception", e);
+          throw e;
         } catch (RegionDestroyedException e) {
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.GatewayImpl_0_AN_EXCEPTION_OCCURRED_WHILE_QUEUEING_1_TO_PERFORM_OPERATION_2_FOR_3,

http://git-wip-us.apache.org/repos/asf/geode/blob/0fe0a106/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AsyncEventQueueTestBase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AsyncEventQueueTestBase.java b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AsyncEventQueueTestBase.java
index 6fe7ee9..dc7a218 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AsyncEventQueueTestBase.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AsyncEventQueueTestBase.java
@@ -98,6 +98,7 @@ import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.Wait;
 import org.apache.geode.test.dunit.WaitCriterion;
+import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
 
@@ -1555,6 +1556,7 @@ public class AsyncEventQueueTestBase extends JUnit4DistributedTestCase {
 
   public static void cleanupVM() throws IOException {
     closeCache();
+    JUnit4CacheTestCase.cleanDiskDirs();
   }
 
   public static void closeCache() throws IOException {

http://git-wip-us.apache.org/repos/asf/geode/blob/0fe0a106/geode-core/src/test/java/org/apache/geode/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
index 3dd0550..795af36 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
@@ -19,6 +19,7 @@ import static org.junit.Assert.*;
 import static org.mockito.Matchers.any;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -29,11 +30,18 @@ import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 import java.util.stream.LongStream;
 
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.CacheClosedException;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.wan.GatewayEventFilter;
+import org.apache.geode.cache.wan.GatewayQueueEvent;
+import org.apache.geode.internal.cache.wan.MyAsyncEventListener;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.DataPolicy;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionDestroyedException;
 import org.apache.geode.cache.asyncqueue.AsyncEvent;
@@ -42,11 +50,14 @@ import org.apache.geode.cache.asyncqueue.AsyncEventQueueFactory;
 import org.apache.geode.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl;
 import org.apache.geode.cache.asyncqueue.internal.AsyncEventQueueImpl;
 import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
 import org.apache.geode.cache.wan.GatewaySender.OrderPolicy;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.internal.AvailablePortHelper;
+import org.apache.geode.internal.cache.ForceReattemptException;
 import org.apache.geode.internal.cache.wan.AsyncEventQueueTestBase;
+import org.apache.geode.test.dunit.IgnoredException;
 import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.SerializableRunnableIF;
 import org.apache.geode.test.dunit.VM;
@@ -1674,6 +1685,66 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     Awaitility.waitAtMost(10000, TimeUnit.MILLISECONDS).until(() -> getBucketMoved(vm2, "ln"));
   }
 
+  @Test
+  public void testCacheClosedBeforeAEQWrite() {
+    Integer lnPort =
+        (Integer) vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId(1));
+
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
+    final DistributedMember member1 =
+        vm1.invoke(() -> cache.getDistributedSystem().getDistributedMember());
+
+    vm1.invoke(() -> addAEQWithCacheCloseFilter());
+    vm2.invoke(() -> addAEQWithCacheCloseFilter());
+
+    vm1.invoke(() -> createPersistentPartitionRegion());
+    vm2.invoke(() -> createPersistentPartitionRegion());
+    vm3.invoke(() -> {
+      AttributesFactory fact = new AttributesFactory();
+
+      PartitionAttributesFactory pfact = new PartitionAttributesFactory();
+      pfact.setTotalNumBuckets(16);
+      pfact.setLocalMaxMemory(0);
+      fact.setPartitionAttributes(pfact.create());
+      fact.setOffHeap(isOffHeap());
+      Region r = cache.createRegionFactory(fact.create()).addAsyncEventQueueId("ln")
+          .create(getTestMethodName() + "_PR");
+
+    });
+
+    vm3.invoke(() -> {
+      Region r = cache.getRegion(Region.SEPARATOR + getTestMethodName() + "_PR");
+      r.put(1, 1);
+      r.put(2, 2);
+      // This will trigger the gateway event filter to close the cache
+      try {
+        r.removeAll(Collections.singleton(1));
+        fail("Should have received a partition offline exception");
+      } catch (PartitionOfflineException expected) {
+
+      }
+    });
+  }
+
+  private void createPersistentPartitionRegion() {
+    AttributesFactory fact = new AttributesFactory();
+
+    PartitionAttributesFactory pfact = new PartitionAttributesFactory();
+    pfact.setTotalNumBuckets(16);
+    fact.setPartitionAttributes(pfact.create());
+    fact.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+    fact.setOffHeap(isOffHeap());
+    Region r = cache.createRegionFactory(fact.create()).addAsyncEventQueueId("ln")
+        .create(getTestMethodName() + "_PR");
+  }
+
+  private void addAEQWithCacheCloseFilter() {
+    cache.createAsyncEventQueueFactory().addGatewayEventFilter(new CloseCacheGatewayFilter())
+        .setPersistent(true).setParallel(true).create("ln", new MyAsyncEventListener());
+  }
+
   private static Set<Object> getKeysSeen(VM vm, String asyncEventQueueId) {
     return vm.invoke(() -> {
       final BucketMovingAsyncEventListener listener =
@@ -1690,6 +1761,37 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     });
   }
 
+  private final class CloseCacheGatewayFilter implements GatewayEventFilter {
+    @Override
+    public boolean beforeEnqueue(final GatewayQueueEvent event) {
+      if (event.getOperation().isRemoveAll()) {
+        new Thread(() -> cache.close()).start();
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          // ignore
+        }
+        throw new CacheClosedException();
+      }
+      return true;
+    }
+
+    @Override
+    public boolean beforeTransmit(final GatewayQueueEvent event) {
+      return false;
+    }
+
+    @Override
+    public void afterAcknowledgement(final GatewayQueueEvent event) {
+
+    }
+
+    @Override
+    public void close() {
+
+    }
+  }
+
   private static class BucketMovingAsyncEventListener implements AsyncEventListener {
     private final DistributedMember destination;
     private boolean moved;


[38/43] geode git commit: Revert BlockingHARegionJUnitTest

Posted by kl...@apache.org.
Revert BlockingHARegionJUnitTest
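
The revert below drops the Awaitility-style polling (e.g. await().until(() -> assertTrue(hrq.region.size() == 2))) and restores Geode's dunit Wait/WaitCriterion helpers. A minimal sketch of the restored idiom -- the helper classes and the waitForCriterion(criterion, timeoutMs, pollIntervalMs, throwOnTimeout) call are taken from the diff; the AtomicInteger merely stands in for hrq.region.size():

    import java.util.concurrent.atomic.AtomicInteger;

    import org.apache.geode.test.dunit.Wait;
    import org.apache.geode.test.dunit.WaitCriterion;

    class WaitIdiomSketch {
      static void waitForSize(final AtomicInteger size, final int expected) {
        WaitCriterion ev = new WaitCriterion() {
          public boolean done() {
            return size.get() == expected;            // condition polled by the helper
          }
          public String description() {
            return "size should reach " + expected;   // reported if the wait times out
          }
        };
        // wait up to 30 s, polling every 200 ms, failing the test on timeout
        Wait.waitForCriterion(ev, 30 * 1000, 200, true);
      }
    }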


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/91c13dab
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/91c13dab
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/91c13dab

Branch: refs/heads/feature/GEODE-2632-17
Commit: 91c13dabf3d5591d8e08cfeddf3b13feb7058503
Parents: 4f6a7a7
Author: Kirk Lund <kl...@apache.org>
Authored: Wed May 24 19:13:53 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:11 2017 -0700

----------------------------------------------------------------------
 .../cache/ha/BlockingHARegionJUnitTest.java     | 494 ++++++++++---------
 1 file changed, 270 insertions(+), 224 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/91c13dab/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
index 1534192..d0f5793 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
@@ -14,116 +14,76 @@
  */
 package org.apache.geode.internal.cache.ha;
 
-import static java.util.concurrent.TimeUnit.*;
 import static org.apache.geode.distributed.ConfigurationProperties.*;
-import static org.apache.geode.internal.cache.ha.HARegionQueue.*;
 import static org.junit.Assert.*;
 
-import java.util.ArrayList;
-import java.util.List;
 import java.util.Properties;
 
-import org.awaitility.Awaitility;
-import org.awaitility.core.ConditionFactory;
-import org.junit.After;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.test.junit.categories.ClientSubscriptionTest;
 import org.junit.Before;
 import org.junit.Ignore;
-import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.internal.cache.EventID;
-import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.test.dunit.ThreadUtils;
-import org.apache.geode.test.junit.categories.ClientSubscriptionTest;
+import org.apache.geode.test.dunit.Wait;
+import org.apache.geode.test.dunit.WaitCriterion;
 import org.apache.geode.test.junit.categories.IntegrationTest;
-import org.apache.geode.test.junit.rules.serializable.SerializableErrorCollector;
 
-/**
- * Integration tests for Blocking HARegionQueue.
- *
- * <p>
- * #40314: Filled up queue causes all publishers to block
- *
- * <p>
- * #37627: In case of out of order messages, (sequence Id violation), in spite of HARQ not full, the
- * capacity (putPermits) of the HARQ exhausted.
- */
 @Category({IntegrationTest.class, ClientSubscriptionTest.class})
 public class BlockingHARegionJUnitTest {
 
-  public static final String REGION = "BlockingHARegionJUnitTest_Region";
-  private static final long THREAD_TIMEOUT = 2 * 60 * 1000;
-
-  private final Object numberForThreadsLock = new Object();
-  private int numberForDoPuts;
-  private int numberForDoTakes;
-
-  volatile boolean stopThreads;
+  private static InternalCache cache = null;
 
-  private InternalCache cache;
-  private HARegionQueueAttributes queueAttributes;
-  private List<Thread> threads;
-  private ThreadGroup threadGroup;
-
-  @Rule
-  public SerializableErrorCollector errorCollector = new SerializableErrorCollector();
+  /** boolean to record an exception occurence in another thread **/
+  private static volatile boolean exceptionOccurred = false;
+  /** StringBuffer to store the exception **/
+  private static StringBuffer exceptionString = new StringBuffer();
+  /** boolen to quit the for loop **/
+  private static volatile boolean quitForLoop = false;
 
   @Before
   public void setUp() throws Exception {
-    synchronized (this.numberForThreadsLock) {
-      this.numberForDoPuts = 0;
-      this.numberForDoTakes = 0;
-    }
-
-    this.stopThreads = false;
-    this.threads = new ArrayList<>();
-    this.threadGroup = new ThreadGroup(getClass().getSimpleName()) {
-      @Override
-      public void uncaughtException(Thread t, Throwable e) {
-        errorCollector.addError(e);
-      }
-    };
-
-    this.queueAttributes = new HARegionQueueAttributes();
-
-    Properties config = new Properties();
-    config.setProperty(MCAST_PORT, "0");
-
-    this.cache = (InternalCache) CacheFactory.create(DistributedSystem.connect(config));
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    try {
-      this.stopThreads = true;
-      for (Thread thread : this.threads) {
-        thread.interrupt();
-        ThreadUtils.join(thread, THREAD_TIMEOUT);
-      }
-    } finally {
-      if (this.cache != null) {
-        this.cache.close();
-      }
+    Properties props = new Properties();
+    props.setProperty(MCAST_PORT, "0");
+    if (cache != null) {
+      cache.close(); // fault tolerance
     }
+    cache = (InternalCache) CacheFactory.create(DistributedSystem.connect(props));
   }
 
   /**
-   * This test has a scenario where the HARegionQueue capacity is just 1. There will be two thread.
+   * This test has a scenario where the HAReqionQueue capacity is just 1. There will be two thread.
    * One doing a 1000 puts and the other doing a 1000 takes. The validation for this test is that it
    * should not encounter any exceptions
    */
   @Test
   public void testBoundedPuts() throws Exception {
-    this.queueAttributes.setBlockingQueueCapacity(1);
-    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes,
-        BLOCKING_HA_QUEUE, false);
-    hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
+    exceptionOccurred = false;
+    HARegionQueueAttributes harqa = new HARegionQueueAttributes();
+    harqa.setBlockingQueueCapacity(1);
+    HARegionQueue hrq = HARegionQueue.getHARegionQueueInstance("BlockingHARegionJUnitTest_Region",
+        cache, harqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
+    hrq.setPrimary(true);// fix for 40314 - capacity constraint is checked for primary only.
+    Thread thread1 = new DoPuts(hrq, 1000);
+    Thread thread2 = new DoTake(hrq, 1000);
+
+    thread1.start();
+    thread2.start();
+
+    ThreadUtils.join(thread1, 30 * 1000);
+    ThreadUtils.join(thread2, 30 * 1000);
+
+    if (exceptionOccurred) {
+      fail(" Test failed due to " + exceptionString);
+    }
 
-    startDoPuts(hrq, 1000);
-    startDoTakes(hrq, 1000);
+    cache.close();
   }
 
   /**
@@ -136,23 +96,62 @@ public class BlockingHARegionJUnitTest {
    */
   @Test
   public void testPutBeingBlocked() throws Exception {
-    this.queueAttributes.setBlockingQueueCapacity(1);
-    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes,
-        BLOCKING_HA_QUEUE, false);
-    hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
+    exceptionOccurred = false;
+    quitForLoop = false;
+    HARegionQueueAttributes harqa = new HARegionQueueAttributes();
+    harqa.setBlockingQueueCapacity(1);
+    final HARegionQueue hrq = HARegionQueue.getHARegionQueueInstance(
+        "BlockingHARegionJUnitTest_Region", cache, harqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
+    hrq.setPrimary(true);// fix for 40314 - capacity constraint is checked for primary only.
+    final Thread thread1 = new DoPuts(hrq, 2);
+    thread1.start();
+    WaitCriterion ev = new WaitCriterion() {
+      public boolean done() {
+        return hrq.region.size() == 2;
+      }
 
-    Thread doPuts = startDoPuts(hrq, 2);
+      public String description() {
+        return null;
+      }
+    };
+    Wait.waitForCriterion(ev, 1000, 200, true);
+    assertTrue(thread1.isAlive()); // thread should still be alive (in wait state)
+
+    Thread thread2 = new DoTake(hrq, 1);
+    thread2.start(); // start take thread
+    ev = new WaitCriterion() {
+      public boolean done() {
+        return hrq.region.size() == 3;
+      }
 
-    await().until(() -> assertTrue(hrq.region.size() == 2));
+      public String description() {
+        return null;
+      }
+    };
+    // sleep. take will proceed and so will sleeping put
+    Wait.waitForCriterion(ev, 3 * 1000, 200, true);
 
-    // thread should still be alive (in wait state)
-    assertTrue(doPuts.isAlive());
+    // thread should have died since put should have proceeded
+    ev = new WaitCriterion() {
+      public boolean done() {
+        return !thread1.isAlive();
+      }
 
-    startDoTakes(hrq, 1);
+      public String description() {
+        return "thread1 still alive";
+      }
+    };
+    Wait.waitForCriterion(ev, 30 * 1000, 1000, true);
 
-    await().until(() -> assertTrue(hrq.region.size() == 3));
+    ThreadUtils.join(thread1, 30 * 1000); // for completeness
+    ThreadUtils.join(thread2, 30 * 1000);
+    if (exceptionOccurred) {
+      fail(" Test failed due to " + exceptionString);
+    }
+    cache.close();
   }
 
+
   /**
    * This test tests that the region capacity is never exceeded even in highly concurrent
    * environments. The region capacity is set to 10000. Then 5 threads start doing put
@@ -162,26 +161,62 @@ public class BlockingHARegionJUnitTest {
    */
   @Test
   public void testConcurrentPutsNotExceedingLimit() throws Exception {
-    this.queueAttributes.setBlockingQueueCapacity(10000);
-    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes,
-        BLOCKING_HA_QUEUE, false);
-    hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
-
-    Thread doPuts1 = startDoPuts(hrq, 20000, 1);
-    Thread doPuts2 = startDoPuts(hrq, 20000, 2);
-    Thread doPuts3 = startDoPuts(hrq, 20000, 3);
-    Thread doPuts4 = startDoPuts(hrq, 20000, 4);
-    Thread doPuts5 = startDoPuts(hrq, 20000, 5);
+    exceptionOccurred = false;
+    quitForLoop = false;
+    HARegionQueueAttributes harqa = new HARegionQueueAttributes();
+    harqa.setBlockingQueueCapacity(10000);
+    final HARegionQueue hrq = HARegionQueue.getHARegionQueueInstance(
+        "BlockingHARegionJUnitTest_Region", cache, harqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
+    hrq.setPrimary(true);// fix for 40314 - capacity constraint is checked for primary only.
+    Thread thread1 = new DoPuts(hrq, 20000, 1);
+    Thread thread2 = new DoPuts(hrq, 20000, 2);
+    Thread thread3 = new DoPuts(hrq, 20000, 3);
+    Thread thread4 = new DoPuts(hrq, 20000, 4);
+    Thread thread5 = new DoPuts(hrq, 20000, 5);
+
+    thread1.start();
+    thread2.start();
+    thread3.start();
+    thread4.start();
+    thread5.start();
+
+    WaitCriterion ev = new WaitCriterion() {
+      public boolean done() {
+        return hrq.region.size() == 20000;
+      }
 
-    await().until(() -> assertTrue(hrq.region.size() == 20000));
+      public String description() {
+        return null;
+      }
+    };
+    Wait.waitForCriterion(ev, 30 * 1000, 200, true);
 
-    assertTrue(doPuts1.isAlive());
-    assertTrue(doPuts2.isAlive());
-    assertTrue(doPuts3.isAlive());
-    assertTrue(doPuts4.isAlive());
-    assertTrue(doPuts5.isAlive());
+    assertTrue(thread1.isAlive());
+    assertTrue(thread2.isAlive());
+    assertTrue(thread3.isAlive());
+    assertTrue(thread4.isAlive());
+    assertTrue(thread5.isAlive());
 
     assertTrue(hrq.region.size() == 20000);
+
+    quitForLoop = true;
+    Thread.sleep(20000);
+
+    thread1.interrupt();
+    thread2.interrupt();
+    thread3.interrupt();
+    thread4.interrupt();
+    thread5.interrupt();
+
+    Thread.sleep(2000);
+
+    ThreadUtils.join(thread1, 5 * 60 * 1000);
+    ThreadUtils.join(thread2, 5 * 60 * 1000);
+    ThreadUtils.join(thread3, 5 * 60 * 1000);
+    ThreadUtils.join(thread4, 5 * 60 * 1000);
+    ThreadUtils.join(thread5, 5 * 60 * 1000);
+
+    cache.close();
   }
 
   /**
@@ -191,41 +226,84 @@ public class BlockingHARegionJUnitTest {
    * state. the region size would be verified to be 20000 (10000 puts and 10000 DACE objects). then
    * the threads are interrupted and made to quit the loop
    */
-  @Ignore("Test is disabled until/if blocking queue capacity becomes a hard limit")
+  @Ignore("TODO: test is disabled")
   @Test
   public void testConcurrentPutsTakesNotExceedingLimit() throws Exception {
-    this.queueAttributes.setBlockingQueueCapacity(10000);
-    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes,
-        BLOCKING_HA_QUEUE, false);
-    hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
-
-    Thread doPuts1 = startDoPuts(hrq, 40000, 1);
-    Thread doPuts2 = startDoPuts(hrq, 40000, 2);
-    Thread doPuts3 = startDoPuts(hrq, 40000, 3);
-    Thread doPuts4 = startDoPuts(hrq, 40000, 4);
-    Thread doPuts5 = startDoPuts(hrq, 40000, 5);
-
-    Thread doTakes1 = startDoTakes(hrq, 5000);
-    Thread doTakes2 = startDoTakes(hrq, 5000);
-    Thread doTakes3 = startDoTakes(hrq, 5000);
-    Thread doTakes4 = startDoTakes(hrq, 5000);
-    Thread doTakes5 = startDoTakes(hrq, 5000);
-
-    ThreadUtils.join(doTakes1, 30 * 1000);
-    ThreadUtils.join(doTakes2, 30 * 1000);
-    ThreadUtils.join(doTakes3, 30 * 1000);
-    ThreadUtils.join(doTakes4, 30 * 1000);
-    ThreadUtils.join(doTakes5, 30 * 1000);
-
-    await().until(() -> assertTrue(hrq.region.size() == 20000));
-
-    assertTrue(doPuts1.isAlive());
-    assertTrue(doPuts2.isAlive());
-    assertTrue(doPuts3.isAlive());
-    assertTrue(doPuts4.isAlive());
-    assertTrue(doPuts5.isAlive());
+    exceptionOccurred = false;
+    quitForLoop = false;
+    HARegionQueueAttributes harqa = new HARegionQueueAttributes();
+    harqa.setBlockingQueueCapacity(10000);
+    final HARegionQueue hrq = HARegionQueue.getHARegionQueueInstance(
+        "BlockingHARegionJUnitTest_Region", cache, harqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
+    Thread thread1 = new DoPuts(hrq, 40000, 1);
+    Thread thread2 = new DoPuts(hrq, 40000, 2);
+    Thread thread3 = new DoPuts(hrq, 40000, 3);
+    Thread thread4 = new DoPuts(hrq, 40000, 4);
+    Thread thread5 = new DoPuts(hrq, 40000, 5);
+
+    Thread thread6 = new DoTake(hrq, 5000);
+    Thread thread7 = new DoTake(hrq, 5000);
+    Thread thread8 = new DoTake(hrq, 5000);
+    Thread thread9 = new DoTake(hrq, 5000);
+    Thread thread10 = new DoTake(hrq, 5000);
+
+    thread1.start();
+    thread2.start();
+    thread3.start();
+    thread4.start();
+    thread5.start();
+
+    thread6.start();
+    thread7.start();
+    thread8.start();
+    thread9.start();
+    thread10.start();
+
+    ThreadUtils.join(thread6, 30 * 1000);
+    ThreadUtils.join(thread7, 30 * 1000);
+    ThreadUtils.join(thread8, 30 * 1000);
+    ThreadUtils.join(thread9, 30 * 1000);
+    ThreadUtils.join(thread10, 30 * 1000);
+
+    WaitCriterion ev = new WaitCriterion() {
+      public boolean done() {
+        return hrq.region.size() == 20000;
+      }
+
+      public String description() {
+        return null;
+      }
+    };
+    Wait.waitForCriterion(ev, 30 * 1000, 200, true);
+
+    assertTrue(thread1.isAlive());
+    assertTrue(thread2.isAlive());
+    assertTrue(thread3.isAlive());
+    assertTrue(thread4.isAlive());
+    assertTrue(thread5.isAlive());
 
     assertTrue(hrq.region.size() == 20000);
+
+    quitForLoop = true;
+
+    Thread.sleep(2000);
+
+    thread1.interrupt();
+    thread2.interrupt();
+    thread3.interrupt();
+    thread4.interrupt();
+    thread5.interrupt();
+
+    Thread.sleep(2000);
+
+
+    ThreadUtils.join(thread1, 30 * 1000);
+    ThreadUtils.join(thread2, 30 * 1000);
+    ThreadUtils.join(thread3, 30 * 1000);
+    ThreadUtils.join(thread4, 30 * 1000);
+    ThreadUtils.join(thread5, 30 * 1000);
+
+    cache.close();
   }
 
   /**
@@ -237,92 +315,62 @@ public class BlockingHARegionJUnitTest {
    */
   @Test
   public void testHARQMaxCapacity_Bug37627() throws Exception {
-    this.queueAttributes.setBlockingQueueCapacity(1);
-    this.queueAttributes.setExpiryTime(180);
-    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes,
-        BLOCKING_HA_QUEUE, false);
-    hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
-
-    EventID event1 = new EventID(new byte[] {1}, 1, 2); // violation
-    EventID event2 = new EventID(new byte[] {1}, 1, 1); // ignored
-    EventID event3 = new EventID(new byte[] {1}, 1, 3);
-
-    newThread(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          hrq.put(new ConflatableObject("key1", "value1", event1, false, "region1"));
-          hrq.take();
-          hrq.put(new ConflatableObject("key2", "value1", event2, false, "region1"));
-          hrq.put(new ConflatableObject("key3", "value1", event3, false, "region1"));
-        } catch (Exception e) {
-          errorCollector.addError(e);
+    try {
+      exceptionOccurred = false;
+      quitForLoop = false;
+      HARegionQueueAttributes harqa = new HARegionQueueAttributes();
+      harqa.setBlockingQueueCapacity(1);
+      harqa.setExpiryTime(180);
+      final HARegionQueue hrq = HARegionQueue.getHARegionQueueInstance(
+          "BlockingHARegionJUnitTest_Region", cache, harqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
+      hrq.setPrimary(true);// fix for 40314 - capacity constraint is checked for primary only.
+      final EventID id1 = new EventID(new byte[] {1}, 1, 2); // violation
+      final EventID ignore = new EventID(new byte[] {1}, 1, 1); //
+      final EventID id2 = new EventID(new byte[] {1}, 1, 3); //
+      Thread t1 = new Thread() {
+        public void run() {
+          try {
+            hrq.put(new ConflatableObject("key1", "value1", id1, false, "region1"));
+            hrq.take();
+            hrq.put(new ConflatableObject("key2", "value1", ignore, false, "region1"));
+            hrq.put(new ConflatableObject("key3", "value1", id2, false, "region1"));
+          } catch (Exception e) {
+            exceptionString.append("First Put in region queue failed");
+            exceptionOccurred = true;
+          }
         }
+      };
+      t1.start();
+      ThreadUtils.join(t1, 20 * 1000);
+      if (exceptionOccurred) {
+        fail(" Test failed due to " + exceptionString);
+      }
+    } finally {
+      if (cache != null) {
+        cache.close();
       }
-    });
-  }
-
-  private Thread newThread(Runnable runnable) {
-    Thread thread = new Thread(this.threadGroup, runnable);
-    this.threads.add(thread);
-    thread.start();
-    return thread;
-  }
-
-  private Thread startDoPuts(HARegionQueue haRegionQueue, int count) {
-    return startDoPuts(haRegionQueue, count, 0);
-  }
-
-  private Thread startDoPuts(HARegionQueue haRegionQueue, int count, int regionId) {
-    Thread thread = new DoPuts(this.threadGroup, haRegionQueue, count, regionId);
-    this.threads.add(thread);
-    thread.start();
-    return thread;
-  }
-
-  private Thread startDoTakes(HARegionQueue haRegionQueue, int count) {
-    Thread thread = new DoTakes(this.threadGroup, haRegionQueue, count);
-    this.threads.add(thread);
-    thread.start();
-    return thread;
-  }
-
-  private ConditionFactory await() {
-    return Awaitility.await().atMost(2, MINUTES);
-  }
-
-  int nextDoPutsThreadNum() {
-    synchronized (this.numberForThreadsLock) {
-      return numberForDoPuts++;
-    }
-  }
-
-  int nextDoTakesThreadNum() {
-    synchronized (this.numberForThreadsLock) {
-      return numberForDoTakes++;
     }
   }
 
   /**
    * class which does specified number of puts on the queue
    */
-  private class DoPuts extends Thread {
+  private static class DoPuts extends Thread {
 
-    private final HARegionQueue regionQueue;
+    HARegionQueue regionQueue = null;
+    final int numberOfPuts;
 
-    private final int numberOfPuts;
+    DoPuts(HARegionQueue haRegionQueue, int numberOfPuts) {
+      this.regionQueue = haRegionQueue;
+      this.numberOfPuts = numberOfPuts;
+    }
 
     /**
      * region id can be specified to generate Thread unique events
      */
-    private final int regionId;
+    int regionId = 0;
 
-    DoPuts(ThreadGroup threadGroup, HARegionQueue haRegionQueue, int numberOfPuts) {
-      this(threadGroup, haRegionQueue, numberOfPuts, 0);
-    }
-
-    DoPuts(ThreadGroup threadGroup, HARegionQueue haRegionQueue, int numberOfPuts, int regionId) {
-      super(threadGroup, "DoPuts-" + nextDoPutsThreadNum());
+    DoPuts(HARegionQueue haRegionQueue, int numberOfPuts, int regionId) {
       this.regionQueue = haRegionQueue;
       this.numberOfPuts = numberOfPuts;
       this.regionId = regionId;
@@ -330,16 +378,19 @@ public class BlockingHARegionJUnitTest {
 
     @Override
     public void run() {
-      for (int i = 0; i < this.numberOfPuts; i++) {
-        if (stopThreads || Thread.currentThread().isInterrupted()) {
-          break;
-        }
+      for (int i = 0; i < numberOfPuts; i++) {
         try {
           this.regionQueue.put(new ConflatableObject("" + i, "" + i,
-              new EventID(new byte[this.regionId], i, i), false, REGION));
+              new EventID(new byte[regionId], i, i), false, "BlockingHARegionJUnitTest_Region"));
+          if (quitForLoop) {
+            break;
+          }
+          if (Thread.currentThread().isInterrupted()) {
+            break;
+          }
         } catch (Exception e) {
-          errorCollector.addError(e);
-          break;
+          exceptionOccurred = true;
+          exceptionString.append(" Exception occurred due to " + e);
         }
       }
     }
@@ -348,29 +399,24 @@ public class BlockingHARegionJUnitTest {
   /**
    * class which does a specified number of takes
    */
-  private class DoTakes extends Thread {
+  private static class DoTake extends Thread {
 
-    private final HARegionQueue regionQueue;
+    final HARegionQueue regionQueue;
+    final int numberOfTakes;
 
-    private final int numberOfTakes;
-
-    DoTakes(ThreadGroup threadGroup, HARegionQueue haRegionQueue, int numberOfTakes) {
-      super(threadGroup, "DoTakes-" + nextDoTakesThreadNum());
+    DoTake(HARegionQueue haRegionQueue, int numberOfTakes) {
       this.regionQueue = haRegionQueue;
       this.numberOfTakes = numberOfTakes;
     }
 
     @Override
     public void run() {
-      for (int i = 0; i < this.numberOfTakes; i++) {
-        if (stopThreads || Thread.currentThread().isInterrupted()) {
-          break;
-        }
+      for (int i = 0; i < numberOfTakes; i++) {
         try {
           assertNotNull(this.regionQueue.take());
         } catch (Exception e) {
-          errorCollector.addError(e);
-          break;
+          exceptionOccurred = true;
+          exceptionString.append(" Exception occurred due to " + e);
         }
       }
     }
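
The hunks above replace the Awaitility-based await() helpers with the WaitCriterion / Wait.waitForCriterion
polling idiom. For orientation, here is a minimal, hedged sketch of that idiom; the PollingSketch helper is
hypothetical, and the Wait/WaitCriterion classes are assumed to be the Geode DUnit test utilities already
used in the hunks above.

  import java.util.function.BooleanSupplier;

  import org.apache.geode.test.dunit.Wait;          // assumed DUnit utility package
  import org.apache.geode.test.dunit.WaitCriterion; // assumed DUnit utility package

  final class PollingSketch {
    // Poll every 200 ms until the condition holds, or fail the test with the
    // given description once timeoutMs elapses (throwOnTimeout = true).
    static void waitUntil(final BooleanSupplier condition, final String what, long timeoutMs) {
      WaitCriterion ev = new WaitCriterion() {
        public boolean done() {
          return condition.getAsBoolean();
        }

        public String description() {
          return "timed out waiting for " + what;
        }
      };
      Wait.waitForCriterion(ev, timeoutMs, 200, true);
    }
  }

  // Usage, equivalent to the inline WaitCriterion blocks in the hunks above:
  //   PollingSketch.waitUntil(() -> hrq.region.size() == 20000, "region size of 20000", 30 * 1000);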


[12/43] geode git commit: GEODE-2939: Make sure bucket region initiates the event tracker from the image provider.

Posted by kl...@apache.org.
GEODE-2939: Make sure bucket region initiates the event tracker from the image provider.

Save all event states from remote processes.
Initiate the event tracker from the image provider only; a condensed sketch of the flow follows the file list below.


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/56f976c8
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/56f976c8
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/56f976c8

Branch: refs/heads/feature/GEODE-2632-17
Commit: 56f976c89fabed58a086a845593efc2ef6e75114
Parents: 29ea88a
Author: eshu <es...@pivotal.io>
Authored: Thu May 25 16:38:55 2017 -0700
Committer: eshu <es...@pivotal.io>
Committed: Thu May 25 17:14:09 2017 -0700

----------------------------------------------------------------------
 .../geode/internal/cache/BucketRegion.java      | 29 ++++++++
 .../cache/CacheDistributionAdvisee.java         |  8 ++
 .../internal/cache/CreateRegionProcessor.java   | 36 ++++-----
 .../geode/internal/cache/DistributedRegion.java |  9 +++
 .../geode/internal/cache/EventTracker.java      |  3 +-
 .../internal/cache/InitialImageOperation.java   |  3 +
 .../internal/cache/EventTrackerDUnitTest.java   | 78 ++++++++++++++++++++
 7 files changed, 147 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
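
A condensed sketch of the flow this commit adds, abbreviated from the hunks below for orientation
only (it is not an additional patch):

  // CreateRegionProcessor.CreateRegionReplyProcessor: keep every sender's event
  // state instead of applying only the first one received.
  private final Map<DistributedMember, Map<ThreadIdentifier, EventSeqnoHolder>> remoteEventStates =
      new ConcurrentHashMap<>();

  public Map<ThreadIdentifier, EventSeqnoHolder> getEventState(InternalDistributedMember provider) {
    return this.remoteEventStates.get(provider);
  }

  // InitialImageOperation: remember which member actually supplied the image and,
  // once the image has been obtained, let the region initialize its tracker from it.
  if (this.gotImage) {
    this.region.recordEventStateFromImageProvider(provider);
  }

  // BucketRegion.recordEventStateFromImageProvider(provider): look up that provider's
  // saved state via the registered CreateRegionReplyProcessor and pass it to
  // recordEventState(provider, providerEventStates); DistributedRegion's default
  // implementation stays a no-op.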


http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
index 7bfffb7..31b341a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
@@ -29,8 +29,11 @@ import org.apache.geode.internal.Assert;
 import org.apache.geode.internal.HeapDataOutputStream;
 import org.apache.geode.internal.Version;
 import org.apache.geode.internal.cache.BucketAdvisor.BucketProfile;
+import org.apache.geode.internal.cache.CreateRegionProcessor.CreateRegionReplyProcessor;
+import org.apache.geode.internal.cache.EventTracker.EventSeqnoHolder;
 import org.apache.geode.internal.cache.FilterRoutingInfo.FilterInfo;
 import org.apache.geode.internal.cache.control.MemoryEvent;
+import org.apache.geode.internal.cache.ha.ThreadIdentifier;
 import org.apache.geode.internal.cache.partitioned.Bucket;
 import org.apache.geode.internal.cache.partitioned.DestroyMessage;
 import org.apache.geode.internal.cache.partitioned.InvalidateMessage;
@@ -92,6 +95,8 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   private final AtomicLong numOverflowBytesOnDisk = new AtomicLong();
   private final AtomicLong numEntriesInVM = new AtomicLong();
   private final AtomicLong evictions = new AtomicLong();
+  // For GII
+  private CreateRegionReplyProcessor createRegionReplyProcessor;
 
   /**
    * Contains size in bytes of the values stored in theRealMap. Sizes are tallied during put and
@@ -281,6 +286,30 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   }
 
   @Override
+  public void registerCreateRegionReplyProcessor(CreateRegionReplyProcessor processor) {
+    this.createRegionReplyProcessor = processor;
+  }
+
+  @Override
+  protected void recordEventStateFromImageProvider(InternalDistributedMember provider) {
+    if (this.createRegionReplyProcessor != null) {
+      Map<ThreadIdentifier, EventSeqnoHolder> providerEventStates =
+          this.createRegionReplyProcessor.getEventState(provider);
+      if (providerEventStates != null) {
+        recordEventState(provider, providerEventStates);
+      } else {
+        // We do not expect this to happen. Just in case we get GII from a node
+        // that was not in the cluster originally when we sent the
+        // createRegionMessage (its event tracker was saved),
+        // but later became available before we could get GII from anyone else.
+        // This will not cause a data inconsistency issue. Log this message for debugging purposes.
+        logger.info("Could not initiate event tracker from GII provider {}", provider);
+      }
+      this.createRegionReplyProcessor = null;
+    }
+  }
+
+  @Override
   protected CacheDistributionAdvisor createDistributionAdvisor(
       InternalRegionArguments internalRegionArgs) {
     return internalRegionArgs.getBucketAdvisor();

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
index e4a7957..d933019 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
@@ -17,6 +17,7 @@ package org.apache.geode.internal.cache;
 import org.apache.geode.cache.RegionAttributes;
 import org.apache.geode.distributed.internal.DistributionAdvisee;
 import org.apache.geode.internal.cache.CacheDistributionAdvisor.CacheProfile;
+import org.apache.geode.internal.cache.CreateRegionProcessor.CreateRegionReplyProcessor;
 
 /**
  * Distributed cache object (typically a <code>Region</code>) which uses a
@@ -54,4 +55,11 @@ public interface CacheDistributionAdvisee extends DistributionAdvisee {
    * @param profile the remote member's profile
    */
   public void remoteRegionInitialized(CacheProfile profile);
+
+  /**
+   * Allow this advisee to know the CreateRegionReplyProcessor that is creating it.
+   * 
+   * @param processor the CreateRegionReplyProcessor that is creating the advisee
+   */
+  default public void registerCreateRegionReplyProcessor(CreateRegionReplyProcessor processor) {}
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
index c1d1e77..1e38065 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.logging.log4j.Logger;
 
@@ -48,6 +49,8 @@ import org.apache.geode.internal.Assert;
 import org.apache.geode.internal.InternalDataSerializer;
 import org.apache.geode.internal.cache.CacheDistributionAdvisor.CacheProfile;
 import org.apache.geode.internal.cache.CacheDistributionAdvisor.InitialImageAdvice;
+import org.apache.geode.internal.cache.EventTracker.EventSeqnoHolder;
+import org.apache.geode.internal.cache.ha.ThreadIdentifier;
 import org.apache.geode.internal.cache.partitioned.Bucket;
 import org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException;
 import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
@@ -96,6 +99,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
       }
 
       CreateRegionReplyProcessor replyProc = new CreateRegionReplyProcessor(recps);
+      newRegion.registerCreateRegionReplyProcessor(replyProc);
 
       boolean useMcast = false; // multicast is disabled for this message for now
       CreateRegionMessage msg = getCreateRegionMessage(recps, replyProc, useMcast);
@@ -199,17 +203,16 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
           .getDistributedSystem(), members);
     }
 
-    /**
-     * guards application of event state to the region so that we deserialize and apply event state
-     * only once
-     */
-    private Object eventStateLock = new Object();
-
-    /** whether event state has been recorded in the region */
-    private boolean eventStateRecorded = false;
+    private final Map<DistributedMember, Map<ThreadIdentifier, EventSeqnoHolder>> remoteEventStates =
+        new ConcurrentHashMap<>();
 
     private boolean allMembersSkippedChecks = true;
 
+    public Map<ThreadIdentifier, EventSeqnoHolder> getEventState(
+        InternalDistributedMember provider) {
+      return this.remoteEventStates.get(provider);
+    }
+
     /**
      * true if all members skipped CreateRegionMessage#checkCompatibility(), in which case
      * CreateRegionMessage should be retried.
@@ -218,6 +221,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
       return this.allMembersSkippedChecks;
     }
 
+    @SuppressWarnings("unchecked")
     @Override
     public void process(DistributionMessage msg) {
       Assert.assertTrue(msg instanceof CreateRegionReplyMessage,
@@ -246,17 +250,13 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
             RegionAdvisor ra = (RegionAdvisor) cda;
             ra.putBucketRegionProfiles(reply.bucketProfiles);
           }
-          if (reply.eventState != null && lr.hasEventTracker()) {
-            synchronized (eventStateLock) {
-              if (!this.eventStateRecorded) {
-                this.eventStateRecorded = true;
-                Object eventState = null;
-                eventState = reply.eventState;
-                lr.recordEventState(reply.getSender(), (Map) eventState);
-              }
-            }
+
+          // Save all event states; the event tracker needs to be initiated from the GII provider
+          if (reply.eventState != null) {
+            remoteEventStates.put(reply.getSender(),
+                (Map<ThreadIdentifier, EventSeqnoHolder>) reply.eventState);
           }
-          reply.eventState = null;
+
           if (lr.isUsedForPartitionedRegionBucket()) {
             ((BucketRegion) lr).updateEventSeqNum(reply.seqKeyForWan);
           }

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
index 650fe2a..9df64d0 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
@@ -261,6 +261,15 @@ public class DistributedRegion extends LocalRegion implements CacheDistributionA
   }
 
   /**
+   * Record the event state from the image provider
+   * 
+   * @param provider the member that provided the initial image and event state
+   */
+  protected void recordEventStateFromImageProvider(InternalDistributedMember provider) {
+    // No-op. Only BucketRegion will initiate its event tracker from the saved event states
+  }
+
+  /**
    * Intended for used during construction of a DistributedRegion
    * 
    * @return the advisor to be used by the region

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/EventTracker.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EventTracker.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EventTracker.java
index 2c86aed..b919043 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/EventTracker.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EventTracker.java
@@ -99,7 +99,8 @@ public class EventTracker {
   String name;
 
   /**
-   * whether or not this tracker has been initialized with state from another process
+   * whether or not this tracker has been initialized to allow entry operations. A replicate
+   * region does not initiate its event tracker from its replicates.
    */
   volatile boolean initialized;
 

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java
index 82df980..f8e9d0f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java
@@ -231,11 +231,13 @@ public class InitialImageOperation {
       }
     }
     long giiStart = this.region.getCachePerfStats().startGetInitialImage();
+    InternalDistributedMember provider = null;
 
     for (Iterator itr = recipients.iterator(); !this.gotImage && itr.hasNext();) {
       // if we got a partial image from the previous recipient, then clear it
 
       InternalDistributedMember recipient = (InternalDistributedMember) itr.next();
+      provider = recipient;
 
       // In case of HARegion, before getting the region snapshot(image) get the filters
       // registered by the associated client and apply them.
@@ -546,6 +548,7 @@ public class InitialImageOperation {
     } // for
 
     if (this.gotImage) {
+      this.region.recordEventStateFromImageProvider(provider);
       this.region.getCachePerfStats().endGetInitialImage(giiStart);
       if (this.isDeltaGII) {
         this.region.getCachePerfStats().incDeltaGIICompleted();

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/test/java/org/apache/geode/internal/cache/EventTrackerDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/EventTrackerDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/EventTrackerDUnitTest.java
index 3faf41f..77c0998 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/EventTrackerDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/EventTrackerDUnitTest.java
@@ -19,8 +19,11 @@ import static org.junit.Assert.*;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
 
+import org.awaitility.Awaitility;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -405,4 +408,79 @@ public class EventTrackerDUnitTest extends JUnit4CacheTestCase {
   protected static int getCacheServerPort() {
     return cacheServerPort;
   }
+
+  /**
+   * Tests that the event tracker is initialized after GII
+   */
+  @Test
+  public void testEventTrackerIsInitalized() throws CacheException {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    VM vm2 = host.getVM(2);
+
+    createPRInVMs(vm0, vm1, vm2);
+
+    createPR();
+
+    doPutsInVMs(vm0, vm1, vm2);
+
+    doPuts();
+
+    verifyEventTrackerContent();
+
+    // close the region
+    getCache().getRegion(getName()).close();
+
+    // create the region again.
+    createPR();
+
+    for (int i = 0; i < 12; i++) {
+      waitEntryIsLocal(i);
+    }
+
+    // verify the event tracker is initialized after the region is re-created
+    verifyEventTrackerContent();
+
+  }
+
+  private void waitEntryIsLocal(int i) {
+    Awaitility.await().pollInterval(10, TimeUnit.MILLISECONDS).pollDelay(10, TimeUnit.MILLISECONDS)
+        .atMost(30, TimeUnit.SECONDS)
+        .until(() -> getCache().getRegion(getName()).getEntry(i) != null);
+  }
+
+  private void verifyEventTrackerContent() {
+    PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(getName());
+    BucketRegion br = pr.getDataStore().getLocalBucketById(0);
+    Map<?, ?> eventStates = br.getEventState();
+    assertTrue(eventStates.size() == 4);
+  }
+
+  public void createPRInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> createPR());
+    }
+  }
+
+  private void createPR() {
+    PartitionAttributesFactory paf =
+        new PartitionAttributesFactory().setRedundantCopies(3).setTotalNumBuckets(4);
+    RegionFactory fact = getCache().createRegionFactory(RegionShortcut.PARTITION)
+        .setPartitionAttributes(paf.create());
+    fact.create(getName());
+  }
+
+  public void doPutsInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> doPuts());
+    }
+  }
+
+  private void doPuts() {
+    Region region = getCache().getRegion(getName());
+    for (int i = 0; i < 12; i++) {
+      region.put(i, i);
+    }
+  }
 }


[33/43] geode git commit: Cleanup CacheClientUpdater

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/07efaa8e/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDUnitTest.java b/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDUnitTest.java
index 8cedbf0..702e6c8 100644
--- a/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDUnitTest.java
+++ b/geode-wan/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDUnitTest.java
@@ -43,7 +43,8 @@ import org.apache.geode.test.junit.categories.DistributedTest;
 public class ParallelGatewaySenderOperationsDUnitTest extends WANTestBase {
 
   @Rule
-  public DistributedRestoreSystemProperties restoreSystemProperties = new DistributedRestoreSystemProperties();
+  public DistributedRestoreSystemProperties restoreSystemProperties =
+      new DistributedRestoreSystemProperties();
 
   @Override
   protected final void postSetUpWANTestBase() throws Exception {


[29/43] geode git commit: Run spotlessApply

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GatewayReceiverCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GatewayReceiverCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GatewayReceiverCommand.java
index 704f2da..d489b88 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GatewayReceiverCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GatewayReceiverCommand.java
@@ -188,7 +188,7 @@ public class GatewayReceiverCommand extends BaseCommand {
     int dsid = clientMessage.getPart(partNumber++).getInt();
 
     boolean removeOnException =
-      clientMessage.getPart(partNumber++).getSerializedForm()[0] == 1 ? true : false;
+        clientMessage.getPart(partNumber++).getSerializedForm()[0] == 1 ? true : false;
 
     // Keep track of whether a response has been written for
     // exceptions
@@ -218,8 +218,7 @@ public class GatewayReceiverCommand extends BaseCommand {
         } catch (Exception e) {
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_REQUEST_1_CONTAINING_2_EVENTS,
-              new Object[] {
-                serverConnection.getName(), Integer.valueOf(batchId),
+              new Object[] {serverConnection.getName(), Integer.valueOf(batchId),
                   Integer.valueOf(numberOfEvents)}),
               e);
           throw e;
@@ -252,8 +251,7 @@ public class GatewayReceiverCommand extends BaseCommand {
         } catch (Exception e) {
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_REQUEST_1_CONTAINING_2_EVENTS,
-              new Object[] {
-                serverConnection.getName(), Integer.valueOf(batchId),
+              new Object[] {serverConnection.getName(), Integer.valueOf(batchId),
                   Integer.valueOf(numberOfEvents)}),
               e);
           throw e;
@@ -266,8 +264,7 @@ public class GatewayReceiverCommand extends BaseCommand {
         } catch (Exception e) {
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_REQUEST_1_CONTAINING_2_EVENTS,
-              new Object[] {
-                serverConnection.getName(), Integer.valueOf(batchId),
+              new Object[] {serverConnection.getName(), Integer.valueOf(batchId),
                   Integer.valueOf(numberOfEvents)}),
               e);
           throw e;
@@ -303,8 +300,7 @@ public class GatewayReceiverCommand extends BaseCommand {
               } catch (Exception e) {
                 logger.warn(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_CREATE_REQUEST_1_FOR_2_EVENTS,
-                    new Object[] {
-                      serverConnection.getName(), Integer.valueOf(batchId),
+                    new Object[] {serverConnection.getName(), Integer.valueOf(batchId),
                         Integer.valueOf(numberOfEvents)}),
                     e);
                 throw e;
@@ -313,14 +309,15 @@ public class GatewayReceiverCommand extends BaseCommand {
             if (logger.isDebugEnabled()) {
               logger.debug(
                   "{}: Processing batch create request {} on {} for region {} key {} value {} callbackArg {}, eventId={}",
-                  serverConnection.getName(), batchId, serverConnection.getSocketString(), regionName, key,
-                  valuePart, callbackArg, eventId);
+                  serverConnection.getName(), batchId, serverConnection.getSocketString(),
+                  regionName, key, valuePart, callbackArg, eventId);
             }
             versionTimeStamp = clientMessage.getPart(index++).getLong();
             // Process the create request
             if (key == null || regionName == null) {
               StringId message = null;
-              Object[] messageArgs = new Object[] { serverConnection.getName(), Integer.valueOf(batchId)};
+              Object[] messageArgs =
+                  new Object[] {serverConnection.getName(), Integer.valueOf(batchId)};
               if (key == null) {
                 message =
                     LocalizedStrings.ProcessBatch_0_THE_INPUT_REGION_NAME_FOR_THE_BATCH_CREATE_REQUEST_1_IS_NULL;
@@ -381,15 +378,13 @@ public class GatewayReceiverCommand extends BaseCommand {
                   // This exception will be logged in the catch block below
                   throw new Exception(
                       LocalizedStrings.ProcessBatch_0_FAILED_TO_CREATE_OR_UPDATE_ENTRY_FOR_REGION_1_KEY_2_VALUE_3_CALLBACKARG_4
-                          .toLocalizedString(new Object[] {
-                            serverConnection.getName(), regionName, key,
-                              valuePart, callbackArg}));
+                          .toLocalizedString(new Object[] {serverConnection.getName(), regionName,
+                              key, valuePart, callbackArg}));
                 }
               } catch (Exception e) {
                 logger.warn(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_CREATE_REQUEST_1_FOR_2_EVENTS,
-                    new Object[] {
-                      serverConnection.getName(), Integer.valueOf(batchId),
+                    new Object[] {serverConnection.getName(), Integer.valueOf(batchId),
                         Integer.valueOf(numberOfEvents)}),
                     e);
                 throw e;
@@ -425,8 +420,7 @@ public class GatewayReceiverCommand extends BaseCommand {
               } catch (Exception e) {
                 logger.warn(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_UPDATE_REQUEST_1_CONTAINING_2_EVENTS,
-                    new Object[] {
-                      serverConnection.getName(), Integer.valueOf(batchId),
+                    new Object[] {serverConnection.getName(), Integer.valueOf(batchId),
                         Integer.valueOf(numberOfEvents)}),
                     e);
                 throw e;
@@ -436,13 +430,14 @@ public class GatewayReceiverCommand extends BaseCommand {
             if (logger.isDebugEnabled()) {
               logger.debug(
                   "{}: Processing batch update request {} on {} for region {} key {} value {} callbackArg {}",
-                  serverConnection.getName(), batchId, serverConnection.getSocketString(), regionName, key,
-                  valuePart, callbackArg);
+                  serverConnection.getName(), batchId, serverConnection.getSocketString(),
+                  regionName, key, valuePart, callbackArg);
             }
             // Process the update request
             if (key == null || regionName == null) {
               StringId message = null;
-              Object[] messageArgs = new Object[] { serverConnection.getName(), Integer.valueOf(batchId)};
+              Object[] messageArgs =
+                  new Object[] {serverConnection.getName(), Integer.valueOf(batchId)};
               if (key == null) {
                 message =
                     LocalizedStrings.ProcessBatch_0_THE_INPUT_KEY_FOR_THE_BATCH_UPDATE_REQUEST_1_IS_NULL;
@@ -490,8 +485,8 @@ public class GatewayReceiverCommand extends BaseCommand {
                   serverConnection.setModificationInfo(true, regionName, key);
                   stats.incUpdateRequest();
                 } else {
-                  final Object[] msgArgs =
-                      new Object[] { serverConnection.getName(), regionName, key, valuePart, callbackArg};
+                  final Object[] msgArgs = new Object[] {serverConnection.getName(), regionName,
+                      key, valuePart, callbackArg};
                   final StringId message =
                       LocalizedStrings.ProcessBatch_0_FAILED_TO_UPDATE_ENTRY_FOR_REGION_1_KEY_2_VALUE_3_AND_CALLBACKARG_4;
                   String s = message.toLocalizedString(msgArgs);
@@ -501,11 +496,12 @@ public class GatewayReceiverCommand extends BaseCommand {
               } catch (CancelException e) {
                 // FIXME better exception hierarchy would avoid this check
                 if (serverConnection.getCachedRegionHelper().getCache().getCancelCriterion()
-                                    .isCancelInProgress()) {
+                    .isCancelInProgress()) {
                   if (logger.isDebugEnabled()) {
                     logger.debug(
                         "{} ignoring message of type {} from client {} because shutdown occurred during message processing.",
-                        serverConnection.getName(), MessageType.getString(clientMessage.getMessageType()),
+                        serverConnection.getName(),
+                        MessageType.getString(clientMessage.getMessageType()),
                         serverConnection.getProxyID());
                   }
                   serverConnection.setFlagProcessMessagesAsFalse();
@@ -518,8 +514,7 @@ public class GatewayReceiverCommand extends BaseCommand {
                 // Preserve the connection under all circumstances
                 logger.warn(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_UPDATE_REQUEST_1_CONTAINING_2_EVENTS,
-                    new Object[] {
-                      serverConnection.getName(), Integer.valueOf(batchId),
+                    new Object[] {serverConnection.getName(), Integer.valueOf(batchId),
                         Integer.valueOf(numberOfEvents)}),
                     e);
                 throw e;
@@ -540,8 +535,7 @@ public class GatewayReceiverCommand extends BaseCommand {
               } catch (Exception e) {
                 logger.warn(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_DESTROY_REQUEST_1_CONTAINING_2_EVENTS,
-                    new Object[] {
-                      serverConnection.getName(), Integer.valueOf(batchId),
+                    new Object[] {serverConnection.getName(), Integer.valueOf(batchId),
                         Integer.valueOf(numberOfEvents)}),
                     e);
                 throw e;
@@ -551,7 +545,8 @@ public class GatewayReceiverCommand extends BaseCommand {
             versionTimeStamp = clientMessage.getPart(index++).getLong();
             if (logger.isDebugEnabled()) {
               logger.debug("{}: Processing batch destroy request {} on {} for region {} key {}",
-                  serverConnection.getName(), batchId, serverConnection.getSocketString(), regionName, key);
+                  serverConnection.getName(), batchId, serverConnection.getSocketString(),
+                  regionName, key);
             }
 
             // Process the destroy request
@@ -565,7 +560,8 @@ public class GatewayReceiverCommand extends BaseCommand {
                 message =
                     LocalizedStrings.ProcessBatch_0_THE_INPUT_REGION_NAME_FOR_THE_BATCH_DESTROY_REQUEST_1_IS_NULL;
               }
-              Object[] messageArgs = new Object[] { serverConnection.getName(), Integer.valueOf(batchId)};
+              Object[] messageArgs =
+                  new Object[] {serverConnection.getName(), Integer.valueOf(batchId)};
               String s = message.toLocalizedString(messageArgs);
               logger.warn(s);
               throw new Exception(s);
@@ -598,7 +594,7 @@ public class GatewayReceiverCommand extends BaseCommand {
               } catch (EntryNotFoundException e) {
                 logger.info(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_DURING_BATCH_DESTROY_NO_ENTRY_WAS_FOUND_FOR_KEY_1,
-                    new Object[] { serverConnection.getName(), key}));
+                    new Object[] {serverConnection.getName(), key}));
                 // throw new Exception(e);
               }
             }
@@ -633,8 +629,7 @@ public class GatewayReceiverCommand extends BaseCommand {
             } catch (Exception e) {
               logger.warn(LocalizedMessage.create(
                   LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_UPDATE_VERSION_REQUEST_1_CONTAINING_2_EVENTS,
-                  new Object[] {
-                    serverConnection.getName(), Integer.valueOf(batchId),
+                  new Object[] {serverConnection.getName(), Integer.valueOf(batchId),
                       Integer.valueOf(numberOfEvents)}),
                   e);
               throw e;
@@ -644,17 +639,16 @@ public class GatewayReceiverCommand extends BaseCommand {
             if (logger.isDebugEnabled()) {
               logger.debug(
                   "{}: Processing batch update-version request {} on {} for region {} key {} value {} callbackArg {}",
-                  serverConnection.getName(), batchId, serverConnection.getSocketString(), regionName, key,
-                  valuePart, callbackArg);
+                  serverConnection.getName(), batchId, serverConnection.getSocketString(),
+                  regionName, key, valuePart, callbackArg);
             }
             // Process the update time-stamp request
             if (key == null || regionName == null) {
               StringId message =
                   LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_UPDATE_VERSION_REQUEST_1_CONTAINING_2_EVENTS;
 
-              Object[] messageArgs = new Object[] {
-                serverConnection.getName(), Integer.valueOf(batchId),
-                  Integer.valueOf(numberOfEvents)};
+              Object[] messageArgs = new Object[] {serverConnection.getName(),
+                  Integer.valueOf(batchId), Integer.valueOf(numberOfEvents)};
               String s = message.toLocalizedString(messageArgs);
               logger.warn(s);
               throw new Exception(s);
@@ -679,13 +673,13 @@ public class GatewayReceiverCommand extends BaseCommand {
                 // Update the version tag
                 try {
 
-                  region.basicBridgeUpdateVersionStamp(key, callbackArg, serverConnection.getProxyID(),
-                      false, clientEvent);
+                  region.basicBridgeUpdateVersionStamp(key, callbackArg,
+                      serverConnection.getProxyID(), false, clientEvent);
 
                 } catch (EntryNotFoundException e) {
                   logger.info(LocalizedMessage.create(
                       LocalizedStrings.ProcessBatch_0_DURING_BATCH_UPDATE_VERSION_NO_ENTRY_WAS_FOUND_FOR_KEY_1,
-                      new Object[] { serverConnection.getName(), key}));
+                      new Object[] {serverConnection.getName(), key}));
                   // throw new Exception(e);
                 }
               }
@@ -695,8 +689,7 @@ public class GatewayReceiverCommand extends BaseCommand {
           default:
             logger.fatal(LocalizedMessage.create(
                 LocalizedStrings.Processbatch_0_UNKNOWN_ACTION_TYPE_1_FOR_BATCH_FROM_2,
-                new Object[] {
-                  serverConnection.getName(), Integer.valueOf(actionType),
+                new Object[] {serverConnection.getName(), Integer.valueOf(actionType),
                     serverConnection.getSocketString()}));
             stats.incUnknowsOperationsReceived();
         }
@@ -718,7 +711,7 @@ public class GatewayReceiverCommand extends BaseCommand {
         if (e.getCause() instanceof PdxRegistryMismatchException) {
           fatalException = e.getCause();
           logger.fatal(LocalizedMessage.create(LocalizedStrings.GatewayReceiver_PDX_CONFIGURATION,
-              new Object[] { serverConnection.getMembershipID()}), e.getCause());
+              new Object[] {serverConnection.getMembershipID()}), e.getCause());
           break;
         }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Get70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Get70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Get70.java
index 7017aa8..2ca8804 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Get70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Get70.java
@@ -98,8 +98,8 @@ public class Get70 extends BaseCommand {
     }
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received 7.0 get request ({} bytes) from {} for region {} key {} txId {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key,
-          clientMessage.getTransactionId());
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName, key, clientMessage.getTransactionId());
     }
 
     // Process the get request
@@ -197,14 +197,16 @@ public class Get70 extends BaseCommand {
       if (region instanceof PartitionedRegion) {
         PartitionedRegion pr = (PartitionedRegion) region;
         if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-          writeResponseWithRefreshMetadata(data, callbackArg, clientMessage, isObject, serverConnection, pr,
-              pr.getNetworkHopType(), versionTag, keyNotPresent);
+          writeResponseWithRefreshMetadata(data, callbackArg, clientMessage, isObject,
+              serverConnection, pr, pr.getNetworkHopType(), versionTag, keyNotPresent);
           pr.clearNetworkHopData();
         } else {
-          writeResponse(data, callbackArg, clientMessage, isObject, versionTag, keyNotPresent, serverConnection);
+          writeResponse(data, callbackArg, clientMessage, isObject, versionTag, keyNotPresent,
+              serverConnection);
         }
       } else {
-        writeResponse(data, callbackArg, clientMessage, isObject, versionTag, keyNotPresent, serverConnection);
+        writeResponse(data, callbackArg, clientMessage, isObject, versionTag, keyNotPresent,
+            serverConnection);
       }
     } finally {
       OffHeapHelper.release(originalData);

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll.java
index 5f7cb29..01c5c9c 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll.java
@@ -69,8 +69,9 @@ public class GetAll extends BaseCommand {
     if (logger.isDebugEnabled()) {
       StringBuffer buffer = new StringBuffer();
       buffer.append(serverConnection.getName()).append(": Received getAll request (")
-            .append(clientMessage.getPayloadLength()).append(" bytes) from ").append(serverConnection.getSocketString())
-            .append(" for region ").append(regionName).append(" keys ");
+          .append(clientMessage.getPayloadLength()).append(" bytes) from ")
+          .append(serverConnection.getSocketString()).append(" for region ").append(regionName)
+          .append(" keys ");
       if (keys != null) {
         for (int i = 0; i < keys.length; i++) {
           buffer.append(keys[i]).append(" ");
@@ -92,7 +93,8 @@ public class GetAll extends BaseCommand {
             .toLocalizedString();
       }
       logger.warn("{}: {}", serverConnection.getName(), message);
-      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message, serverConnection);
+      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll651.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll651.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll651.java
index b0a1915..ad8ef49 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll651.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll651.java
@@ -69,8 +69,9 @@ public class GetAll651 extends BaseCommand {
     if (logger.isDebugEnabled()) {
       StringBuffer buffer = new StringBuffer();
       buffer.append(serverConnection.getName()).append(": Received getAll request (")
-            .append(clientMessage.getPayloadLength()).append(" bytes) from ").append(serverConnection.getSocketString())
-            .append(" for region ").append(regionName).append(" keys ");
+          .append(clientMessage.getPayloadLength()).append(" bytes) from ")
+          .append(serverConnection.getSocketString()).append(" for region ").append(regionName)
+          .append(" keys ");
       if (keys != null) {
         for (int i = 0; i < keys.length; i++) {
           buffer.append(keys[i]).append(" ");
@@ -90,7 +91,8 @@ public class GetAll651 extends BaseCommand {
             .toLocalizedString();
       }
       logger.warn("{}: {}", serverConnection.getName(), message);
-      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message, serverConnection);
+      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll70.java
index 579593f..267a5b2 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll70.java
@@ -78,8 +78,9 @@ public class GetAll70 extends BaseCommand {
     if (logger.isDebugEnabled()) {
       StringBuffer buffer = new StringBuffer();
       buffer.append(serverConnection.getName()).append(": Received getAll request (")
-            .append(clientMessage.getPayloadLength()).append(" bytes) from ").append(serverConnection.getSocketString())
-            .append(" for region ").append(regionName).append(" keys ");
+          .append(clientMessage.getPayloadLength()).append(" bytes) from ")
+          .append(serverConnection.getSocketString()).append(" for region ").append(regionName)
+          .append(" keys ");
       if (keys != null) {
         for (int i = 0; i < keys.length; i++) {
           buffer.append(keys[i]).append(" ");
@@ -99,7 +100,8 @@ public class GetAll70 extends BaseCommand {
             .toLocalizedString();
       }
       logger.warn("{}: {}", serverConnection.getName(), message);
-      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message, serverConnection);
+      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -120,7 +122,8 @@ public class GetAll70 extends BaseCommand {
 
     // Send chunk response
     try {
-      fillAndSendGetAllResponseChunks(region, regionName, keys, serverConnection, requestSerializedValues);
+      fillAndSendGetAllResponseChunks(region, regionName, keys, serverConnection,
+          requestSerializedValues);
       serverConnection.setAsTrue(RESPONDED);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllWithCallback.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllWithCallback.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllWithCallback.java
index c6663de..db97d53 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllWithCallback.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllWithCallback.java
@@ -91,9 +91,9 @@ public class GetAllWithCallback extends BaseCommand {
     if (logger.isDebugEnabled()) {
       StringBuffer buffer = new StringBuffer();
       buffer.append(serverConnection.getName()).append(": Received getAll request (")
-            .append(clientMessage.getPayloadLength()).append(" bytes) from ").append(serverConnection.getSocketString())
-            .append(" for region ").append(regionName).append(" with callback ").append(callback)
-            .append(" keys ");
+          .append(clientMessage.getPayloadLength()).append(" bytes) from ")
+          .append(serverConnection.getSocketString()).append(" for region ").append(regionName)
+          .append(" with callback ").append(callback).append(" keys ");
       if (keys != null) {
         for (int i = 0; i < keys.length; i++) {
           buffer.append(keys[i]).append(" ");
@@ -113,8 +113,9 @@ public class GetAllWithCallback extends BaseCommand {
             .toLocalizedString();
       }
       logger.warn(LocalizedMessage.create(LocalizedStrings.TWO_ARG_COLON,
-          new Object[] { serverConnection.getName(), message}));
-      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message, serverConnection);
+          new Object[] {serverConnection.getName(), message}));
+      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand.java
index bcdbd08..201b5c0 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand.java
@@ -60,7 +60,8 @@ public class GetClientPRMetadataCommand extends BaseCommand {
           .create(LocalizedStrings.GetClientPRMetadata_THE_INPUT_REGION_PATH_IS_NULL));
       errMessage =
           LocalizedStrings.GetClientPRMetadata_THE_INPUT_REGION_PATH_IS_NULL.toLocalizedString();
-      writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR,
+          errMessage.toString(), serverConnection);
       serverConnection.setAsTrue(RESPONDED);
     } else {
       Region region = crHelper.getRegion(regionFullPath);
@@ -70,7 +71,8 @@ public class GetClientPRMetadataCommand extends BaseCommand {
             regionFullPath));
         errMessage = LocalizedStrings.GetClientPRMetadata_REGION_NOT_FOUND.toLocalizedString()
             + regionFullPath;
-        writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(), serverConnection);
+        writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR,
+            errMessage.toString(), serverConnection);
         serverConnection.setAsTrue(RESPONDED);
       } else {
         try {

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java
index 4c519a9..7b370fe 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java
@@ -58,7 +58,8 @@ public class GetClientPRMetadataCommand66 extends BaseCommand {
           .create(LocalizedStrings.GetClientPRMetadata_THE_INPUT_REGION_PATH_IS_NULL));
       errMessage =
           LocalizedStrings.GetClientPRMetadata_THE_INPUT_REGION_PATH_IS_NULL.toLocalizedString();
-      writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR,
+          errMessage.toString(), serverConnection);
       serverConnection.setAsTrue(RESPONDED);
     } else {
       Region region = crHelper.getRegion(regionFullPath);
@@ -68,7 +69,8 @@ public class GetClientPRMetadataCommand66 extends BaseCommand {
             regionFullPath));
         errMessage = LocalizedStrings.GetClientPRMetadata_REGION_NOT_FOUND.toLocalizedString()
             + regionFullPath;
-        writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(), serverConnection);
+        writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR,
+            errMessage.toString(), serverConnection);
         serverConnection.setAsTrue(RESPONDED);
       } else {
         try {

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetFunctionAttribute.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetFunctionAttribute.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetFunctionAttribute.java
index f56a4d9..2c1f26c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetFunctionAttribute.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetFunctionAttribute.java
@@ -34,7 +34,8 @@ public class GetFunctionAttribute extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
     String functionId = clientMessage.getPart(0).getString();
     if (functionId == null) {

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXEnumById.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXEnumById.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXEnumById.java
index cc7dd05..15215de 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXEnumById.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXEnumById.java
@@ -40,8 +40,9 @@ public class GetPDXEnumById extends BaseCommand {
       throws IOException, ClassNotFoundException {
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received get pdx enum by id request ({} parts) from {}", serverConnection.getName(),
-          clientMessage.getNumberOfParts(), serverConnection.getSocketString());
+      logger.debug("{}: Received get pdx enum by id request ({} parts) from {}",
+          serverConnection.getName(), clientMessage.getNumberOfParts(),
+          serverConnection.getSocketString());
     }
     int enumId = clientMessage.getPart(0).getInt();
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForEnum.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForEnum.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForEnum.java
index 7bf5b4f..40c62a7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForEnum.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForEnum.java
@@ -41,7 +41,8 @@ public class GetPDXIdForEnum extends BaseCommand {
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received get pdx id for enum request ({} parts) from {}",
-          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(),
+          serverConnection.getSocketString());
     }
 
     EnumInfo enumInfo = (EnumInfo) clientMessage.getPart(0).getObject();

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForType.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForType.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForType.java
index e5dc5f0..c1de0fc 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForType.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXIdForType.java
@@ -41,7 +41,8 @@ public class GetPDXIdForType extends BaseCommand {
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received get pdx id for type request ({} parts) from {}",
-          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(),
+          serverConnection.getSocketString());
     }
     int noOfParts = clientMessage.getNumberOfParts();
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXTypeById.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXTypeById.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXTypeById.java
index 032e8b3..16e9dd0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXTypeById.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetPDXTypeById.java
@@ -40,8 +40,9 @@ public class GetPDXTypeById extends BaseCommand {
       throws IOException, ClassNotFoundException {
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received get pdx type by id request ({} parts) from {}", serverConnection.getName(),
-          clientMessage.getNumberOfParts(), serverConnection.getSocketString());
+      logger.debug("{}: Received get pdx type by id request ({} parts) from {}",
+          serverConnection.getName(), clientMessage.getNumberOfParts(),
+          serverConnection.getSocketString());
     }
     int pdxId = clientMessage.getPart(0).getInt();
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalid.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalid.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalid.java
index 314ba07..040e1c2 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalid.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalid.java
@@ -36,11 +36,12 @@ public class Invalid extends BaseCommand {
   private Invalid() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     logger.error(
         LocalizedMessage.create(LocalizedStrings.Invalid_0_INVALID_MESSAGE_TYPE_WITH_TX_1_FROM_2,
-            new Object[] {
-              serverConnection.getName(), Integer.valueOf(clientMessage.getTransactionId()),
+            new Object[] {serverConnection.getName(),
+                Integer.valueOf(clientMessage.getTransactionId()),
                 serverConnection.getSocketString()}));
     writeErrorResponse(clientMessage, MessageType.INVALID, serverConnection);
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate.java
index 22bf6f4..801b3ad 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Invalidate.java
@@ -87,9 +87,9 @@ public class Invalidate extends BaseCommand {
       return;
     }
     if (logger.isDebugEnabled()) {
-      logger.debug(serverConnection.getName() + ": Received invalidate request (" + clientMessage.getPayloadLength()
-                   + " bytes) from " + serverConnection.getSocketString() + " for region " + regionName + " key "
-                   + key);
+      logger.debug(serverConnection.getName() + ": Received invalidate request ("
+          + clientMessage.getPayloadLength() + " bytes) from " + serverConnection.getSocketString()
+          + " for region " + regionName + " key " + key);
     }
 
     // Process the invalidate request
@@ -108,7 +108,8 @@ public class Invalidate extends BaseCommand {
             .append(LocalizedStrings.BaseCommand__THE_INPUT_REGION_NAME_FOR_THE_0_REQUEST_IS_NULL
                 .toLocalizedString("invalidate"));
       }
-      writeErrorResponse(clientMessage, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.DESTROY_DATA_ERROR, errMessage.toString(),
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -124,7 +125,8 @@ public class Invalidate extends BaseCommand {
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId =
+        new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     Breadcrumbs.setEventId(eventId);
 
@@ -157,7 +159,8 @@ public class Invalidate extends BaseCommand {
         }
       }
 
-      region.basicBridgeInvalidate(key, callbackArg, serverConnection.getProxyID(), true, clientEvent);
+      region.basicBridgeInvalidate(key, callbackArg, serverConnection.getProxyID(), true,
+          clientEvent);
       tag = clientEvent.getVersionTag();
       serverConnection.setModificationInfo(true, regionName, key);
     } catch (EntryNotFoundException e) {
@@ -199,7 +202,8 @@ public class Invalidate extends BaseCommand {
     if (region instanceof PartitionedRegion) {
       PartitionedRegion pr = (PartitionedRegion) region;
       if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-        writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, pr.getNetworkHopType(), tag);
+        writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, pr.getNetworkHopType(),
+            tag);
         pr.clearNetworkHopData();
       } else {
         writeReply(clientMessage, serverConnection, tag);

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/KeySet.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/KeySet.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/KeySet.java
index a35c4b0..66de347 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/KeySet.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/KeySet.java
@@ -62,7 +62,8 @@ public class KeySet extends BaseCommand {
     final boolean isDebugEnabled = logger.isDebugEnabled();
     if (isDebugEnabled) {
       logger.debug("{}: Received key set request ({} bytes) from {} for region {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName);
     }
 
     // Process the key set request
@@ -76,7 +77,8 @@ public class KeySet extends BaseCommand {
             LocalizedStrings.KeySet_0_THE_INPUT_REGION_NAME_FOR_THE_KEY_SET_REQUEST_IS_NULL,
             serverConnection.getName()));
       }
-      writeKeySetErrorResponse(clientMessage, MessageType.KEY_SET_DATA_ERROR, message, serverConnection);
+      writeKeySetErrorResponse(clientMessage, MessageType.KEY_SET_DATA_ERROR, message,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -128,7 +130,8 @@ public class KeySet extends BaseCommand {
       checkForInterrupt(serverConnection, e);
 
       // Otherwise, write an exception message and continue
-      writeChunkedException(clientMessage, e, serverConnection, serverConnection.getChunkedResponseMessage());
+      writeChunkedException(clientMessage, e, serverConnection,
+          serverConnection.getChunkedResponseMessage());
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -137,7 +140,8 @@ public class KeySet extends BaseCommand {
       // logger.fine(getName() + ": Sent chunk (1 of 1) of register interest
       // response (" + chunkedResponseMsg.getBufferLength() + " bytes) for
       // region " + regionName + " key " + key);
-      logger.debug("{}: Sent key set response for the region {}", serverConnection.getName(), regionName);
+      logger.debug("{}: Sent key set response for the region {}", serverConnection.getName(),
+          regionName);
     }
     // bserverStats.incLong(writeDestroyResponseTimeId,
     // DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MakePrimary.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MakePrimary.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MakePrimary.java
index 0786990..ac9901c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MakePrimary.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MakePrimary.java
@@ -43,11 +43,12 @@ public class MakePrimary extends BaseCommand {
     final boolean isDebugEnabled = logger.isDebugEnabled();
     if (isDebugEnabled) {
       logger.debug("{}: Received make primary request ({} bytes) isClientReady={}: from {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), isClientReady, serverConnection.getSocketString());
+          serverConnection.getName(), clientMessage.getPayloadLength(), isClientReady,
+          serverConnection.getSocketString());
     }
     try {
-      serverConnection.getAcceptor().getCacheClientNotifier().makePrimary(serverConnection.getProxyID(),
-          isClientReady);
+      serverConnection.getAcceptor().getCacheClientNotifier()
+          .makePrimary(serverConnection.getProxyID(), isClientReady);
       writeReply(clientMessage, serverConnection);
       serverConnection.setAsTrue(RESPONDED);
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PeriodicAck.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PeriodicAck.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PeriodicAck.java
index e57385f..121ec37 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PeriodicAck.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PeriodicAck.java
@@ -37,8 +37,9 @@ public class PeriodicAck extends BaseCommand {
       throws IOException, ClassNotFoundException {
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received periodic ack request ({} bytes) from {}", serverConnection.getName(),
-          clientMessage.getPayloadLength(), serverConnection.getSocketString());
+      logger.debug("{}: Received periodic ack request ({} bytes) from {}",
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString());
     }
     try {
       int numEvents = clientMessage.getNumberOfParts();

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Ping.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Ping.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Ping.java
index 9755410..adcfd14 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Ping.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Ping.java
@@ -39,11 +39,13 @@ public class Ping extends BaseCommand {
   private Ping() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     final boolean isDebugEnabled = logger.isDebugEnabled();
     if (isDebugEnabled) {
-      logger.debug("{}: rcv tx: {} from {} rcvTime: {}", serverConnection.getName(), clientMessage.getTransactionId(),
-          serverConnection.getSocketString(), (DistributionStats.getStatTime() - start));
+      logger.debug("{}: rcv tx: {} from {} rcvTime: {}", serverConnection.getName(),
+          clientMessage.getTransactionId(), serverConnection.getSocketString(),
+          (DistributionStats.getStatTime() - start));
     }
     ClientHealthMonitor chm = ClientHealthMonitor.getInstance();
     if (chm != null)
@@ -53,7 +55,8 @@ public class Ping extends BaseCommand {
     writeReply(clientMessage, serverConnection);
     serverConnection.setAsTrue(RESPONDED);
     if (isDebugEnabled) {
-      logger.debug("{}: Sent ping reply to {}", serverConnection.getName(), serverConnection.getSocketString());
+      logger.debug("{}: Sent ping reply to {}", serverConnection.getName(),
+          serverConnection.getSocketString());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put.java
index d724f66..76c8184 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put.java
@@ -94,15 +94,16 @@ public class Put extends BaseCommand {
 
     if (logger.isTraceEnabled()) {
       logger.trace("{}: Received put request ({} bytes) from {} for region {} key {} value {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key,
-          valuePart);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName, key, valuePart);
     }
 
     // Process the put request
     if (key == null || regionName == null) {
       if (key == null) {
         logger.warn(LocalizedMessage.create(
-            LocalizedStrings.Put_0_THE_INPUT_KEY_FOR_THE_PUT_REQUEST_IS_NULL, serverConnection.getName()));
+            LocalizedStrings.Put_0_THE_INPUT_KEY_FOR_THE_PUT_REQUEST_IS_NULL,
+            serverConnection.getName()));
         errMessage =
             LocalizedStrings.Put_THE_INPUT_KEY_FOR_THE_PUT_REQUEST_IS_NULL.toLocalizedString();
       }
@@ -113,7 +114,8 @@ public class Put extends BaseCommand {
         errMessage = LocalizedStrings.Put_THE_INPUT_REGION_NAME_FOR_THE_PUT_REQUEST_IS_NULL
             .toLocalizedString();
       }
-      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -131,7 +133,7 @@ public class Put extends BaseCommand {
       // Invalid to 'put' a null value in an existing key
       logger.info(LocalizedMessage.create(
           LocalizedStrings.Put_0_ATTEMPTED_TO_PUT_A_NULL_VALUE_FOR_EXISTING_KEY_1,
-          new Object[] { serverConnection.getName(), key}));
+          new Object[] {serverConnection.getName(), key}));
       errMessage =
           LocalizedStrings.Put_ATTEMPTED_TO_PUT_A_NULL_VALUE_FOR_EXISTING_KEY_0.toLocalizedString();
       writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage, serverConnection);
@@ -142,7 +144,8 @@ public class Put extends BaseCommand {
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId =
+        new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     try {
       byte[] value = valuePart.getSerializedForm();
@@ -173,8 +176,8 @@ public class Put extends BaseCommand {
         // Create the null entry. Since the value is null, the value of the
         // isObject
         // the true after null doesn't matter and is not used.
-        result = region.basicBridgeCreate(key, null, true, callbackArg, serverConnection.getProxyID(), true,
-            new EventIDHolder(eventId), false);
+        result = region.basicBridgeCreate(key, null, true, callbackArg,
+            serverConnection.getProxyID(), true, new EventIDHolder(eventId), false);
       } else {
         // Put the entry
         result = region.basicBridgePut(key, value, null, isObject, callbackArg,
@@ -184,7 +187,8 @@ public class Put extends BaseCommand {
         serverConnection.setModificationInfo(true, regionName, key);
       } else {
         StringId message = LocalizedStrings.PUT_0_FAILED_TO_PUT_ENTRY_FOR_REGION_1_KEY_2_VALUE_3;
-        Object[] messageArgs = new Object[] { serverConnection.getName(), regionName, key, valuePart};
+        Object[] messageArgs =
+            new Object[] {serverConnection.getName(), regionName, key, valuePart};
         String s = message.toLocalizedString(messageArgs);
         logger.info(s);
         throw new Exception(s);
@@ -227,7 +231,8 @@ public class Put extends BaseCommand {
     serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Sent put response back to {} for region {} key {} value {}",
-          serverConnection.getName(), serverConnection.getSocketString(), regionName, key, valuePart);
+          serverConnection.getName(), serverConnection.getSocketString(), regionName, key,
+          valuePart);
     }
     stats.incWritePutResponseTime(DistributionStats.getStatTime() - start);
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put61.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put61.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put61.java
index 3f9a72e..6f1583a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put61.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put61.java
@@ -130,7 +130,8 @@ public class Put61 extends BaseCommand {
         }
         errMessage.append(putMsg);
       }
-      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -150,7 +151,8 @@ public class Put61 extends BaseCommand {
         logger.debug("{}:{}", serverConnection.getName(), putMsg);
       }
       errMessage.append(putMsg);
-      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -160,7 +162,8 @@ public class Put61 extends BaseCommand {
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId =
+        new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     try {
       Object value = null;
@@ -199,8 +202,8 @@ public class Put61 extends BaseCommand {
         // Create the null entry. Since the value is null, the value of the
         // isObject
         // the true after null doesn't matter and is not used.
-        result = region.basicBridgeCreate(key, null, true, callbackArg, serverConnection.getProxyID(), true,
-            new EventIDHolder(eventId), false);
+        result = region.basicBridgeCreate(key, null, true, callbackArg,
+            serverConnection.getProxyID(), true, new EventIDHolder(eventId), false);
       } else {
         // Put the entry
         byte[] delta = null;
@@ -213,8 +216,8 @@ public class Put61 extends BaseCommand {
       if (result) {
         serverConnection.setModificationInfo(true, regionName, key);
       } else {
-        String message = serverConnection.getName() + ": Failed to 6.1 put entry for region " + regionName
-                         + " key " + key + " value " + valuePart;
+        String message = serverConnection.getName() + ": Failed to 6.1 put entry for region "
+            + regionName + " key " + key + " value " + valuePart;
         if (isDebugEnabled) {
           logger.debug(message);
         }
@@ -275,7 +278,8 @@ public class Put61 extends BaseCommand {
     serverConnection.setAsTrue(RESPONDED);
     if (isDebugEnabled) {
       logger.debug("{}: Sent 6.1 put response back to {} for region {} key {} value {}",
-          serverConnection.getName(), serverConnection.getSocketString(), regionName, key, valuePart);
+          serverConnection.getName(), serverConnection.getSocketString(), regionName, key,
+          valuePart);
     }
     stats.incWritePutResponseTime(DistributionStats.getStatTime() - start);
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put65.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put65.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put65.java
index 581aec6..a078372 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put65.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Put65.java
@@ -146,7 +146,8 @@ public class Put65 extends BaseCommand {
       logger.debug(
           "{}: Received {}put request ({} bytes) from {} for region {} key {} txId {} posdup: {}",
           serverConnection.getName(), (isDelta ? " delta " : " "), clientMessage.getPayloadLength(),
-          serverConnection.getSocketString(), regionName, key, clientMessage.getTransactionId(), clientMessage.isRetry());
+          serverConnection.getSocketString(), regionName, key, clientMessage.getTransactionId(),
+          clientMessage.isRetry());
     }
 
     // Process the put request
@@ -165,7 +166,8 @@ public class Put65 extends BaseCommand {
         }
         errMessage.append(putMsg);
       }
-      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -185,7 +187,8 @@ public class Put65 extends BaseCommand {
         logger.debug("{}:{}", serverConnection.getName(), putMsg);
       }
       errMessage.append(putMsg);
-      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -194,8 +197,8 @@ public class Put65 extends BaseCommand {
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
 
-    EventIDHolder clientEvent =
-        new EventIDHolder(new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId));
+    EventIDHolder clientEvent = new EventIDHolder(
+        new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId));
 
     Breadcrumbs.setEventId(clientEvent.getEventId());
 
@@ -267,8 +270,8 @@ public class Put65 extends BaseCommand {
           }
           // invoke basicBridgePutIfAbsent anyway to ensure that the event is distributed to all
           // servers - bug #51664
-          region.basicBridgePutIfAbsent(key, value, isObject, callbackArg, serverConnection.getProxyID(),
-              true, clientEvent);
+          region.basicBridgePutIfAbsent(key, value, isObject, callbackArg,
+              serverConnection.getProxyID(), true, clientEvent);
           oldValue = null;
         } else {
           oldValue = region.basicBridgePutIfAbsent(key, value, isObject, callbackArg,
@@ -356,8 +359,8 @@ public class Put65 extends BaseCommand {
         // Create the null entry. Since the value is null, the value of the
         // isObject
         // the true after null doesn't matter and is not used.
-        result = region.basicBridgeCreate(key, null, true, callbackArg, serverConnection.getProxyID(), true,
-            clientEvent, false);
+        result = region.basicBridgeCreate(key, null, true, callbackArg,
+            serverConnection.getProxyID(), true, clientEvent, false);
         if (clientMessage.isRetry() && clientEvent.isConcurrencyConflict()
             && clientEvent.getVersionTag() != null) {
           result = true;
@@ -372,7 +375,8 @@ public class Put65 extends BaseCommand {
         if (isDelta) {
           delta = valuePart.getSerializedForm();
         }
-        TXManagerImpl txMgr = (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
+        TXManagerImpl txMgr =
+            (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
         // bug 43068 - use create() if in a transaction and op is CREATE
         if (txMgr.getTXState() != null && operation.isCreate()) {
           result = region.basicBridgeCreate(key, (byte[]) value, isObject, callbackArg,
@@ -393,8 +397,8 @@ public class Put65 extends BaseCommand {
       if (result) {
         serverConnection.setModificationInfo(true, regionName, key);
       } else {
-        String message = serverConnection.getName() + ": Failed to put entry for region " + regionName
-                         + " key " + key + " value " + valuePart;
+        String message = serverConnection.getName() + ": Failed to put entry for region "
+            + regionName + " key " + key + " value " + valuePart;
         if (isDebugEnabled) {
           logger.debug(message);
         }
@@ -443,8 +447,8 @@ public class Put65 extends BaseCommand {
     if (region instanceof PartitionedRegion) {
       PartitionedRegion pr = (PartitionedRegion) region;
       if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-        writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, sendOldValue, oldValueIsObject, oldValue,
-            pr.getNetworkHopType(), clientEvent.getVersionTag());
+        writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, sendOldValue,
+            oldValueIsObject, oldValue, pr.getNetworkHopType(), clientEvent.getVersionTag());
         pr.clearNetworkHopData();
       } else {
         writeReply(clientMessage, serverConnection, sendOldValue, oldValueIsObject, oldValue,
@@ -457,7 +461,8 @@ public class Put65 extends BaseCommand {
     serverConnection.setAsTrue(RESPONDED);
     if (isDebugEnabled) {
       logger.debug("{}: Sent put response back to {} for region {} key {} value {}",
-          serverConnection.getName(), serverConnection.getSocketString(), regionName, key, valuePart);
+          serverConnection.getName(), serverConnection.getSocketString(), regionName, key,
+          valuePart);
     }
     stats.incWritePutResponseTime(DistributionStats.getStatTime() - start);
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll.java
index 281f737..ba8b145 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll.java
@@ -91,7 +91,8 @@ public class PutAll extends BaseCommand {
                 .toLocalizedString();
         logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
         errMessage.append(putAllMsg);
-        writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+        writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+            serverConnection);
         serverConnection.setAsTrue(RESPONDED);
         return;
       }
@@ -108,7 +109,8 @@ public class PutAll extends BaseCommand {
       ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
       long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
       long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-      EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+      EventID eventId =
+          new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
       // part 2: number of keys
       numberOfKeysPart = clientMessage.getPart(2);
@@ -126,7 +128,8 @@ public class PutAll extends BaseCommand {
                   .toLocalizedString();
           logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+              serverConnection);
           serverConnection.setAsTrue(RESPONDED);
           return;
         }
@@ -138,7 +141,8 @@ public class PutAll extends BaseCommand {
                   .toLocalizedString();
           logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+              serverConnection);
           serverConnection.setAsTrue(RESPONDED);
           return;
         }
@@ -155,8 +159,9 @@ public class PutAll extends BaseCommand {
         // isObjectMap.put(key, new Boolean(isObject));
       } // for
 
-      if (clientMessage.getNumberOfParts() == (3 + 2 * numberOfKeys + 1)) {// it means optional timeout has
-                                                                 // been added
+      if (clientMessage.getNumberOfParts() == (3 + 2 * numberOfKeys + 1)) {// it means optional
+                                                                           // timeout has
+        // been added
         int timeout = clientMessage.getPart(3 + 2 * numberOfKeys).getInt();
         serverConnection.setRequestSpecificTimeout(timeout);
       }
@@ -179,7 +184,8 @@ public class PutAll extends BaseCommand {
 
       if (logger.isDebugEnabled()) {
         logger.debug("{}: Received putAll request ({} bytes) from {} for region {}",
-            serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName);
+            serverConnection.getName(), clientMessage.getPayloadLength(),
+            serverConnection.getSocketString(), regionName);
       }
 
       region.basicBridgePutAll(map, Collections.<Object, VersionTag>emptyMap(),
@@ -188,7 +194,8 @@ public class PutAll extends BaseCommand {
       if (region instanceof PartitionedRegion) {
         PartitionedRegion pr = (PartitionedRegion) region;
         if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-          writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, pr.getNetworkHopType());
+          writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr,
+              pr.getNetworkHopType());
           pr.clearNetworkHopData();
           replyWithMetaData = true;
         }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll70.java
index ae2de09..fb2bdb8 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll70.java
@@ -97,7 +97,8 @@ public class PutAll70 extends BaseCommand {
                 .toLocalizedString();
         logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
         errMessage.append(putAllMsg);
-        writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+        writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+            serverConnection);
         serverConnection.setAsTrue(RESPONDED);
         return;
       }
@@ -114,7 +115,8 @@ public class PutAll70 extends BaseCommand {
       ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
       long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
       long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-      EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+      EventID eventId =
+          new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
       // part 2: invoke callbacks (used by import)
       Part callbacksPart = clientMessage.getPart(2);
@@ -137,7 +139,8 @@ public class PutAll70 extends BaseCommand {
                   .toLocalizedString();
           logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+              serverConnection);
           serverConnection.setAsTrue(RESPONDED);
           return;
         }
@@ -149,7 +152,8 @@ public class PutAll70 extends BaseCommand {
                   .toLocalizedString();
           logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+              serverConnection);
           serverConnection.setAsTrue(RESPONDED);
           return;
         }
@@ -198,8 +202,9 @@ public class PutAll70 extends BaseCommand {
         // isObjectMap.put(key, new Boolean(isObject));
       } // for
 
-      if (clientMessage.getNumberOfParts() == (4 + 2 * numberOfKeys + 1)) {// it means optional timeout has
-                                                                 // been added
+      if (clientMessage.getNumberOfParts() == (4 + 2 * numberOfKeys + 1)) {// it means optional
+                                                                           // timeout has
+        // been added
         int timeout = clientMessage.getPart(4 + 2 * numberOfKeys).getInt();
         serverConnection.setRequestSpecificTimeout(timeout);
       }
@@ -231,11 +236,12 @@ public class PutAll70 extends BaseCommand {
 
       if (logger.isDebugEnabled()) {
         logger.debug("{}: Received putAll request ({} bytes) from {} for region {}",
-            serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName);
+            serverConnection.getName(), clientMessage.getPayloadLength(),
+            serverConnection.getSocketString(), regionName);
       }
 
-      response = region.basicBridgePutAll(map, retryVersions, serverConnection.getProxyID(), eventId,
-          skipCallbacks, null);
+      response = region.basicBridgePutAll(map, retryVersions, serverConnection.getProxyID(),
+          eventId, skipCallbacks, null);
       if (!region.getConcurrencyChecksEnabled()) {
         // the client only needs this if versioning is being used
         response = null;
@@ -244,7 +250,8 @@ public class PutAll70 extends BaseCommand {
       if (region instanceof PartitionedRegion) {
         PartitionedRegion pr = (PartitionedRegion) region;
         if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-          writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr, pr.getNetworkHopType());
+          writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr,
+              pr.getNetworkHopType());
           pr.clearNetworkHopData();
           replyWithMetaData = true;
         }
@@ -279,8 +286,8 @@ public class PutAll70 extends BaseCommand {
       stats.incProcessPutAllTime(start - oldStart);
     }
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sending putAll70 response back to {} for region {}: {}", serverConnection.getName(),
-          serverConnection.getSocketString(), regionName, response);
+      logger.debug("{}: Sending putAll70 response back to {} for region {}: {}",
+          serverConnection.getName(), serverConnection.getSocketString(), regionName, response);
     }
     // Starting in 7.0.1 we do not send the keys back
     if (response != null && Version.GFE_70.compareTo(serverConnection.getClientVersion()) < 0) {

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll80.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll80.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll80.java
index aed5926..c6c26f7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll80.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutAll80.java
@@ -111,7 +111,8 @@ public class PutAll80 extends BaseCommand {
                 .toLocalizedString();
         logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
         errMessage.append(putAllMsg);
-        writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+        writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+            serverConnection);
         serverConnection.setAsTrue(RESPONDED);
         return;
       }
@@ -131,7 +132,8 @@ public class PutAll80 extends BaseCommand {
       ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
       long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
       long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-      EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+      EventID eventId =
+          new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
       Breadcrumbs.setEventId(eventId);
 
@@ -152,10 +154,11 @@ public class PutAll80 extends BaseCommand {
 
       if (logger.isDebugEnabled()) {
         StringBuilder buffer = new StringBuilder();
-        buffer.append(serverConnection.getName()).append(": Received ").append(this.putAllClassName())
-              .append(" request from ").append(serverConnection.getSocketString()).append(" for region ")
-              .append(regionName).append(callbackArg != null ? (" callbackArg " + callbackArg) : "")
-              .append(" with ").append(numberOfKeys).append(" entries.");
+        buffer.append(serverConnection.getName()).append(": Received ")
+            .append(this.putAllClassName()).append(" request from ")
+            .append(serverConnection.getSocketString()).append(" for region ").append(regionName)
+            .append(callbackArg != null ? (" callbackArg " + callbackArg) : "").append(" with ")
+            .append(numberOfKeys).append(" entries.");
         logger.debug(buffer.toString());
       }
       // building the map
@@ -171,7 +174,8 @@ public class PutAll80 extends BaseCommand {
                   .toLocalizedString();
           logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR,
+              errMessage.toString(), serverConnection);
           serverConnection.setAsTrue(RESPONDED);
           return;
         }
@@ -183,7 +187,8 @@ public class PutAll80 extends BaseCommand {
                   .toLocalizedString();
           logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
           errMessage.append(putAllMsg);
-          writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR,
+              errMessage.toString(), serverConnection);
           serverConnection.setAsTrue(RESPONDED);
           return;
         }
@@ -232,9 +237,10 @@ public class PutAll80 extends BaseCommand {
         // isObjectMap.put(key, new Boolean(isObject));
       } // for
 
-      if (clientMessage.getNumberOfParts() == (BASE_PART_COUNT + 2 * numberOfKeys + 1)) {// it means optional
-                                                                               // timeout has been
-                                                                               // added
+      if (clientMessage.getNumberOfParts() == (BASE_PART_COUNT + 2 * numberOfKeys + 1)) {// it means
+                                                                                         // optional
+        // timeout has been
+        // added
         int timeout = clientMessage.getPart(BASE_PART_COUNT + 2 * numberOfKeys).getInt();
         serverConnection.setRequestSpecificTimeout(timeout);
       }
@@ -265,8 +271,8 @@ public class PutAll80 extends BaseCommand {
          */
       }
 
-      response = region.basicBridgePutAll(map, retryVersions, serverConnection.getProxyID(), eventId,
-          skipCallbacks, callbackArg);
+      response = region.basicBridgePutAll(map, retryVersions, serverConnection.getProxyID(),
+          eventId, skipCallbacks, callbackArg);
       if (!region.getConcurrencyChecksEnabled() || clientIsEmpty || !clientHasCCEnabled) {
         // the client only needs this if versioning is being used and the client
         // has storage
@@ -281,7 +287,8 @@ public class PutAll80 extends BaseCommand {
       if (region instanceof PartitionedRegion) {
         PartitionedRegion pr = (PartitionedRegion) region;
         if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-          writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr, pr.getNetworkHopType());
+          writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr,
+              pr.getNetworkHopType());
           pr.clearNetworkHopData();
           replyWithMetaData = true;
         }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutUserCredentials.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutUserCredentials.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutUserCredentials.java
index dc3de67..ea5c875 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutUserCredentials.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/PutUserCredentials.java
@@ -51,16 +51,17 @@ public class PutUserCredentials extends BaseCommand {
           writeResponse(uniqueId, null, clientMessage, false, serverConnection);
         } catch (GemFireSecurityException gfse) {
           if (serverConnection.getSecurityLogWriter().warningEnabled()) {
-            serverConnection.getSecurityLogWriter().warning(LocalizedStrings.ONE_ARG, serverConnection.getName()
-                                                                                      + ": Security exception: " + gfse.toString()
-                                                                                      + (gfse.getCause() != null ? ", caused by: " + gfse.getCause().toString() : ""));
+            serverConnection.getSecurityLogWriter().warning(LocalizedStrings.ONE_ARG,
+                serverConnection.getName() + ": Security exception: " + gfse.toString()
+                    + (gfse.getCause() != null ? ", caused by: " + gfse.getCause().toString()
+                        : ""));
           }
           writeException(clientMessage, gfse, false, serverConnection);
         } catch (Exception ex) {
           if (serverConnection.getLogWriter().warningEnabled()) {
             serverConnection.getLogWriter().warning(
                 LocalizedStrings.CacheClientNotifier_AN_EXCEPTION_WAS_THROWN_FOR_CLIENT_0_1,
-                new Object[] { serverConnection.getProxyID(), ""}, ex);
+                new Object[] {serverConnection.getProxyID(), ""}, ex);
           }
           writeException(clientMessage, ex, false, serverConnection);
         } finally {

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query.java
index 8b5b35e..b7d2810 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query.java
@@ -88,7 +88,8 @@ public class Query extends BaseCommandQuery {
         }
       }
 
-      processQuery(clientMessage, query, queryString, regionNames, start, null, queryContext, serverConnection, true);
+      processQuery(clientMessage, query, queryString, regionNames, start, null, queryContext,
+          serverConnection, true);
     } catch (QueryInvalidException e) {
       throw new QueryInvalidException(e.getMessage() + queryString);
     } catch (QueryExecutionLowMemoryException e) {

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query651.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query651.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query651.java
index 97f5d56..e52fa3f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query651.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Query651.java
@@ -61,8 +61,9 @@ public class Query651 extends BaseCommandQuery {
         int params = clientMessage.getPart(1).getInt(); // Number of parameters.
         // In case of native client there will be extra two parameters at 2 and 3 index.
         int paramStartIndex = 2;
-        if (clientMessage.getNumberOfParts() > (1 /* type */ + 1 /* query string */ + 1 /* params length */
-                                                + params /* number of params */)) {
+        if (clientMessage
+            .getNumberOfParts() > (1 /* type */ + 1 /* query string */ + 1 /* params length */
+                + params /* number of params */)) {
           int timeout = clientMessage.getPart(3).getInt();
           serverConnection.setRequestSpecificTimeout(timeout);
           paramStartIndex = 4;
@@ -85,8 +86,8 @@ public class Query651 extends BaseCommandQuery {
     }
 
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received query request from {} queryString: {}{}", serverConnection.getName(),
-          serverConnection.getSocketString(), queryString,
+      logger.debug("{}: Received query request from {} queryString: {}{}",
+          serverConnection.getName(), serverConnection.getSocketString(), queryString,
           (queryParams != null ? (" with num query parameters :" + queryParams.length) : ""));
     }
     try {
@@ -128,7 +129,8 @@ public class Query651 extends BaseCommandQuery {
         }
       }
 
-      processQueryUsingParams(clientMessage, query, queryString, regionNames, start, null, queryContext, serverConnection, true, queryParams);
+      processQueryUsingParams(clientMessage, query, queryString, regionNames, start, null,
+          queryContext, serverConnection, true, queryParams);
     } catch (QueryInvalidException e) {
       throw new QueryInvalidException(e.getMessage() + queryString);
     }


[23/43] geode git commit: Cleanup BaseCommand

Posted by kl...@apache.org.
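
The hunks in this commit consistently rename the cmdExecute parameters (msg becomes clientMessage, servConn becomes serverConnection) and reflow long lines. For reference only, a minimal, hypothetical sketch of the renamed signature, using simplified stand-in types rather than the real Geode BaseCommand hierarchy:

    // Hypothetical stand-ins, not the actual Geode classes; they only illustrate
    // the parameter-naming cleanup applied in the hunks below.
    class Message {}

    class ServerConnection {
      String getName() { return "server-1"; }
    }

    abstract class CommandSketch {
      // before: public void cmdExecute(Message msg, ServerConnection servConn, long start)
      // after:  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
      public abstract void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start);
    }

    class ExampleCommand extends CommandSketch {
      @Override
      public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) {
        // The descriptive names make clear which object is the client's request
        // and which is the server-side connection handling it.
        System.out.println(serverConnection.getName() + ": received " + clientMessage + " at " + start);
      }
    }
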
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GatewayReceiverCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GatewayReceiverCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GatewayReceiverCommand.java
index d44a4ad..704f2da 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GatewayReceiverCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GatewayReceiverCommand.java
@@ -79,14 +79,14 @@ public class GatewayReceiverCommand extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keyPart = null, valuePart = null, callbackArgPart = null;
     String regionName = null;
     Object callbackArg = null, key = null;
     int partNumber = 0;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    GatewayReceiverStats stats = (GatewayReceiverStats) servConn.getCacheServerStats();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    GatewayReceiverStats stats = (GatewayReceiverStats) serverConnection.getCacheServerStats();
     EventID eventId = null;
     LocalRegion region = null;
     List<BatchException70> exceptions = new ArrayList<BatchException70>();
@@ -102,20 +102,20 @@ public class GatewayReceiverCommand extends BaseCommand {
     // statement so that all messages can take advantage of it.
     boolean earlyAck = false;// msg.getEarlyAck();
 
-    stats.incBatchSize(msg.getPayloadLength());
+    stats.incBatchSize(clientMessage.getPayloadLength());
 
     // Retrieve the number of events
-    Part numberOfEventsPart = msg.getPart(0);
+    Part numberOfEventsPart = clientMessage.getPart(0);
     int numberOfEvents = numberOfEventsPart.getInt();
     stats.incEventsReceived(numberOfEvents);
 
     // Retrieve the batch id
-    Part batchIdPart = msg.getPart(1);
+    Part batchIdPart = clientMessage.getPart(1);
     int batchId = batchIdPart.getInt();
 
     // If this batch has already been seen, do not reply.
     // Instead, drop the batch and continue.
-    if (batchId <= servConn.getLatestBatchIdReplied()) {
+    if (batchId <= serverConnection.getLatestBatchIdReplied()) {
       if (GatewayReceiver.APPLY_RETRIES) {
         // Do nothing!!!
         logger.warn(LocalizedMessage.create(
@@ -125,17 +125,17 @@ public class GatewayReceiverCommand extends BaseCommand {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.ProcessBatch_RECEIVED_PROCESS_BATCH_REQUEST_0_THAT_HAS_ALREADY_BEEN_OR_IS_BEING_PROCESSED__THIS_PROCESS_BATCH_REQUEST_IS_BEING_IGNORED,
             batchId));
-        writeReply(msg, servConn, batchId, numberOfEvents);
+        writeReply(clientMessage, serverConnection, batchId, numberOfEvents);
         return;
       }
       stats.incDuplicateBatchesReceived();
     }
 
     // Verify the batches arrive in order
-    if (batchId != servConn.getLatestBatchIdReplied() + 1) {
+    if (batchId != serverConnection.getLatestBatchIdReplied() + 1) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.ProcessBatch_RECEIVED_PROCESS_BATCH_REQUEST_0_OUT_OF_ORDER_THE_ID_OF_THE_LAST_BATCH_PROCESSED_WAS_1_THIS_BATCH_REQUEST_WILL_BE_PROCESSED_BUT_SOME_MESSAGES_MAY_HAVE_BEEN_LOST,
-          new Object[] {batchId, servConn.getLatestBatchIdReplied()}));
+          new Object[] {batchId, serverConnection.getLatestBatchIdReplied()}));
       stats.incOutoforderBatchesReceived();
     }
 
@@ -146,7 +146,7 @@ public class GatewayReceiverCommand extends BaseCommand {
     // If early ack mode, acknowledge right away
     // Not sure if earlyAck makes sense with sliding window
     if (earlyAck) {
-      servConn.incrementLatestBatchIdReplied(batchId);
+      serverConnection.incrementLatestBatchIdReplied(batchId);
 
       // writeReply(msg, servConn);
       // servConn.setAsTrue(RESPONDED);
@@ -162,13 +162,13 @@ public class GatewayReceiverCommand extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received process batch request {} containing {} events ({} bytes) with {} acknowledgement on {}",
-          servConn.getName(), batchId, numberOfEvents, msg.getPayloadLength(),
-          (earlyAck ? "early" : "normal"), servConn.getSocketString());
+          serverConnection.getName(), batchId, numberOfEvents, clientMessage.getPayloadLength(),
+          (earlyAck ? "early" : "normal"), serverConnection.getSocketString());
       if (earlyAck) {
         logger.debug(
             "{}: Sent process batch early response for batch {} containing {} events ({} bytes) with {} acknowledgement on {}",
-            servConn.getName(), batchId, numberOfEvents, msg.getPayloadLength(),
-            (earlyAck ? "early" : "normal"), servConn.getSocketString());
+            serverConnection.getName(), batchId, numberOfEvents, clientMessage.getPayloadLength(),
+            (earlyAck ? "early" : "normal"), serverConnection.getSocketString());
       }
     }
     // logger.warn("Received process batch request " + batchId + " containing
@@ -185,10 +185,10 @@ public class GatewayReceiverCommand extends BaseCommand {
     // Retrieve the events from the message parts. The '2' below
     // represents the number of events (part0) and the batchId (part1)
     partNumber = 2;
-    int dsid = msg.getPart(partNumber++).getInt();
+    int dsid = clientMessage.getPart(partNumber++).getInt();
 
     boolean removeOnException =
-        msg.getPart(partNumber++).getSerializedForm()[0] == 1 ? true : false;
+      clientMessage.getPart(partNumber++).getSerializedForm()[0] == 1 ? true : false;
 
     // Keep track of whether a response has been written for
     // exceptions
@@ -202,7 +202,7 @@ public class GatewayReceiverCommand extends BaseCommand {
       indexWithoutPDXEvent++;
       // System.out.println("Processing event " + i + " in batch " + batchId + "
       // starting with part number " + partNumber);
-      Part actionTypePart = msg.getPart(partNumber);
+      Part actionTypePart = clientMessage.getPart(partNumber);
       int actionType = actionTypePart.getInt();
 
       long versionTimeStamp = VersionTag.ILLEGAL_VERSION_TIMESTAMP;
@@ -211,14 +211,15 @@ public class GatewayReceiverCommand extends BaseCommand {
       boolean callbackArgExists = false;
 
       try {
-        Part possibleDuplicatePart = msg.getPart(partNumber + 1);
+        Part possibleDuplicatePart = clientMessage.getPart(partNumber + 1);
         byte[] possibleDuplicatePartBytes;
         try {
           possibleDuplicatePartBytes = (byte[]) possibleDuplicatePart.getObject();
         } catch (Exception e) {
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_REQUEST_1_CONTAINING_2_EVENTS,
-              new Object[] {servConn.getName(), Integer.valueOf(batchId),
+              new Object[] {
+                serverConnection.getName(), Integer.valueOf(batchId),
                   Integer.valueOf(numberOfEvents)}),
               e);
           throw e;
@@ -231,7 +232,7 @@ public class GatewayReceiverCommand extends BaseCommand {
         callbackArg = null;
 
         // Retrieve the region name from the message parts
-        regionNamePart = msg.getPart(partNumber + 2);
+        regionNamePart = clientMessage.getPart(partNumber + 2);
         regionName = regionNamePart.getString();
         if (regionName.equals(PeerTypeRegistration.REGION_FULL_PATH)) {
           indexWithoutPDXEvent--;
@@ -243,28 +244,30 @@ public class GatewayReceiverCommand extends BaseCommand {
         // duplication of events, but it is unused now. In
         // fact the event id is overridden by the FROM_GATEWAY
         // token.
-        Part eventIdPart = msg.getPart(partNumber + 3);
-        eventIdPart.setVersion(servConn.getClientVersion());
+        Part eventIdPart = clientMessage.getPart(partNumber + 3);
+        eventIdPart.setVersion(serverConnection.getClientVersion());
         // String eventId = eventIdPart.getString();
         try {
           eventId = (EventID) eventIdPart.getObject();
         } catch (Exception e) {
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_REQUEST_1_CONTAINING_2_EVENTS,
-              new Object[] {servConn.getName(), Integer.valueOf(batchId),
+              new Object[] {
+                serverConnection.getName(), Integer.valueOf(batchId),
                   Integer.valueOf(numberOfEvents)}),
               e);
           throw e;
         }
 
         // Retrieve the key from the message parts
-        keyPart = msg.getPart(partNumber + 4);
+        keyPart = clientMessage.getPart(partNumber + 4);
         try {
           key = keyPart.getStringOrObject();
         } catch (Exception e) {
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_REQUEST_1_CONTAINING_2_EVENTS,
-              new Object[] {servConn.getName(), Integer.valueOf(batchId),
+              new Object[] {
+                serverConnection.getName(), Integer.valueOf(batchId),
                   Integer.valueOf(numberOfEvents)}),
               e);
           throw e;
@@ -281,7 +284,7 @@ public class GatewayReceiverCommand extends BaseCommand {
              */
 
             // Retrieve the value from the message parts (do not deserialize it)
-            valuePart = msg.getPart(partNumber + 5);
+            valuePart = clientMessage.getPart(partNumber + 5);
             // try {
             // logger.warn(getName() + ": Creating key " + key + " value " +
             // valuePart.getObject());
@@ -289,18 +292,19 @@ public class GatewayReceiverCommand extends BaseCommand {
 
             // Retrieve the callbackArg from the message parts if necessary
             int index = partNumber + 6;
-            callbackArgExistsPart = msg.getPart(index++); {
+            callbackArgExistsPart = clientMessage.getPart(index++); {
             byte[] partBytes = (byte[]) callbackArgExistsPart.getObject();
             callbackArgExists = partBytes[0] == 0x01;
           }
             if (callbackArgExists) {
-              callbackArgPart = msg.getPart(index++);
+              callbackArgPart = clientMessage.getPart(index++);
               try {
                 callbackArg = callbackArgPart.getObject();
               } catch (Exception e) {
                 logger.warn(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_CREATE_REQUEST_1_FOR_2_EVENTS,
-                    new Object[] {servConn.getName(), Integer.valueOf(batchId),
+                    new Object[] {
+                      serverConnection.getName(), Integer.valueOf(batchId),
                         Integer.valueOf(numberOfEvents)}),
                     e);
                 throw e;
@@ -309,14 +313,14 @@ public class GatewayReceiverCommand extends BaseCommand {
             if (logger.isDebugEnabled()) {
               logger.debug(
                   "{}: Processing batch create request {} on {} for region {} key {} value {} callbackArg {}, eventId={}",
-                  servConn.getName(), batchId, servConn.getSocketString(), regionName, key,
+                  serverConnection.getName(), batchId, serverConnection.getSocketString(), regionName, key,
                   valuePart, callbackArg, eventId);
             }
-            versionTimeStamp = msg.getPart(index++).getLong();
+            versionTimeStamp = clientMessage.getPart(index++).getLong();
             // Process the create request
             if (key == null || regionName == null) {
               StringId message = null;
-              Object[] messageArgs = new Object[] {servConn.getName(), Integer.valueOf(batchId)};
+              Object[] messageArgs = new Object[] { serverConnection.getName(), Integer.valueOf(batchId)};
               if (key == null) {
                 message =
                     LocalizedStrings.ProcessBatch_0_THE_INPUT_REGION_NAME_FOR_THE_BATCH_CREATE_REQUEST_1_IS_NULL;
@@ -331,7 +335,7 @@ public class GatewayReceiverCommand extends BaseCommand {
             }
             region = (LocalRegion) crHelper.getRegion(regionName);
             if (region == null) {
-              handleRegionNull(servConn, regionName, batchId);
+              handleRegionNull(serverConnection, regionName, batchId);
             } else {
               clientEvent = new EventIDHolder(eventId);
               if (versionTimeStamp > 0) {
@@ -348,7 +352,7 @@ public class GatewayReceiverCommand extends BaseCommand {
                 boolean isObject = valuePart.isObject();
                 // [sumedh] This should be done on client while sending
                 // since that is the WAN gateway
-                AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+                AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
                 if (authzRequest != null) {
                   PutOperationContext putContext =
                       authzRequest.putAuthorize(regionName, key, value, isObject, callbackArg);
@@ -361,29 +365,31 @@ public class GatewayReceiverCommand extends BaseCommand {
                   result = addPdxType(crHelper, key, value);
                 } else {
                   result = region.basicBridgeCreate(key, value, isObject, callbackArg,
-                      servConn.getProxyID(), false, clientEvent, false);
+                      serverConnection.getProxyID(), false, clientEvent, false);
                   // If the create fails (presumably because it already exists),
                   // attempt to update the entry
                   if (!result) {
                     result = region.basicBridgePut(key, value, null, isObject, callbackArg,
-                        servConn.getProxyID(), false, clientEvent);
+                        serverConnection.getProxyID(), false, clientEvent);
                   }
                 }
 
                 if (result || clientEvent.isConcurrencyConflict()) {
-                  servConn.setModificationInfo(true, regionName, key);
+                  serverConnection.setModificationInfo(true, regionName, key);
                   stats.incCreateRequest();
                 } else {
                   // This exception will be logged in the catch block below
                   throw new Exception(
                       LocalizedStrings.ProcessBatch_0_FAILED_TO_CREATE_OR_UPDATE_ENTRY_FOR_REGION_1_KEY_2_VALUE_3_CALLBACKARG_4
-                          .toLocalizedString(new Object[] {servConn.getName(), regionName, key,
+                          .toLocalizedString(new Object[] {
+                            serverConnection.getName(), regionName, key,
                               valuePart, callbackArg}));
                 }
               } catch (Exception e) {
                 logger.warn(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_CREATE_REQUEST_1_FOR_2_EVENTS,
-                    new Object[] {servConn.getName(), Integer.valueOf(batchId),
+                    new Object[] {
+                      serverConnection.getName(), Integer.valueOf(batchId),
                         Integer.valueOf(numberOfEvents)}),
                     e);
                 throw e;
@@ -400,7 +406,7 @@ public class GatewayReceiverCommand extends BaseCommand {
              */
 
             // Retrieve the value from the message parts (do not deserialize it)
-            valuePart = msg.getPart(partNumber + 5);
+            valuePart = clientMessage.getPart(partNumber + 5);
             // try {
             // logger.warn(getName() + ": Updating key " + key + " value " +
             // valuePart.getObject());
@@ -408,34 +414,35 @@ public class GatewayReceiverCommand extends BaseCommand {
 
             // Retrieve the callbackArg from the message parts if necessary
             index = partNumber + 6;
-            callbackArgExistsPart = msg.getPart(index++); {
+            callbackArgExistsPart = clientMessage.getPart(index++); {
             byte[] partBytes = (byte[]) callbackArgExistsPart.getObject();
             callbackArgExists = partBytes[0] == 0x01;
           }
             if (callbackArgExists) {
-              callbackArgPart = msg.getPart(index++);
+              callbackArgPart = clientMessage.getPart(index++);
               try {
                 callbackArg = callbackArgPart.getObject();
               } catch (Exception e) {
                 logger.warn(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_UPDATE_REQUEST_1_CONTAINING_2_EVENTS,
-                    new Object[] {servConn.getName(), Integer.valueOf(batchId),
+                    new Object[] {
+                      serverConnection.getName(), Integer.valueOf(batchId),
                         Integer.valueOf(numberOfEvents)}),
                     e);
                 throw e;
               }
             }
-            versionTimeStamp = msg.getPart(index++).getLong();
+            versionTimeStamp = clientMessage.getPart(index++).getLong();
             if (logger.isDebugEnabled()) {
               logger.debug(
                   "{}: Processing batch update request {} on {} for region {} key {} value {} callbackArg {}",
-                  servConn.getName(), batchId, servConn.getSocketString(), regionName, key,
+                  serverConnection.getName(), batchId, serverConnection.getSocketString(), regionName, key,
                   valuePart, callbackArg);
             }
             // Process the update request
             if (key == null || regionName == null) {
               StringId message = null;
-              Object[] messageArgs = new Object[] {servConn.getName(), Integer.valueOf(batchId)};
+              Object[] messageArgs = new Object[] { serverConnection.getName(), Integer.valueOf(batchId)};
               if (key == null) {
                 message =
                     LocalizedStrings.ProcessBatch_0_THE_INPUT_KEY_FOR_THE_BATCH_UPDATE_REQUEST_1_IS_NULL;
@@ -450,7 +457,7 @@ public class GatewayReceiverCommand extends BaseCommand {
             }
             region = (LocalRegion) crHelper.getRegion(regionName);
             if (region == null) {
-              handleRegionNull(servConn, regionName, batchId);
+              handleRegionNull(serverConnection, regionName, batchId);
             } else {
               clientEvent = new EventIDHolder(eventId);
               if (versionTimeStamp > 0) {
@@ -465,7 +472,7 @@ public class GatewayReceiverCommand extends BaseCommand {
               try {
                 byte[] value = valuePart.getSerializedForm();
                 boolean isObject = valuePart.isObject();
-                AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+                AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
                 if (authzRequest != null) {
                   PutOperationContext putContext = authzRequest.putAuthorize(regionName, key, value,
                       isObject, callbackArg, PutOperationContext.UPDATE);
@@ -477,14 +484,14 @@ public class GatewayReceiverCommand extends BaseCommand {
                   result = addPdxType(crHelper, key, value);
                 } else {
                   result = region.basicBridgePut(key, value, null, isObject, callbackArg,
-                      servConn.getProxyID(), false, clientEvent);
+                      serverConnection.getProxyID(), false, clientEvent);
                 }
                 if (result || clientEvent.isConcurrencyConflict()) {
-                  servConn.setModificationInfo(true, regionName, key);
+                  serverConnection.setModificationInfo(true, regionName, key);
                   stats.incUpdateRequest();
                 } else {
                   final Object[] msgArgs =
-                      new Object[] {servConn.getName(), regionName, key, valuePart, callbackArg};
+                      new Object[] { serverConnection.getName(), regionName, key, valuePart, callbackArg};
                   final StringId message =
                       LocalizedStrings.ProcessBatch_0_FAILED_TO_UPDATE_ENTRY_FOR_REGION_1_KEY_2_VALUE_3_AND_CALLBACKARG_4;
                   String s = message.toLocalizedString(msgArgs);
@@ -493,16 +500,16 @@ public class GatewayReceiverCommand extends BaseCommand {
                 }
               } catch (CancelException e) {
                 // FIXME better exception hierarchy would avoid this check
-                if (servConn.getCachedRegionHelper().getCache().getCancelCriterion()
-                    .isCancelInProgress()) {
+                if (serverConnection.getCachedRegionHelper().getCache().getCancelCriterion()
+                                    .isCancelInProgress()) {
                   if (logger.isDebugEnabled()) {
                     logger.debug(
                         "{} ignoring message of type {} from client {} because shutdown occurred during message processing.",
-                        servConn.getName(), MessageType.getString(msg.getMessageType()),
-                        servConn.getProxyID());
+                        serverConnection.getName(), MessageType.getString(clientMessage.getMessageType()),
+                        serverConnection.getProxyID());
                   }
-                  servConn.setFlagProcessMessagesAsFalse();
-                  servConn.setClientDisconnectedException(e);
+                  serverConnection.setFlagProcessMessagesAsFalse();
+                  serverConnection.setClientDisconnectedException(e);
                 } else {
                   throw e;
                 }
@@ -511,7 +518,8 @@ public class GatewayReceiverCommand extends BaseCommand {
                 // Preserve the connection under all circumstances
                 logger.warn(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_UPDATE_REQUEST_1_CONTAINING_2_EVENTS,
-                    new Object[] {servConn.getName(), Integer.valueOf(batchId),
+                    new Object[] {
+                      serverConnection.getName(), Integer.valueOf(batchId),
                         Integer.valueOf(numberOfEvents)}),
                     e);
                 throw e;
@@ -521,28 +529,29 @@ public class GatewayReceiverCommand extends BaseCommand {
           case 2: // Destroy
             // Retrieve the callbackArg from the message parts if necessary
             index = partNumber + 5;
-            callbackArgExistsPart = msg.getPart(index++); {
+            callbackArgExistsPart = clientMessage.getPart(index++); {
             byte[] partBytes = (byte[]) callbackArgExistsPart.getObject();
             callbackArgExists = partBytes[0] == 0x01;
           }
             if (callbackArgExists) {
-              callbackArgPart = msg.getPart(index++);
+              callbackArgPart = clientMessage.getPart(index++);
               try {
                 callbackArg = callbackArgPart.getObject();
               } catch (Exception e) {
                 logger.warn(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_DESTROY_REQUEST_1_CONTAINING_2_EVENTS,
-                    new Object[] {servConn.getName(), Integer.valueOf(batchId),
+                    new Object[] {
+                      serverConnection.getName(), Integer.valueOf(batchId),
                         Integer.valueOf(numberOfEvents)}),
                     e);
                 throw e;
               }
             }
 
-            versionTimeStamp = msg.getPart(index++).getLong();
+            versionTimeStamp = clientMessage.getPart(index++).getLong();
             if (logger.isDebugEnabled()) {
               logger.debug("{}: Processing batch destroy request {} on {} for region {} key {}",
-                  servConn.getName(), batchId, servConn.getSocketString(), regionName, key);
+                  serverConnection.getName(), batchId, serverConnection.getSocketString(), regionName, key);
             }
 
             // Process the destroy request
@@ -556,14 +565,14 @@ public class GatewayReceiverCommand extends BaseCommand {
                 message =
                     LocalizedStrings.ProcessBatch_0_THE_INPUT_REGION_NAME_FOR_THE_BATCH_DESTROY_REQUEST_1_IS_NULL;
               }
-              Object[] messageArgs = new Object[] {servConn.getName(), Integer.valueOf(batchId)};
+              Object[] messageArgs = new Object[] { serverConnection.getName(), Integer.valueOf(batchId)};
               String s = message.toLocalizedString(messageArgs);
               logger.warn(s);
               throw new Exception(s);
             }
             region = (LocalRegion) crHelper.getRegion(regionName);
             if (region == null) {
-              handleRegionNull(servConn, regionName, batchId);
+              handleRegionNull(serverConnection, regionName, batchId);
             } else {
               clientEvent = new EventIDHolder(eventId);
               if (versionTimeStamp > 0) {
@@ -576,20 +585,20 @@ public class GatewayReceiverCommand extends BaseCommand {
               handleMessageRetry(region, clientEvent);
               // Destroy the entry
               try {
-                AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+                AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
                 if (authzRequest != null) {
                   DestroyOperationContext destroyContext =
                       authzRequest.destroyAuthorize(regionName, key, callbackArg);
                   callbackArg = destroyContext.getCallbackArg();
                 }
-                region.basicBridgeDestroy(key, callbackArg, servConn.getProxyID(), false,
+                region.basicBridgeDestroy(key, callbackArg, serverConnection.getProxyID(), false,
                     clientEvent);
-                servConn.setModificationInfo(true, regionName, key);
+                serverConnection.setModificationInfo(true, regionName, key);
                 stats.incDestroyRequest();
               } catch (EntryNotFoundException e) {
                 logger.info(LocalizedMessage.create(
                     LocalizedStrings.ProcessBatch_0_DURING_BATCH_DESTROY_NO_ENTRY_WAS_FOUND_FOR_KEY_1,
-                    new Object[] {servConn.getName(), key}));
+                    new Object[] { serverConnection.getName(), key}));
                 // throw new Exception(e);
               }
             }
@@ -598,43 +607,44 @@ public class GatewayReceiverCommand extends BaseCommand {
 
             try {
               // Region name
-              regionNamePart = msg.getPart(partNumber + 2);
+              regionNamePart = clientMessage.getPart(partNumber + 2);
               regionName = regionNamePart.getString();
 
               // Retrieve the event id from the message parts
-              eventIdPart = msg.getPart(partNumber + 3);
+              eventIdPart = clientMessage.getPart(partNumber + 3);
               eventId = (EventID) eventIdPart.getObject();
 
               // Retrieve the key from the message parts
-              keyPart = msg.getPart(partNumber + 4);
+              keyPart = clientMessage.getPart(partNumber + 4);
               key = keyPart.getStringOrObject();
 
               // Retrieve the callbackArg from the message parts if necessary
               index = partNumber + 5;
-              callbackArgExistsPart = msg.getPart(index++);
+              callbackArgExistsPart = clientMessage.getPart(index++);
 
               byte[] partBytes = (byte[]) callbackArgExistsPart.getObject();
               callbackArgExists = partBytes[0] == 0x01;
 
               if (callbackArgExists) {
-                callbackArgPart = msg.getPart(index++);
+                callbackArgPart = clientMessage.getPart(index++);
                 callbackArg = callbackArgPart.getObject();
               }
 
             } catch (Exception e) {
               logger.warn(LocalizedMessage.create(
                   LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_UPDATE_VERSION_REQUEST_1_CONTAINING_2_EVENTS,
-                  new Object[] {servConn.getName(), Integer.valueOf(batchId),
+                  new Object[] {
+                    serverConnection.getName(), Integer.valueOf(batchId),
                       Integer.valueOf(numberOfEvents)}),
                   e);
               throw e;
             }
 
-            versionTimeStamp = msg.getPart(index++).getLong();
+            versionTimeStamp = clientMessage.getPart(index++).getLong();
             if (logger.isDebugEnabled()) {
               logger.debug(
                   "{}: Processing batch update-version request {} on {} for region {} key {} value {} callbackArg {}",
-                  servConn.getName(), batchId, servConn.getSocketString(), regionName, key,
+                  serverConnection.getName(), batchId, serverConnection.getSocketString(), regionName, key,
                   valuePart, callbackArg);
             }
             // Process the update time-stamp request
@@ -642,7 +652,8 @@ public class GatewayReceiverCommand extends BaseCommand {
               StringId message =
                   LocalizedStrings.ProcessBatch_0_CAUGHT_EXCEPTION_PROCESSING_BATCH_UPDATE_VERSION_REQUEST_1_CONTAINING_2_EVENTS;
 
-              Object[] messageArgs = new Object[] {servConn.getName(), Integer.valueOf(batchId),
+              Object[] messageArgs = new Object[] {
+                serverConnection.getName(), Integer.valueOf(batchId),
                   Integer.valueOf(numberOfEvents)};
               String s = message.toLocalizedString(messageArgs);
               logger.warn(s);
@@ -652,7 +663,7 @@ public class GatewayReceiverCommand extends BaseCommand {
               region = (LocalRegion) crHelper.getRegion(regionName);
 
               if (region == null) {
-                handleRegionNull(servConn, regionName, batchId);
+                handleRegionNull(serverConnection, regionName, batchId);
               } else {
 
                 clientEvent = new EventIDHolder(eventId);
@@ -668,13 +679,13 @@ public class GatewayReceiverCommand extends BaseCommand {
                 // Update the version tag
                 try {
 
-                  region.basicBridgeUpdateVersionStamp(key, callbackArg, servConn.getProxyID(),
+                  region.basicBridgeUpdateVersionStamp(key, callbackArg, serverConnection.getProxyID(),
                       false, clientEvent);
 
                 } catch (EntryNotFoundException e) {
                   logger.info(LocalizedMessage.create(
                       LocalizedStrings.ProcessBatch_0_DURING_BATCH_UPDATE_VERSION_NO_ENTRY_WAS_FOUND_FOR_KEY_1,
-                      new Object[] {servConn.getName(), key}));
+                      new Object[] { serverConnection.getName(), key}));
                   // throw new Exception(e);
                 }
               }
@@ -684,29 +695,30 @@ public class GatewayReceiverCommand extends BaseCommand {
           default:
             logger.fatal(LocalizedMessage.create(
                 LocalizedStrings.Processbatch_0_UNKNOWN_ACTION_TYPE_1_FOR_BATCH_FROM_2,
-                new Object[] {servConn.getName(), Integer.valueOf(actionType),
-                    servConn.getSocketString()}));
+                new Object[] {
+                  serverConnection.getName(), Integer.valueOf(actionType),
+                    serverConnection.getSocketString()}));
             stats.incUnknowsOperationsReceived();
         }
       } catch (CancelException e) {
         if (logger.isDebugEnabled()) {
           logger.debug(
               "{} ignoring message of type {} from client {} because shutdown occurred during message processing.",
-              servConn.getName(), MessageType.getString(msg.getMessageType()),
-              servConn.getProxyID());
+              serverConnection.getName(), MessageType.getString(clientMessage.getMessageType()),
+              serverConnection.getProxyID());
         }
-        servConn.setFlagProcessMessagesAsFalse();
-        servConn.setClientDisconnectedException(e);
+        serverConnection.setFlagProcessMessagesAsFalse();
+        serverConnection.setClientDisconnectedException(e);
         return;
       } catch (Exception e) {
         // If an interrupted exception is thrown , rethrow it
-        checkForInterrupt(servConn, e);
+        checkForInterrupt(serverConnection, e);
 
         // If we have an issue with the PDX registry, stop processing more data
         if (e.getCause() instanceof PdxRegistryMismatchException) {
           fatalException = e.getCause();
           logger.fatal(LocalizedMessage.create(LocalizedStrings.GatewayReceiver_PDX_CONFIGURATION,
-              new Object[] {servConn.getMembershipID()}), e.getCause());
+              new Object[] { serverConnection.getMembershipID()}), e.getCause());
           break;
         }
 
@@ -772,26 +784,26 @@ public class GatewayReceiverCommand extends BaseCommand {
       stats.incProcessBatchTime(start - oldStart);
     }
     if (fatalException != null) {
-      servConn.incrementLatestBatchIdReplied(batchId);
-      writeFatalException(msg, fatalException, servConn, batchId);
-      servConn.setAsTrue(RESPONDED);
+      serverConnection.incrementLatestBatchIdReplied(batchId);
+      writeFatalException(clientMessage, fatalException, serverConnection, batchId);
+      serverConnection.setAsTrue(RESPONDED);
     } else if (!exceptions.isEmpty()) {
-      servConn.incrementLatestBatchIdReplied(batchId);
-      writeBatchException(msg, exceptions, servConn, batchId);
-      servConn.setAsTrue(RESPONDED);
+      serverConnection.incrementLatestBatchIdReplied(batchId);
+      writeBatchException(clientMessage, exceptions, serverConnection, batchId);
+      serverConnection.setAsTrue(RESPONDED);
     } else if (!wroteResponse) {
       // Increment the batch id unless the received batch id is -1 (a failover
       // batch)
-      servConn.incrementLatestBatchIdReplied(batchId);
+      serverConnection.incrementLatestBatchIdReplied(batchId);
 
-      writeReply(msg, servConn, batchId, numberOfEvents);
-      servConn.setAsTrue(RESPONDED);
+      writeReply(clientMessage, serverConnection, batchId, numberOfEvents);
+      serverConnection.setAsTrue(RESPONDED);
       stats.incWriteProcessBatchResponseTime(DistributionStats.getStatTime() - start);
       if (logger.isDebugEnabled()) {
         logger.debug(
             "{}: Sent process batch normal response for batch {} containing {} events ({} bytes) with {} acknowledgement on {}",
-            servConn.getName(), batchId, numberOfEvents, msg.getPayloadLength(),
-            (earlyAck ? "early" : "normal"), servConn.getSocketString());
+            serverConnection.getName(), batchId, numberOfEvents, clientMessage.getPayloadLength(),
+            (earlyAck ? "early" : "normal"), serverConnection.getSocketString());
       }
       // logger.warn("Sent process batch normal response for batch " +
       // batchId + " containing " + numberOfEvents + " events (" +

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Get70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Get70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Get70.java
index 5cb1e41..7017aa8 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Get70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Get70.java
@@ -54,17 +54,17 @@ public class Get70 extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long startparam)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long startparam)
       throws IOException {
     long start = startparam;
     Part regionNamePart = null, keyPart = null, valuePart = null;
     String regionName = null;
     Object callbackArg = null, key = null;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
     StringId errMessage = null;
 
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     // requiresResponse = true;
     {
       long oldStart = start;
@@ -72,18 +72,18 @@ public class Get70 extends BaseCommand {
       stats.incReadGetRequestTime(start - oldStart);
     }
     // Retrieve the data from the message parts
-    int parts = msg.getNumberOfParts();
-    regionNamePart = msg.getPart(0);
-    keyPart = msg.getPart(1);
+    int parts = clientMessage.getNumberOfParts();
+    regionNamePart = clientMessage.getPart(0);
+    keyPart = clientMessage.getPart(1);
     // valuePart = null; (redundant assignment)
     if (parts > 2) {
-      valuePart = msg.getPart(2);
+      valuePart = clientMessage.getPart(2);
       try {
         callbackArg = valuePart.getObject();
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
+        writeException(clientMessage, e, false, serverConnection);
         // responded = true;
-        servConn.setAsTrue(RESPONDED);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -91,15 +91,15 @@ public class Get70 extends BaseCommand {
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
+      writeException(clientMessage, e, false, serverConnection);
       // responded = true;
-      servConn.setAsTrue(RESPONDED);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received 7.0 get request ({} bytes) from {} for region {} key {} txId {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName, key,
-          msg.getTransactionId());
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key,
+          clientMessage.getTransactionId());
     }
 
     // Process the get request
@@ -113,18 +113,18 @@ public class Get70 extends BaseCommand {
         errMessage = LocalizedStrings.Request_THE_INPUT_REGION_NAME_FOR_THE_GET_REQUEST_IS_NULL;
       }
       String s = errMessage.toLocalizedString();
-      logger.warn("{}: {}", servConn.getName(), s);
-      writeErrorResponse(msg, MessageType.REQUESTDATAERROR, s, servConn);
-      servConn.setAsTrue(RESPONDED);
+      logger.warn("{}: {}", serverConnection.getName(), s);
+      writeErrorResponse(clientMessage, MessageType.REQUESTDATAERROR, s, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    Region region = servConn.getCache().getRegion(regionName);
+    Region region = serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason = LocalizedStrings.Request__0_WAS_NOT_FOUND_DURING_GET_REQUEST
           .toLocalizedString(regionName);
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -133,14 +133,14 @@ public class Get70 extends BaseCommand {
       // for integrated security
       this.securityService.authorizeRegionRead(regionName, key.toString());
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         getContext = authzRequest.getAuthorize(regionName, key, callbackArg);
         callbackArg = getContext.getCallbackArg();
       }
     } catch (NotAuthorizedException ex) {
-      writeException(msg, ex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, ex, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -148,10 +148,10 @@ public class Get70 extends BaseCommand {
     // the value if it is a byte[].
     Entry entry;
     try {
-      entry = getEntry(region, key, callbackArg, servConn);
+      entry = getEntry(region, key, callbackArg, serverConnection);
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -164,7 +164,7 @@ public class Get70 extends BaseCommand {
       boolean keyNotPresent = entry.keyNotPresent;
 
       try {
-        AuthorizeRequestPP postAuthzRequest = servConn.getPostAuthzRequest();
+        AuthorizeRequestPP postAuthzRequest = serverConnection.getPostAuthzRequest();
         if (postAuthzRequest != null) {
           try {
             getContext = postAuthzRequest.getAuthorize(regionName, key, data, isObject, getContext);
@@ -182,8 +182,8 @@ public class Get70 extends BaseCommand {
           }
         }
       } catch (NotAuthorizedException ex) {
-        writeException(msg, ex, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, ex, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
 
@@ -197,23 +197,23 @@ public class Get70 extends BaseCommand {
       if (region instanceof PartitionedRegion) {
         PartitionedRegion pr = (PartitionedRegion) region;
         if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-          writeResponseWithRefreshMetadata(data, callbackArg, msg, isObject, servConn, pr,
+          writeResponseWithRefreshMetadata(data, callbackArg, clientMessage, isObject, serverConnection, pr,
               pr.getNetworkHopType(), versionTag, keyNotPresent);
           pr.clearNetworkHopData();
         } else {
-          writeResponse(data, callbackArg, msg, isObject, versionTag, keyNotPresent, servConn);
+          writeResponse(data, callbackArg, clientMessage, isObject, versionTag, keyNotPresent, serverConnection);
         }
       } else {
-        writeResponse(data, callbackArg, msg, isObject, versionTag, keyNotPresent, servConn);
+        writeResponse(data, callbackArg, clientMessage, isObject, versionTag, keyNotPresent, serverConnection);
       }
     } finally {
       OffHeapHelper.release(originalData);
     }
 
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Wrote get response back to {} for region {} {}", servConn.getName(),
-          servConn.getSocketString(), regionName, entry);
+      logger.debug("{}: Wrote get response back to {} for region {} {}", serverConnection.getName(),
+          serverConnection.getSocketString(), regionName, entry);
     }
     stats.incWriteGetResponseTime(DistributionStats.getStatTime() - start);
 
@@ -379,12 +379,12 @@ public class Get70 extends BaseCommand {
   }
 
   @Override
-  protected void writeReply(Message origMsg, ServerConnection servConn) throws IOException {
+  protected void writeReply(Message origMsg, ServerConnection serverConnection) throws IOException {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection servConn,
+  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection serverConnection,
       PartitionedRegion pr, byte nwHop) throws IOException {
     throw new UnsupportedOperationException();
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll.java
index 22e63c6..5f7cb29 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll.java
@@ -44,33 +44,33 @@ public class GetAll extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keysPart = null;
     String regionName = null;
     Object[] keys = null;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
     // Retrieve the region name from the message parts
-    regionNamePart = msg.getPart(0);
+    regionNamePart = clientMessage.getPart(0);
     regionName = regionNamePart.getString();
 
     // Retrieve the keys array from the message parts
-    keysPart = msg.getPart(1);
+    keysPart = clientMessage.getPart(1);
     try {
       keys = (Object[]) keysPart.getObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     if (logger.isDebugEnabled()) {
       StringBuffer buffer = new StringBuffer();
-      buffer.append(servConn.getName()).append(": Received getAll request (")
-          .append(msg.getPayloadLength()).append(" bytes) from ").append(servConn.getSocketString())
-          .append(" for region ").append(regionName).append(" keys ");
+      buffer.append(serverConnection.getName()).append(": Received getAll request (")
+            .append(clientMessage.getPayloadLength()).append(" bytes) from ").append(serverConnection.getSocketString())
+            .append(" for region ").append(regionName).append(" keys ");
       if (keys != null) {
         for (int i = 0; i < keys.length; i++) {
           buffer.append(keys[i]).append(" ");
@@ -91,37 +91,37 @@ public class GetAll extends BaseCommand {
         message = LocalizedStrings.GetAll_THE_INPUT_REGION_NAME_FOR_THE_GETALL_REQUEST_IS_NULL
             .toLocalizedString();
       }
-      logger.warn("{}: {}", servConn.getName(), message);
-      writeChunkedErrorResponse(msg, MessageType.GET_ALL_DATA_ERROR, message, servConn);
-      servConn.setAsTrue(RESPONDED);
+      logger.warn("{}: {}", serverConnection.getName(), message);
+      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason = " was not found during getAll request";
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     // Send header
-    ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
+    ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
     chunkedResponseMsg.setMessageType(MessageType.RESPONSE);
-    chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+    chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
     chunkedResponseMsg.sendHeader();
 
     // Send chunk response
     try {
-      fillAndSendGetAllResponseChunks(region, regionName, keys, servConn);
-      servConn.setAsTrue(RESPONDED);
+      fillAndSendGetAllResponseChunks(region, regionName, keys, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
 
       // Otherwise, write an exception message and continue
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
   }
@@ -142,14 +142,14 @@ public class GetAll extends BaseCommand {
       numKeys = allKeys.size();
     }
 
-    ObjectPartList values = new ObjectPartList(maximumChunkSize, keys == null);
+    ObjectPartList values = new ObjectPartList(MAXIMUM_CHUNK_SIZE, keys == null);
     AuthorizeRequest authzRequest = servConn.getAuthzRequest();
     AuthorizeRequestPP postAuthzRequest = servConn.getPostAuthzRequest();
     Request request = (Request) Request.getCommand();
     Object[] valueAndIsObject = new Object[3];
     for (int i = 0; i < numKeys; i++) {
       // Send the intermediate chunk if necessary
-      if (values.size() == maximumChunkSize) {
+      if (values.size() == MAXIMUM_CHUNK_SIZE) {
         // Send the chunk and clear the list
         sendGetAllResponseChunk(region, values, false, servConn);
         values.clear();
@@ -246,7 +246,7 @@ public class GetAll extends BaseCommand {
     ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
     chunkedResponseMsg.setNumberOfParts(1);
     chunkedResponseMsg.setLastChunk(lastChunk);
-    chunkedResponseMsg.addObjPart(list, zipValues);
+    chunkedResponseMsg.addObjPart(list, false);
 
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Sending {} getAll response chunk for region={} values={} chunk=<{}>",

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll651.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll651.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll651.java
index a19d540..b0a1915 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll651.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll651.java
@@ -21,7 +21,6 @@ import java.util.Set;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.operations.GetOperationContext;
 import org.apache.geode.internal.cache.LocalRegion;
-import org.apache.geode.internal.cache.tier.CachedRegionHelper;
 import org.apache.geode.internal.cache.tier.Command;
 import org.apache.geode.internal.cache.tier.MessageType;
 import org.apache.geode.internal.cache.tier.sockets.BaseCommand;
@@ -45,33 +44,33 @@ public class GetAll651 extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keysPart = null;
     String regionName = null;
     Object[] keys = null;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
 
     // Retrieve the region name from the message parts
-    regionNamePart = msg.getPart(0);
+    regionNamePart = clientMessage.getPart(0);
     regionName = regionNamePart.getString();
 
     // Retrieve the keys array from the message parts
-    keysPart = msg.getPart(1);
+    keysPart = clientMessage.getPart(1);
     try {
       keys = (Object[]) keysPart.getObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     if (logger.isDebugEnabled()) {
       StringBuffer buffer = new StringBuffer();
-      buffer.append(servConn.getName()).append(": Received getAll request (")
-          .append(msg.getPayloadLength()).append(" bytes) from ").append(servConn.getSocketString())
-          .append(" for region ").append(regionName).append(" keys ");
+      buffer.append(serverConnection.getName()).append(": Received getAll request (")
+            .append(clientMessage.getPayloadLength()).append(" bytes) from ").append(serverConnection.getSocketString())
+            .append(" for region ").append(regionName).append(" keys ");
       if (keys != null) {
         for (int i = 0; i < keys.length; i++) {
           buffer.append(keys[i]).append(" ");
@@ -90,37 +89,37 @@ public class GetAll651 extends BaseCommand {
         message = LocalizedStrings.GetAll_THE_INPUT_REGION_NAME_FOR_THE_GETALL_REQUEST_IS_NULL
             .toLocalizedString();
       }
-      logger.warn("{}: {}", servConn.getName(), message);
-      writeChunkedErrorResponse(msg, MessageType.GET_ALL_DATA_ERROR, message, servConn);
-      servConn.setAsTrue(RESPONDED);
+      logger.warn("{}: {}", serverConnection.getName(), message);
+      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason = " was not found during getAll request";
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     // Send header
-    ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
+    ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
     chunkedResponseMsg.setMessageType(MessageType.RESPONSE);
-    chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+    chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
     chunkedResponseMsg.sendHeader();
 
     // Send chunk response
     try {
-      fillAndSendGetAllResponseChunks(region, regionName, keys, servConn);
-      servConn.setAsTrue(RESPONDED);
+      fillAndSendGetAllResponseChunks(region, regionName, keys, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
 
       // Otherwise, write an exception message and continue
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
   }
@@ -148,7 +147,7 @@ public class GetAll651 extends BaseCommand {
     final boolean isDebugEnabled = logger.isDebugEnabled();
     for (int i = 0; i < numKeys; i++) {
       // Send the intermediate chunk if necessary
-      if (values.size() == maximumChunkSize) {
+      if (values.size() == MAXIMUM_CHUNK_SIZE) {
         // Send the chunk and clear the list
         sendGetAllResponseChunk(region, values, false, servConn);
         values.clear();
@@ -253,7 +252,7 @@ public class GetAll651 extends BaseCommand {
    * @param includeKeys if the part list should include the keys
    */
   protected ObjectPartList651 getObjectPartsList(boolean includeKeys) {
-    ObjectPartList651 values = new ObjectPartList651(maximumChunkSize, includeKeys);
+    ObjectPartList651 values = new ObjectPartList651(MAXIMUM_CHUNK_SIZE, includeKeys);
     return values;
   }
 
@@ -262,7 +261,7 @@ public class GetAll651 extends BaseCommand {
     ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
     chunkedResponseMsg.setNumberOfParts(1);
     chunkedResponseMsg.setLastChunk(lastChunk);
-    chunkedResponseMsg.addObjPart(list, zipValues);
+    chunkedResponseMsg.addObjPart(list, false);
 
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Sending {} getAll response chunk for region={} values={} chunk=<{}>",

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll70.java
index 154e800..579593f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAll70.java
@@ -23,7 +23,6 @@ import org.apache.geode.cache.operations.GetOperationContext;
 import org.apache.geode.cache.operations.internal.GetOperationContextImpl;
 import org.apache.geode.internal.Version;
 import org.apache.geode.internal.cache.LocalRegion;
-import org.apache.geode.internal.cache.tier.CachedRegionHelper;
 import org.apache.geode.internal.cache.tier.Command;
 import org.apache.geode.internal.cache.tier.MessageType;
 import org.apache.geode.internal.cache.tier.sockets.BaseCommand;
@@ -40,7 +39,6 @@ import org.apache.geode.internal.offheap.OffHeapHelper;
 import org.apache.geode.internal.offheap.annotations.Retained;
 import org.apache.geode.internal.security.AuthorizeRequest;
 import org.apache.geode.internal.security.AuthorizeRequestPP;
-import org.apache.geode.internal.security.SecurityService;
 import org.apache.geode.security.NotAuthorizedException;
 
 public class GetAll70 extends BaseCommand {
@@ -52,36 +50,36 @@ public class GetAll70 extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keysPart = null;
     String regionName = null;
     Object[] keys = null;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
     int partIdx = 0;
 
     // Retrieve the region name from the message parts
-    regionNamePart = msg.getPart(partIdx++);
+    regionNamePart = clientMessage.getPart(partIdx++);
     regionName = regionNamePart.getString();
 
     // Retrieve the keys array from the message parts
-    keysPart = msg.getPart(partIdx++);
+    keysPart = clientMessage.getPart(partIdx++);
     try {
       keys = (Object[]) keysPart.getObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     boolean requestSerializedValues;
-    requestSerializedValues = msg.getPart(partIdx++).getInt() == 1;
+    requestSerializedValues = clientMessage.getPart(partIdx++).getInt() == 1;
 
     if (logger.isDebugEnabled()) {
       StringBuffer buffer = new StringBuffer();
-      buffer.append(servConn.getName()).append(": Received getAll request (")
-          .append(msg.getPayloadLength()).append(" bytes) from ").append(servConn.getSocketString())
-          .append(" for region ").append(regionName).append(" keys ");
+      buffer.append(serverConnection.getName()).append(": Received getAll request (")
+            .append(clientMessage.getPayloadLength()).append(" bytes) from ").append(serverConnection.getSocketString())
+            .append(" for region ").append(regionName).append(" keys ");
       if (keys != null) {
         for (int i = 0; i < keys.length; i++) {
           buffer.append(keys[i]).append(" ");
@@ -100,37 +98,37 @@ public class GetAll70 extends BaseCommand {
         message = LocalizedStrings.GetAll_THE_INPUT_REGION_NAME_FOR_THE_GETALL_REQUEST_IS_NULL
             .toLocalizedString();
       }
-      logger.warn("{}: {}", servConn.getName(), message);
-      writeChunkedErrorResponse(msg, MessageType.GET_ALL_DATA_ERROR, message, servConn);
-      servConn.setAsTrue(RESPONDED);
+      logger.warn("{}: {}", serverConnection.getName(), message);
+      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason = " was not found during getAll request";
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     // Send header
-    ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
+    ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
     chunkedResponseMsg.setMessageType(MessageType.RESPONSE);
-    chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+    chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
     chunkedResponseMsg.sendHeader();
 
     // Send chunk response
     try {
-      fillAndSendGetAllResponseChunks(region, regionName, keys, servConn, requestSerializedValues);
-      servConn.setAsTrue(RESPONDED);
+      fillAndSendGetAllResponseChunks(region, regionName, keys, serverConnection, requestSerializedValues);
+      serverConnection.setAsTrue(RESPONDED);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
 
       // Otherwise, write an exception message and continue
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
   }
@@ -163,7 +161,7 @@ public class GetAll70 extends BaseCommand {
     // in the old mode (which may be impossible since we only used that mode pre 7.0) in which the
     // client told us
     // to get and return all the keys and values. I think this was used for register interest.
-    VersionedObjectList values = new VersionedObjectList(maximumChunkSize, keys == null,
+    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, keys == null,
         region.getAttributes().getConcurrencyChecksEnabled(), requestSerializedValues);
     try {
       AuthorizeRequest authzRequest = servConn.getAuthzRequest();
@@ -172,7 +170,7 @@ public class GetAll70 extends BaseCommand {
       final boolean isDebugEnabled = logger.isDebugEnabled();
       for (int i = 0; i < numKeys; i++) {
         // Send the intermediate chunk if necessary
-        if (values.size() == maximumChunkSize) {
+        if (values.size() == MAXIMUM_CHUNK_SIZE) {
           // Send the chunk and clear the list
           values.setKeys(null);
           sendGetAllResponseChunk(region, values, false, servConn);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllForRI.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllForRI.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllForRI.java
index d380beb..43d3348 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllForRI.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllForRI.java
@@ -38,7 +38,7 @@ public class GetAllForRI extends GetAll651 {
 
   @Override
   protected ObjectPartList651 getObjectPartsList(boolean includeKeys) {
-    return new SerializedObjectPartList(maximumChunkSize, includeKeys);
+    return new SerializedObjectPartList(MAXIMUM_CHUNK_SIZE, includeKeys);
   }
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllWithCallback.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllWithCallback.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllWithCallback.java
index 2fb860d..c6663de 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllWithCallback.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetAllWithCallback.java
@@ -22,7 +22,6 @@ import org.apache.geode.cache.Region;
 import org.apache.geode.cache.operations.GetOperationContext;
 import org.apache.geode.cache.operations.internal.GetOperationContextImpl;
 import org.apache.geode.internal.cache.LocalRegion;
-import org.apache.geode.internal.cache.tier.CachedRegionHelper;
 import org.apache.geode.internal.cache.tier.Command;
 import org.apache.geode.internal.cache.tier.MessageType;
 import org.apache.geode.internal.cache.tier.sockets.BaseCommand;
@@ -57,44 +56,44 @@ public class GetAllWithCallback extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, keysPart = null, callbackPart = null;
     String regionName = null;
     Object[] keys = null;
     Object callback = null;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
     int partIdx = 0;
 
     // Retrieve the region name from the message parts
-    regionNamePart = msg.getPart(partIdx++);
+    regionNamePart = clientMessage.getPart(partIdx++);
     regionName = regionNamePart.getString();
 
     // Retrieve the keys array from the message parts
-    keysPart = msg.getPart(partIdx++);
+    keysPart = clientMessage.getPart(partIdx++);
     try {
       keys = (Object[]) keysPart.getObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
-    callbackPart = msg.getPart(partIdx++);
+    callbackPart = clientMessage.getPart(partIdx++);
     try {
       callback = callbackPart.getObject();
     } catch (Exception e) {
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     if (logger.isDebugEnabled()) {
       StringBuffer buffer = new StringBuffer();
-      buffer.append(servConn.getName()).append(": Received getAll request (")
-          .append(msg.getPayloadLength()).append(" bytes) from ").append(servConn.getSocketString())
-          .append(" for region ").append(regionName).append(" with callback ").append(callback)
-          .append(" keys ");
+      buffer.append(serverConnection.getName()).append(": Received getAll request (")
+            .append(clientMessage.getPayloadLength()).append(" bytes) from ").append(serverConnection.getSocketString())
+            .append(" for region ").append(regionName).append(" with callback ").append(callback)
+            .append(" keys ");
       if (keys != null) {
         for (int i = 0; i < keys.length; i++) {
           buffer.append(keys[i]).append(" ");
@@ -114,35 +113,35 @@ public class GetAllWithCallback extends BaseCommand {
             .toLocalizedString();
       }
       logger.warn(LocalizedMessage.create(LocalizedStrings.TWO_ARG_COLON,
-          new Object[] {servConn.getName(), message}));
-      writeChunkedErrorResponse(msg, MessageType.GET_ALL_DATA_ERROR, message, servConn);
-      servConn.setAsTrue(RESPONDED);
+          new Object[] { serverConnection.getName(), message}));
+      writeChunkedErrorResponse(clientMessage, MessageType.GET_ALL_DATA_ERROR, message, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason = " was not found during getAll request";
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     // Send header
-    ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
+    ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage();
     chunkedResponseMsg.setMessageType(MessageType.RESPONSE);
-    chunkedResponseMsg.setTransactionId(msg.getTransactionId());
+    chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
     chunkedResponseMsg.sendHeader();
 
     // Send chunk response
     try {
-      fillAndSendGetAllResponseChunks(region, regionName, keys, servConn, callback);
-      servConn.setAsTrue(RESPONDED);
+      fillAndSendGetAllResponseChunks(region, regionName, keys, serverConnection, callback);
+      serverConnection.setAsTrue(RESPONDED);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
 
       // Otherwise, write an exception message and continue
-      writeChunkedException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeChunkedException(clientMessage, e, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -154,7 +153,7 @@ public class GetAllWithCallback extends BaseCommand {
 
     assert keys != null;
     int numKeys = keys.length;
-    VersionedObjectList values = new VersionedObjectList(maximumChunkSize, false,
+    VersionedObjectList values = new VersionedObjectList(MAXIMUM_CHUNK_SIZE, false,
         region.getAttributes().getConcurrencyChecksEnabled(), false);
     try {
       AuthorizeRequest authzRequest = servConn.getAuthzRequest();
@@ -162,7 +161,7 @@ public class GetAllWithCallback extends BaseCommand {
       Get70 request = (Get70) Get70.getCommand();
       for (int i = 0; i < numKeys; i++) {
         // Send the intermediate chunk if necessary
-        if (values.size() == maximumChunkSize) {
+        if (values.size() == MAXIMUM_CHUNK_SIZE) {
           // Send the chunk and clear the list
           sendGetAllResponseChunk(region, values, false, servConn);
           values.clear();
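
The hunks above all follow the same chunked-response loop: accumulate looked-up values, flush an intermediate chunk whenever the buffer reaches the maximum chunk size, and mark only the final send as the last chunk. As a rough, generic sketch of that pattern (all names here, such as MAX_CHUNK_SIZE and sendChunk, are hypothetical stand-ins rather than Geode API):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Function;

// Illustrative only: the chunking loop shared by the GetAll command variants,
// with hypothetical names in place of the internal Geode types.
public class ChunkedGetAllSketch {
  static final int MAX_CHUNK_SIZE = 100;

  static <K, V> void sendInChunks(List<K> keys, Function<K, V> lookup,
      BiConsumer<List<V>, Boolean> sendChunk) {
    List<V> values = new ArrayList<>();
    for (K key : keys) {
      // Flush an intermediate chunk (lastChunk = false) once the buffer is full.
      if (values.size() == MAX_CHUNK_SIZE) {
        sendChunk.accept(new ArrayList<>(values), false);
        values.clear();
      }
      values.add(lookup.apply(key));
    }
    // Whatever remains is sent as the final chunk (lastChunk = true).
    sendChunk.accept(values, true);
  }

  public static void main(String[] args) {
    sendInChunks(Arrays.asList("k1", "k2", "k3"), String::toUpperCase,
        (chunk, last) -> System.out.println(chunk + " lastChunk=" + last));
  }
}
```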

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand.java
index a3e565d..bcdbd08 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand.java
@@ -49,20 +49,19 @@ public class GetClientPRMetadataCommand extends BaseCommand {
   private GetClientPRMetadataCommand() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException, InterruptedException {
     String regionFullPath = null;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    regionFullPath = msg.getPart(0).getString();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    regionFullPath = clientMessage.getPart(0).getString();
     String errMessage = "";
     if (regionFullPath == null) {
       logger.warn(LocalizedMessage
           .create(LocalizedStrings.GetClientPRMetadata_THE_INPUT_REGION_PATH_IS_NULL));
       errMessage =
           LocalizedStrings.GetClientPRMetadata_THE_INPUT_REGION_PATH_IS_NULL.toLocalizedString();
-      writeErrorResponse(msg, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(),
-          servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     } else {
       Region region = crHelper.getRegion(regionFullPath);
       if (region == null) {
@@ -71,13 +70,12 @@ public class GetClientPRMetadataCommand extends BaseCommand {
             regionFullPath));
         errMessage = LocalizedStrings.GetClientPRMetadata_REGION_NOT_FOUND.toLocalizedString()
             + regionFullPath;
-        writeErrorResponse(msg, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(),
-            servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(), serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       } else {
         try {
-          Message responseMsg = servConn.getResponseMessage();
-          responseMsg.setTransactionId(msg.getTransactionId());
+          Message responseMsg = serverConnection.getResponseMessage();
+          responseMsg.setTransactionId(clientMessage.getTransactionId());
           responseMsg.setMessageType(MessageType.RESPONSE_CLIENT_PR_METADATA);
 
           PartitionedRegion prRgion = (PartitionedRegion) region;
@@ -93,11 +91,11 @@ public class GetClientPRMetadataCommand extends BaseCommand {
             }
           }
           responseMsg.send();
-          msg.clearParts();
+          clientMessage.clearParts();
         } catch (Exception e) {
-          writeException(msg, e, false, servConn);
+          writeException(clientMessage, e, false, serverConnection);
         } finally {
-          servConn.setAsTrue(Command.RESPONDED);
+          serverConnection.setAsTrue(Command.RESPONDED);
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java
index 3961b19..4c519a9 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java
@@ -47,20 +47,19 @@ public class GetClientPRMetadataCommand66 extends BaseCommand {
   private GetClientPRMetadataCommand66() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException, InterruptedException {
     String regionFullPath = null;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    regionFullPath = msg.getPart(0).getString();
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    regionFullPath = clientMessage.getPart(0).getString();
     String errMessage = "";
     if (regionFullPath == null) {
       logger.warn(LocalizedMessage
           .create(LocalizedStrings.GetClientPRMetadata_THE_INPUT_REGION_PATH_IS_NULL));
       errMessage =
           LocalizedStrings.GetClientPRMetadata_THE_INPUT_REGION_PATH_IS_NULL.toLocalizedString();
-      writeErrorResponse(msg, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(),
-          servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     } else {
       Region region = crHelper.getRegion(regionFullPath);
       if (region == null) {
@@ -69,13 +68,12 @@ public class GetClientPRMetadataCommand66 extends BaseCommand {
             regionFullPath));
         errMessage = LocalizedStrings.GetClientPRMetadata_REGION_NOT_FOUND.toLocalizedString()
             + regionFullPath;
-        writeErrorResponse(msg, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(),
-            servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PR_METADATA_ERROR, errMessage.toString(), serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
       } else {
         try {
-          Message responseMsg = servConn.getResponseMessage();
-          responseMsg.setTransactionId(msg.getTransactionId());
+          Message responseMsg = serverConnection.getResponseMessage();
+          responseMsg.setTransactionId(clientMessage.getTransactionId());
           responseMsg.setMessageType(MessageType.RESPONSE_CLIENT_PR_METADATA);
 
           PartitionedRegion prRgion = (PartitionedRegion) region;
@@ -86,11 +84,11 @@ public class GetClientPRMetadataCommand66 extends BaseCommand {
             responseMsg.addObjPart(serverLocations);
           }
           responseMsg.send();
-          msg.clearParts();
+          clientMessage.clearParts();
         } catch (Exception e) {
-          writeException(msg, e, false, servConn);
+          writeException(clientMessage, e, false, serverConnection);
         } finally {
-          servConn.setAsTrue(Command.RESPONDED);
+          serverConnection.setAsTrue(Command.RESPONDED);
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPartitionAttributesCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPartitionAttributesCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPartitionAttributesCommand.java
index 7d5c251..6be9353 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPartitionAttributesCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPartitionAttributesCommand.java
@@ -45,22 +45,22 @@ public class GetClientPartitionAttributesCommand extends BaseCommand {
 
   @SuppressWarnings("unchecked")
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException, InterruptedException {
     String regionFullPath = null;
-    regionFullPath = msg.getPart(0).getString();
+    regionFullPath = clientMessage.getPart(0).getString();
     String errMessage = "";
     if (regionFullPath == null) {
       logger.warn(LocalizedMessage
           .create(LocalizedStrings.GetClientPartitionAttributes_THE_INPUT_REGION_PATH_IS_NULL));
       errMessage = LocalizedStrings.GetClientPartitionAttributes_THE_INPUT_REGION_PATH_IS_NULL
           .toLocalizedString();
-      writeErrorResponse(msg, MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR,
-          errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR,
+          errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
-    Region region = servConn.getCache().getRegion(regionFullPath);
+    Region region = serverConnection.getCache().getRegion(regionFullPath);
     if (region == null) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.GetClientPartitionAttributes_REGION_NOT_FOUND_FOR_SPECIFIED_REGION_PATH,
@@ -68,15 +68,15 @@ public class GetClientPartitionAttributesCommand extends BaseCommand {
       errMessage =
           LocalizedStrings.GetClientPartitionAttributes_REGION_NOT_FOUND.toLocalizedString()
               + regionFullPath;
-      writeErrorResponse(msg, MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR,
-          errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR,
+          errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     try {
-      Message responseMsg = servConn.getResponseMessage();
-      responseMsg.setTransactionId(msg.getTransactionId());
+      Message responseMsg = serverConnection.getResponseMessage();
+      responseMsg.setTransactionId(clientMessage.getTransactionId());
       responseMsg.setMessageType(MessageType.RESPONSE_CLIENT_PARTITION_ATTRIBUTES);
 
       PartitionedRegion prRgion = (PartitionedRegion) region;
@@ -113,11 +113,11 @@ public class GetClientPartitionAttributesCommand extends BaseCommand {
       }
       responseMsg.addObjPart(leaderRegionPath);
       responseMsg.send();
-      msg.clearParts();
+      clientMessage.clearParts();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
+      writeException(clientMessage, e, false, serverConnection);
     } finally {
-      servConn.setAsTrue(Command.RESPONDED);
+      serverConnection.setAsTrue(Command.RESPONDED);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPartitionAttributesCommand66.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPartitionAttributesCommand66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPartitionAttributesCommand66.java
index 209c40c..251f4da 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPartitionAttributesCommand66.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPartitionAttributesCommand66.java
@@ -50,22 +50,22 @@ public class GetClientPartitionAttributesCommand66 extends BaseCommand {
 
   @SuppressWarnings("unchecked")
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException, InterruptedException {
     String regionFullPath = null;
-    regionFullPath = msg.getPart(0).getString();
+    regionFullPath = clientMessage.getPart(0).getString();
     String errMessage = "";
     if (regionFullPath == null) {
       logger.warn(LocalizedMessage
           .create(LocalizedStrings.GetClientPartitionAttributes_THE_INPUT_REGION_PATH_IS_NULL));
       errMessage = LocalizedStrings.GetClientPartitionAttributes_THE_INPUT_REGION_PATH_IS_NULL
           .toLocalizedString();
-      writeErrorResponse(msg, MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR,
-          errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR,
+          errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
-    Region region = servConn.getCache().getRegion(regionFullPath);
+    Region region = serverConnection.getCache().getRegion(regionFullPath);
     if (region == null) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.GetClientPartitionAttributes_REGION_NOT_FOUND_FOR_SPECIFIED_REGION_PATH,
@@ -73,15 +73,15 @@ public class GetClientPartitionAttributesCommand66 extends BaseCommand {
       errMessage =
           LocalizedStrings.GetClientPartitionAttributes_REGION_NOT_FOUND.toLocalizedString()
               + regionFullPath;
-      writeErrorResponse(msg, MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR,
-          errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR,
+          errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     try {
-      Message responseMsg = servConn.getResponseMessage();
-      responseMsg.setTransactionId(msg.getTransactionId());
+      Message responseMsg = serverConnection.getResponseMessage();
+      responseMsg.setTransactionId(clientMessage.getTransactionId());
       responseMsg.setMessageType(MessageType.RESPONSE_CLIENT_PARTITION_ATTRIBUTES);
 
       if (!(region instanceof PartitionedRegion)) {
@@ -138,11 +138,11 @@ public class GetClientPartitionAttributesCommand66 extends BaseCommand {
         }
       }
       responseMsg.send();
-      msg.clearParts();
+      clientMessage.clearParts();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
+      writeException(clientMessage, e, false, serverConnection);
     } finally {
-      servConn.setAsTrue(Command.RESPONDED);
+      serverConnection.setAsTrue(Command.RESPONDED);
     }
 
   }


[07/43] geode git commit: GEODE-2941 Update Pulse documentation

Posted by kl...@apache.org.
GEODE-2941 Update Pulse documentation


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/7b34cfd9
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/7b34cfd9
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/7b34cfd9

Branch: refs/heads/feature/GEODE-2632-17
Commit: 7b34cfd9fd7865ee30f2d1518977b3c7bce294a4
Parents: b7faa08
Author: Dave Barnes <db...@pivotal.io>
Authored: Wed May 24 17:18:58 2017 -0700
Committer: Dave Barnes <db...@pivotal.io>
Committed: Thu May 25 13:29:42 2017 -0700

----------------------------------------------------------------------
 .../source/subnavs/geode-subnav.erb             |  13 +-
 .../cluster_config/gfsh_persist.html.md.erb     |   2 +-
 .../15_minute_quickstart_gfsh.html.md.erb       |   2 +-
 .../management/jmx_manager_node.html.md.erb     |   2 +-
 .../management_system_overview.html.md.erb      |   4 +-
 .../managing/management/mm_overview.html.md.erb |   2 +-
 geode-docs/tools_modules/book_intro.html.md.erb |   2 +-
 .../gfsh/command-pages/start.html.md.erb        |   4 +-
 .../lucene_integration.html.md.erb              |   2 -
 .../pulse/chapter_overview.html.md.erb          |  49 --
 .../tools_modules/pulse/pulse-auth.html.md.erb  |  63 ++
 .../pulse/pulse-embedded.html.md.erb            |  76 ++
 .../pulse/pulse-hosted.html.md.erb              |  89 ++
 .../pulse/pulse-overview.html.md.erb            |  49 ++
 .../pulse/pulse-requirements.html.md.erb        |  34 +
 .../tools_modules/pulse/pulse-views.html.md.erb | 453 ++++++++++
 .../tools_modules/pulse/quickstart.html.md.erb  | 827 -------------------
 .../pulse/system_requirements.html.md.erb       |  35 -
 .../tools_modules/redis_adapter.html.md.erb     |   2 -
 19 files changed, 781 insertions(+), 929 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-book/master_middleman/source/subnavs/geode-subnav.erb
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/subnavs/geode-subnav.erb b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
index 12b2151..aa0faf4 100644
--- a/geode-book/master_middleman/source/subnavs/geode-subnav.erb
+++ b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
@@ -2271,19 +2271,22 @@ gfsh</a>
                         </ul>
                     </li>
                     <li class="has_submenu">
-                        <a href="/docs/guide/12/tools_modules/pulse/chapter_overview.html">Geode Pulse</a>
+                        <a href="/docs/guide/12/tools_modules/pulse/pulse-overview.html">Geode Pulse</a>
                         <ul>
                             <li>
-                                <a href="/docs/guide/12/tools_modules/pulse/quickstart.html#topic_523F6DE33FE54307BBE8F83BB7D9355D">Pulse Quick Start (Embedded Mode)</a>
+                                <a href="/docs/guide/12/tools_modules/pulse/pulse-requirements.html">Pulse System Requirements</a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/tools_modules/pulse/quickstart.html#topic_795C97B46B9843528961A094EE520782">Hosting Pulse on a Web Application Server</a>
+                                <a href="/docs/guide/12/tools_modules/pulse/pulse-embedded.html">Running Pulse in Embedded Mode (Quick Start)</a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/tools_modules/pulse/quickstart.html#topic_AC9FFAA6FB044279BAED7A3E099E07AC">Configuring Pulse Authentication</a>
+                                <a href="/docs/guide/12/tools_modules/pulse/pulse-hosted.html">Hosting Pulse on a Web Application Server</a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/tools_modules/pulse/quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404">Using Pulse Views</a>
+                                <a href="/docs/guide/12/tools_modules/pulse/pulse-auth.html">Configuring Pulse Authentication</a>
+                            </li>
+                            <li>
+                                <a href="/docs/guide/12/tools_modules/pulse/pulse-views.html">Using Pulse Views</a>
                             </li>
                         </ul>
                     </li>

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/configuring/cluster_config/gfsh_persist.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/configuring/cluster_config/gfsh_persist.html.md.erb b/geode-docs/configuring/cluster_config/gfsh_persist.html.md.erb
index 45f73f9..4e21735 100644
--- a/geode-docs/configuring/cluster_config/gfsh_persist.html.md.erb
+++ b/geode-docs/configuring/cluster_config/gfsh_persist.html.md.erb
@@ -91,7 +91,7 @@ There are some configurations that you cannot create using `gfsh`, and that you
     -   `cache-writer`
     -   `compressor`
     -   `serializer`
-    -   `instantiantor`
+    -   `instantiator`
     -   `pdx-serializer`
     
         **Note:**

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/getting_started/15_minute_quickstart_gfsh.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/getting_started/15_minute_quickstart_gfsh.html.md.erb b/geode-docs/getting_started/15_minute_quickstart_gfsh.html.md.erb
index 894b998..954dcf8 100644
--- a/geode-docs/getting_started/15_minute_quickstart_gfsh.html.md.erb
+++ b/geode-docs/getting_started/15_minute_quickstart_gfsh.html.md.erb
@@ -73,7 +73,7 @@ If you run `start locator` from gfsh without specifying the member name, gfsh wi
 
 ## <a id="topic_FE3F28ED18E145F787431EC87B676A76__section_02C79BFFB5334E78A5856AE1EB1F1F84" class="no-quick-link"></a>Step 3: Start Pulse
 
-Start up the browser-based Pulse monitoring tool. Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of Geode clusters, members, and regions. See [Geode Pulse](../tools_modules/pulse/chapter_overview.html).
+Start up the browser-based Pulse monitoring tool. Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of Geode clusters, members, and regions. See [Geode Pulse](../tools_modules/pulse/pulse-overview.html).
 
 ``` pre
 gfsh>start pulse

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/managing/management/jmx_manager_node.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/jmx_manager_node.html.md.erb b/geode-docs/managing/management/jmx_manager_node.html.md.erb
index 97ca066..d002734 100644
--- a/geode-docs/managing/management/jmx_manager_node.html.md.erb
+++ b/geode-docs/managing/management/jmx_manager_node.html.md.erb
@@ -23,7 +23,7 @@ limitations under the License.
 
 Any member can host an embedded JMX Manager, which provides a federated view of all MBeans for the distributed system. The member can be configured to be a manager at startup or anytime during its life by invoking the appropriate API calls on the ManagementService.
 
-You need to have a JMX Manager started in your distributed system in order to use Geode management and monitoring tools such as [gfsh](../../tools_modules/gfsh/chapter_overview.html) and [Geode Pulse](../../tools_modules/pulse/chapter_overview.html).
+You need to have a JMX Manager started in your distributed system in order to use Geode management and monitoring tools such as [gfsh](../../tools_modules/gfsh/chapter_overview.html) and [Geode Pulse](../../tools_modules/pulse/pulse-overview.html).
 
 **Note:**
 Each node that acts as the JMX Manager has additional memory requirements depending on the number of resources that it is managing and monitoring. Being a JMX Manager can increase the memory footprint of any process, including locator processes. See [Memory Requirements for Cached Data](../../reference/topics/memory_requirements_for_cache_data.html#calculating_memory_requirements) for more information on calculating memory overhead on your Geode processes.

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/managing/management/management_system_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/management_system_overview.html.md.erb b/geode-docs/managing/management/management_system_overview.html.md.erb
index ecf69f5..13a3de0 100644
--- a/geode-docs/managing/management/management_system_overview.html.md.erb
+++ b/geode-docs/managing/management/management_system_overview.html.md.erb
@@ -105,8 +105,8 @@ You can also execute gfsh commands using the ManagementService API. See [Executi
 This section lists the currently available tools for managing and monitoring Geode:
 
 -   **gfsh**. Apache Geode command-line interface that provides a simple & powerful command shell that supports the administration, debugging and deployment of Geode applications. It features context sensitive help, scripting and the ability to invoke any commands from within the application using a simple API. See [gfsh](../../tools_modules/gfsh/chapter_overview.html).
--   **Geode Pulse**. Easy-to-use, browser-based dashboard for monitoring Geode deployments. Geode Pulse provides an integrated view of all Geode members within a distributed system. See [Geode Pulse](../../tools_modules/pulse/chapter_overview.html).
--   **Pulse Data Browser**. This Geode Pulse utility provides a graphical interface for performing OQL ad-hoc queries in a Geode distributed system. See [Data Browser](../../tools_modules/pulse/quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser).
+-   **Geode Pulse**. Easy-to-use, browser-based dashboard for monitoring Geode deployments. Geode Pulse provides an integrated view of all Geode members within a distributed system. See [Geode Pulse](../../tools_modules/pulse/pulse-overview.html).
+-   **Pulse Data Browser**. This Geode Pulse utility provides a graphical interface for performing OQL ad-hoc queries in a Geode distributed system. See [Data Browser](../../tools_modules/pulse/pulse-views.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser).
 -   **Other Java Monitoring Tools such as JConsole and jvisualvm.** JConsole is a JMX-based management and monitoring tool provided in the Java 2 Platform that provides information on the performance and consumption of resources by Java applications. See [http://docs.oracle.com/javase/6/docs/technotes/guides/management/jconsole.html](http://docs.oracle.com/javase/6/docs/technotes/guides/management/jconsole.html). **Java VisualVM (jvisualvm)** is a profiling tool for analyzing your Java Virtual Machine. Java VisualVM is useful to Java application developers to troubleshoot applications and to monitor and improve the applications' performance. Java VisualVM can allow developers to generate and analyse heap dumps, track down memory leaks, perform and monitor garbage collection, and perform lightweight memory and CPU profiling. For more details on using jvisualvm, see [http://docs.oracle.com/javase/6/docs/technotes/tools/share/jvisualvm.html](http://docs.oracle.com/javase/6/docs/technotes/tools/share/jvisualvm.html).
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/managing/management/mm_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/mm_overview.html.md.erb b/geode-docs/managing/management/mm_overview.html.md.erb
index 89a8df5..21967cb 100644
--- a/geode-docs/managing/management/mm_overview.html.md.erb
+++ b/geode-docs/managing/management/mm_overview.html.md.erb
@@ -85,7 +85,7 @@ Geode Pulse is a Web Application that provides a graphical dashboard for monitor
 
 Use Pulse to examine total memory, CPU, and disk space used by members, uptime statistics, client connections, and critical notifications. Pulse communicates with a Geode JMX manager to provide a complete view of your Geode deployment.
 
-See [Geode Pulse](../../tools_modules/pulse/chapter_overview.html).
+See [Geode Pulse](../../tools_modules/pulse/pulse-overview.html).
 
 ## JConsole
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/book_intro.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/book_intro.html.md.erb b/geode-docs/tools_modules/book_intro.html.md.erb
index e7390c5..2bf0930 100644
--- a/geode-docs/tools_modules/book_intro.html.md.erb
+++ b/geode-docs/tools_modules/book_intro.html.md.erb
@@ -35,7 +35,7 @@ limitations under the License.
 
     The Apache Geode HTTP Session Management modules provide fast, scalable, and reliable session replication for HTTP servers without requiring application changes.
 
--   **[Geode Pulse](pulse/chapter_overview.html)**
+-   **[Geode Pulse](pulse/pulse-overview.html)**
 
     Geode Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of Geode clusters, members, and regions.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/gfsh/command-pages/start.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/start.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/start.html.md.erb
index e2a4edc..0bec322 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/start.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/start.html.md.erb
@@ -458,7 +458,7 @@ Cluster configuration service is up and running.
 
 Launches the Geode Pulse monitoring dashboard tool in the user's default system browser and navigates the user to the landing page (login page).
 
-For more information on Geode Pulse, see [Geode Pulse](../../pulse/chapter_overview.html).
+For more information on Geode Pulse, see [Geode Pulse](../../pulse/pulse-overview.html).
 
 **Availability:** Online or offline.
 
@@ -483,7 +483,7 @@ start pulse
 start pulse --url=http://gemfire.example.com:7070/pulse
 ```
 
-**Sample Output:** See [Geode Pulse](../../pulse/chapter_overview.html) for examples of Pulse.
+**Sample Output:** See [Geode Pulse](../../pulse/pulse-overview.html) for examples of Pulse.
 
 ## <a id="topic_3764EE2DB18B4AE4A625E0354471738A" class="no-quick-link"></a>start server
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/lucene_integration.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/lucene_integration.html.md.erb b/geode-docs/tools_modules/lucene_integration.html.md.erb
index b83705b..7f5afdc 100644
--- a/geode-docs/tools_modules/lucene_integration.html.md.erb
+++ b/geode-docs/tools_modules/lucene_integration.html.md.erb
@@ -1,8 +1,6 @@
 ---
 title: Apache Lucene&reg; Integration
 ---
-<a id="topic_523F6DE33FE54307BBE8F83BB7D9355D"></a>
-
 <!--
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements.  See the NOTICE file distributed with

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/chapter_overview.html.md.erb b/geode-docs/tools_modules/pulse/chapter_overview.html.md.erb
deleted file mode 100644
index 19de8f1..0000000
--- a/geode-docs/tools_modules/pulse/chapter_overview.html.md.erb
+++ /dev/null
@@ -1,49 +0,0 @@
----
-title:  Geode Pulse
----
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Geode Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of Geode clusters, members, and regions.
-
-Use Pulse to examine total memory, CPU, and disk space used by members, uptime statistics, client connections, WAN connections, and critical notifications. Pulse communicates with a Geode JMX manager to provide a complete view of your Geode deployment. You can drill down from a high-level cluster view to examine individual members and even regions within a member, to filter the type of information and level of detail.
-
-By default, Geode Pulse runs in an embedded container within a Geode JMX manager node. You can optionally deploy Pulse to a Web application server of your choice, so that the tool runs independently of your Geode clusters. Hosting Pulse on an application server also enables you to use SSL for accessing the application.
-
--   **[Pulse System Requirements](system_requirements.html)**
-
-    Verify that your system meets the installation and runtime requirements for GemFire Pulse.
-
-
--   **[Pulse Quick Start (Embedded Mode)](quickstart.html#topic_523F6DE33FE54307BBE8F83BB7D9355D)**
-
-    Use Pulse in embedded mode to monitor a Geode deployment directly from a Geode JMX Manager. By default, the embedded Pulse application connects to the local JMX Manager that hosts the Pulse application. Optionally, configure Pulse to connect to a Geode system of your choice.
-
--   **[Hosting Pulse on a Web Application Server](quickstart.html#topic_795C97B46B9843528961A094EE520782)**
-
-    Host Pulse on a dedicated Web application server to make the Pulse application available at a consistent address, or to use SSL for accessing the Pulse application. When you host Pulse in this way, you also configure Pulse to connect to a specific locator or JMX Manager node for monitoring.
-
--   **[Configuring Pulse Authentication](quickstart.html#topic_AC9FFAA6FB044279BAED7A3E099E07AC)**
-
-    Pulse requires all users to authenticate themselves before they can use the Pulse Web application. If you have configured JMX authentication on the Geode JMX Manager node, the Pulse Web application itself may also need to authenticate itself to the Geode JMX Manager node on startup.
-
--   **[Using Pulse Views](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404)**
-
-    Pulse provides a variety of different views to help you monitor Geode clusters, members, and regions.
-
-

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb b/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
new file mode 100644
index 0000000..d834592
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
@@ -0,0 +1,63 @@
+---
+title: Configuring Pulse Authentication
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Pulse requires all users to authenticate themselves before they can use the Pulse Web application.
+
+If you run Pulse in embedded mode, the Pulse application runs on the JMX Manager node and no JMX authentication is required. You do not need to specify valid JMX credentials to start an embedded Pulse application.
+
+If you host Pulse on a Web Application server (non-embedded mode) and you configure JMX authentication on the Geode manager node, then the Pulse Web application must authenticate itself with the manager node when it starts. Specify the credentials of a valid JMX user account in the `pulse.properties` file, as described in [Hosting Pulse on a Web Application Server](pulse-hosted.html).
+
+**Note:**
+The credentials that you specify must have both read and write privileges in the JMX Manager node. See [Configuring a JMX Manager](../../managing/management/jmx_manager_operations.html#topic_263072624B8D4CDBAD18B82E07AA44B6).
+
+# Configuring Pulse to Use HTTPS
+
+You can configure Pulse to use HTTPS in either embedded or non-embedded mode.
+
+In non-embedded mode where you are running Pulse on a standalone Web application server, you must use the Web server's SSL configuration to make the HTTP requests secure.
+
+In embedded mode, Geode uses an embedded Jetty server to host the
+Pulse Web application. To make the embedded server use HTTPS, you must
+enable the `http` SSL component in
+`gemfire.properties` or `gfsecurity.properties`.
+See [SSL](../../managing/security/ssl_overview.html) for details on configuring these parameters.
+
+These SSL parameters apply to all HTTP services hosted on the JMX Manager, which includes the following:
+
+-   Developer REST API service
+-   Management REST API service (for remote cluster management)
+-   Pulse monitoring tool
+
+When the `http` SSL component is enabled, all HTTP services become
+SSL-enabled and you must configure your client applications
+accordingly. For SSL-enabled Pulse, you will need to configure your
+browsers with proper certificates.
+
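+For example, a minimal `gemfire.properties` sketch that enables the `http` SSL component might look
+like the following; the keystore and truststore paths and passwords are placeholders, and the SSL
+page linked above remains the authoritative reference for these property names:
+
+```
+ssl-enabled-components=http
+ssl-keystore=/path/to/keystore.jks
+ssl-keystore-password=changeit
+ssl-truststore=/path/to/truststore.jks
+ssl-truststore-password=changeit
+```
+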
+If a JMX manager or locator is configured to use SSL, you can configure Pulse to connect to these
+processes. Create a file named `pulsesecurity.properties` and save it somewhere in the classpath of
+your Web application server. Include standard Java SSL properties, such as:
+
+```
+javax.net.ssl.keyStore={KeyStorePath}
+javax.net.ssl.keyStorePassword={KeyStorePassword}
+javax.net.ssl.trustStore={TrustStorePath}
+javax.net.ssl.trustStorePassword={TrustStorePassword}
+```

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-embedded.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-embedded.html.md.erb b/geode-docs/tools_modules/pulse/pulse-embedded.html.md.erb
new file mode 100644
index 0000000..955e554
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-embedded.html.md.erb
@@ -0,0 +1,76 @@
+---
+title: Running Pulse in Embedded Mode (Quick Start)
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Use Pulse in embedded mode to monitor a Geode deployment directly from a Geode JMX Manager. By
+default, the embedded Pulse application connects to the local JMX Manager that hosts the Pulse
+application. Optionally, configure Pulse to connect to a Geode system of your choice.
+
+To run Pulse in embedded mode:
+
+1.  Configure a Geode member to run as a JMX Manager node, specifying the HTTP port on which you
+will access the Pulse Web application (port 7070 by default). For example, the following command
+starts a Geode locator as a JMX Manager node, using the default HTTP port 7070 for the Pulse
+application:
+
+    ``` pre
+    gfsh
+    gfsh> start locator --name=loc1
+    ```
+
+    **Note:**
+    Geode locators become JMX Manager nodes by default. To start a non-locator member as a JMX
+    Manager node, include the `--J=-Dgemfire.jmx-manager=true` option. To specify a non-default port
+    number for the HTTP service that hosts the Pulse application, include the
+    `--J=-Dgemfire.http-service-port=port_number` option when starting the JMX Manager node, as
+    shown in the sketch that follows these steps.
+
+    When the JMX Manager node boots, it starts an embedded Jetty instance and deploys the Pulse Web
+    application at the specified HTTP port, or at the default port of 7070.
+
+    `gfsh` automatically connects to the manager when you start it in this way. If you already
+    started a manager process earlier, use the `connect` command in `gfsh` to connect to that
+    process.
+
+2.  Access the embedded Pulse application from a Web browser. If you are connected to the Geode
+cluster using gfsh, use the `start pulse` command to load the correct URL in your browser:
+
+    ``` pre
+    gfsh> start pulse
+    ```
+
+    Or, enter the URL http://*address*:*http-service-port*/pulse directly in your Web browser,
+    substituting the address and HTTP port of the manager. For example, you access Pulse on the
+    local locator machine from Step 1 at the URL http://localhost:7070/pulse.
+
+3.  If you have configured authentication for the Pulse application, enter the username and password
+of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both
+fields. Click **Sign In** to continue.
+
+    See [Configuring Pulse Authentication](pulse-auth.html).
+
+4.  After you log in, Pulse displays the main cluster view for the local distributed system. See
+[Using Pulse Views](pulse-views.html).
+
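+For example, a hedged sketch of starting a locator whose embedded Pulse application listens on
+HTTP port 8080 instead of the default 7070 (the member name here is only an illustration):
+
+``` pre
+gfsh> start locator --name=loc1 --J=-Dgemfire.http-service-port=8080
+```
+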
+**Note:**
+When running in embedded mode, the Pulse application connects only to the JMX Manager running in the
+locator or member that hosts Pulse. This enables you to monitor all members of that distributed
+system. You can also view (but not monitor) connected WAN clusters, and can view gateway senders and
+receivers that are configured in the local cluster.
+

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb b/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
new file mode 100644
index 0000000..ceed530
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
@@ -0,0 +1,89 @@
+---
+title: Hosting Pulse on a Web Application Server
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Host Pulse on a dedicated Web application server to make the Pulse application available at a consistent address, or to use SSL for accessing the Pulse application. When you host Pulse in this way, you also configure Pulse to connect to a specific locator or JMX Manager node for monitoring.
+
+To host Pulse on a Web application server:
+
+1.  Set the `http-service-port` property to zero (`-Dgemfire.http-service-port=0`) when you start your Geode JMX Manager nodes. Setting this property to zero disables the embedded Web server for hosting the Pulse application. (A sketch of such a start command appears after these steps.)
+2.  Create a `pulse.properties` file somewhere in the classpath of your Web application server. For example, if you are hosting Pulse on Tomcat, create the `pulse.properties` file in the `$TOMCAT_SERVER/lib` directory.
+
+3.  Define the following configuration properties in the `pulse.properties` file:
+
+    <table>
+    <colgroup>
+    <col width="50%" />
+    <col width="50%" />
+    </colgroup>
+    <thead>
+    <tr class="header">
+    <th>Property</th>
+    <th>Description</th>
+    </tr>
+    </thead>
+    <tbody>
+    <tr class="odd">
+    <td><code class="ph codeph">pulse.useLocator</code></td>
+    <td>Specify &quot;true&quot; to configure Pulse to connect to a Geode Locator member, or &quot;false&quot; to connect directly to a JMX Manager.
+    <p>When Pulse connects to a Geode locator, the locator provides the address and port of an available JMX Manager to use for monitoring the distributed system. In most production deployments, you should connect Pulse to a locator instance; this allows Pulse to provide monitoring services using any available JMX Manager.</p>
+    <p>If you specify &quot;false,&quot; Pulse connects directly to a specific JMX Manager. If this manager is not available, the Pulse connection fails, even if another JMX Manager is available in the distributed system.</p></td>
+    </tr>
+    <tr class="even">
+    <td><code class="ph codeph">pulse.host</code></td>
+    <td>Specify the DNS name or IP address of the Geode locator or JMX Manager machine to which Pulse should connect. You specify either a locator or JMX Manager address depending on how you configured the <code class="ph codeph">pulse.useLocator</code> property.</td>
+    </tr>
+    <tr class="odd">
+    <td><code class="ph codeph">pulse.port</code></td>
+    <td>Specify the port number of the Geode locator or the HTTP port number of the JMX Manager to which Pulse should connect. You specify either a locator or JMX Manager port depending on how you configured the <code class="ph codeph">pulse.useLocator</code> property.
+    <p>If you configured <code class="ph codeph">pulse.useLocator=false</code>, then <code class="ph codeph">pulse.port</code> must correspond to the <code class="ph codeph">http-service-port</code> setting of the JMX Manager.</p></td>
+    </tr>
+    </tbody>
+    </table>
+
+    For example, with this configuration Pulse connects to the locator at locsrv.gemstone.com\[10334\] and accesses any available JMX Manager:
+
+    ``` pre
+    pulse.useLocator=true
+    pulse.host=locsrv.gemstone.com
+    pulse.port=10334
+    ```
+
+    With this configuration Pulse accesses only the JMX Manager instance at jmxsrv.gemstone.com\[8080\]:
+
+    ``` pre
+    pulse.useLocator=false
+    pulse.host=jmxsrv.gemstone.com
+    pulse.port=8080
+    ```
+
+4.  (Optional.) Configure authentication for the Pulse Web application using the instructions in [Configuring Pulse Authentication](pulse-auth.html).
+
+5.  Deploy the Pulse Web application to your application server. Geode installs the `pulse.war` file in the `tools/Pulse` subdirectory of your Geode installation directory. Depending on your application server, you may need to copy the `pulse.war` file to a deployment directory or use a configuration tool to deploy the file.
+6.  Access the Pulse application using the address, port, and application URL that you configure in your Web application server. For example, with Tomcat the default URL is http://*address*:8080/pulse. Your application server provides options for configuring the address, port, and application name; substitute the correct items to access the deployed Pulse application.
+
+    Pulse connects to the locator or JMX Manager that you configured in the `pulse.properties` file, authenticating using the credentials that you configured in the file.
+
+7.  If you have configured authentication for the Pulse application, enter the username and password of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both fields. Click **Sign In** to continue.
+
+    See [Configuring Pulse Authentication](pulse-auth.html).
+
+8.  After you log in, Pulse displays the main cluster view for the distributed system to which it has connected. See [Using Pulse Views](pulse-views.html).
+
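+As referenced in Step 1, a hedged sketch of starting a locator-based JMX Manager with its embedded
+HTTP service disabled, so that Pulse is served only by your Web application server (the member name
+is only an illustration):
+
+``` pre
+gfsh> start locator --name=loc1 --J=-Dgemfire.http-service-port=0
+```
+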

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-overview.html.md.erb b/geode-docs/tools_modules/pulse/pulse-overview.html.md.erb
new file mode 100644
index 0000000..ec723d2
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-overview.html.md.erb
@@ -0,0 +1,49 @@
+---
+title:  Geode Pulse
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Geode Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of Geode clusters, members, and regions.
+
+Use Pulse to examine total memory, CPU, and disk space used by members, uptime statistics, client connections, WAN connections, and critical notifications. Pulse communicates with a Geode JMX manager to provide a complete view of your Geode deployment. You can drill down from a high-level cluster view to examine individual members and even regions within a member, to filter the type of information and level of detail.
+
+By default, Geode Pulse runs in an embedded container within a Geode JMX manager node. You can optionally deploy Pulse to a Web application server of your choice, so that the tool runs independently of your Geode clusters. Hosting Pulse on an application server also enables you to use SSL for accessing the application.
+
+-   **[Pulse System Requirements](pulse-requirements.html)**
+
+    Verify that your system meets the installation and runtime requirements for Pulse.
+
+
+-   **[Running Pulse in Embedded Mode (Quick Start)](pulse-embedded.html)**
+
+    Use Pulse in embedded mode to monitor a Geode deployment directly from a Geode JMX Manager. By default, the embedded Pulse application connects to the local JMX Manager that hosts the Pulse application. Optionally, configure Pulse to connect to a Geode system of your choice.
+
+-   **[Hosting Pulse on a Web Application Server](pulse-hosted.html)**
+
+    Host Pulse on a dedicated Web application server to make the Pulse application available at a consistent address, or to use SSL for accessing the Pulse application. When you host Pulse in this way, you also configure Pulse to connect to a specific locator or JMX Manager node for monitoring.
+
+-   **[Configuring Pulse Authentication](pulse-auth.html)**
+
+    Pulse requires all users to authenticate themselves before they can use the Pulse Web application. If you have configured JMX authentication on the Geode JMX Manager node, the Pulse Web application itself may also need to authenticate itself to the Geode JMX Manager node on startup.
+
+-   **[Using Pulse Views](pulse-views.html)**
+
+    Pulse provides a variety of different views to help you monitor Geode clusters, members, and regions.
+
+

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-requirements.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-requirements.html.md.erb b/geode-docs/tools_modules/pulse/pulse-requirements.html.md.erb
new file mode 100644
index 0000000..1163983
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-requirements.html.md.erb
@@ -0,0 +1,34 @@
+---
+title:  Pulse System Requirements
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Verify that your system meets the installation and runtime requirements for Pulse.
+
+The Pulse Web application has been tested for compatibility with the following Web browsers:
+
+-   Internet Explorer 9.0.8112.16421
+-   Safari 5.1.7 for Windows
+-   Google Chrome 22.0.1229.79 m
+-   Mozilla Firefox 16.0.1
+
+Pulse has been tested for standalone deployment on Tomcat and Jetty.
+Pulse may also work with other application servers, operating systems, and browsers on which it has not been tested.
+
+

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-views.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-views.html.md.erb b/geode-docs/tools_modules/pulse/pulse-views.html.md.erb
new file mode 100644
index 0000000..d3bb367
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-views.html.md.erb
@@ -0,0 +1,453 @@
+---
+title: Using Pulse Views
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Pulse provides a variety of different views to help you monitor Geode clusters, members, and regions.
+
+The following sections provide an overview of the main Pulse views:
+
+-   [Cluster View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8)
+-   [Member View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF)
+-   [Region View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D)
+-   [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser)
+-   [Alerts Widget](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_bfk_sc3_wn)
+
+# <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8" class="no-quick-link"></a>Cluster View
+
+The cluster view is a high-level overview of the Geode distributed system. It is displayed immediately after you log into Pulse. Information displays around the perimeter of the cluster view show statistics such as memory usage, JVM pauses, and throughput. You can use the cluster view to drill down into details for individual members and regions in the distributed system.
+
+<img src="../../images/pulse_cluster_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_CC7B54903DF24030850E55965CDB6EC4" class="image imageleft" width="624" />
+
+Use these basic controls while in Cluster view:
+
+1.  Click Members or Data to display information about Geode members or data regions in the distributed system.
+2.  Click the display icons to display the Geode members using icon view, block view, or table view. Note that icon view is available only when displaying Members.
+
+    For example, the following shows Geode Members displayed in table view:
+
+    <img src="../../images/member_view_list.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_npw_sq3_wn" class="image" />
+    -   While in block view or table view, click the name of a Geode member to display additional information in the [Member View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF).
+    -   Click Topology, Server Groups, or Redundancy Zones to filter the view based on all members in the topology, configured server groups, or configured redundancy zones.
+    The following shows Geode Regions displayed in table view:
+    <img src="../../images/pulse-region-detail.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_glp_1jr_54" class="image" />
+    -   While in block view or table view, click the name of a Geode region to display additional information in the [Region View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D).
+
+3.  While in icon view, click a host machine icon to display the Geode members on that machine.
+4.  In the Alerts pane, click the severity tabs to filter the message display by the level of severity.
+
+**Cluster View Screen Components**
+
+The following table describes the data pieces displayed on the Cluster View screen.
+
+<table>
+<colgroup>
+<col width="50%" />
+<col width="50%" />
+</colgroup>
+<thead>
+<tr class="header">
+<th>Screen Component</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr class="odd">
+<td><strong>Cluster Status</strong></td>
+<td>Overall status of the distributed system being monitored. Possible statuses include Normal, Warning, or Severe.</td>
+</tr>
+<tr class="even">
+<td>Total Heap</td>
+<td>Total amount of memory (in GB) allocated to the Java heap across all members.</td>
+</tr>
+<tr class="odd">
+<td>Members</td>
+<td>Total number of members in the cluster.</td>
+</tr>
+<tr class="even">
+<td>Servers</td>
+<td>Total number of servers in the cluster.</td>
+</tr>
+<tr class="odd">
+<td>Clients</td>
+<td>Total number of clients in the cluster.</td>
+</tr>
+<tr class="even">
+<td>Locators</td>
+<td>Total number of locators in the cluster.</td>
+</tr>
+<tr class="odd">
+<td>Regions</td>
+<td>Total number of regions in the cluster.</td>
+</tr>
+<tr class="even">
+<td>Functions</td>
+<td>Total number of functions registered in the cluster.</td>
+</tr>
+<tr class="odd">
+<td>Unique CQs</td>
+<td>Total number of unique CQs. Corresponds to the UNIQUE_CQ_QUERY statistic.</td>
+</tr>
+<tr class="even">
+<td>Subscriptions</td>
+<td>Total number of client event subscriptions.</td>
+</tr>
+<tr class="odd">
+<td><strong>Cluster Members</strong></td>
+<td>Graphical, block, or table view of the members in the cluster.</td>
+</tr>
+<tr class="even">
+<td>Topology</td>
+<td>Organizes cluster members by DistributedMember Id.</td>
+</tr>
+<tr class="odd">
+<td>Server Groups</td>
+<td>Organizes cluster members by server group membership. If no server groups are configured, all members appear under the &quot;Default&quot; server group.</td>
+</tr>
+<tr class="even">
+<td>Redundancy Zones</td>
+<td>Organizes cluster members by redundancy zones. If no redundancy zones are configured, all members appear under the &quot;Default&quot; zone.</td>
+</tr>
+<tr class="odd">
+<td>Host Machine</td>
+<td>When you mouse over a machine icon in Topology View, a pop-up appears with the following machine statistics:
+<ul>
+<li><em>CPU Usage</em>. Percentage of CPU being used by Geode processes on the machine.</li>
+<li><em>Memory Usage</em>. Amount of memory (in MB) being used by Geode processes.</li>
+<li><em>Load Avg</em>. Average number of threads on the host machine that are in the run queue or are waiting for disk I/O over the last minute. Corresponds to the Linux System statistic loadAverage1. If the load average is not available, a negative value is shown.</li>
+<li><em>Sockets</em>. Number of sockets currently open on the machine.</li>
+</ul></td>
+</tr>
+<tr class="even">
+<td>Member</td>
+<td>When you mouse over a member icon in Graphical View, a pop-up appears with the following member statistics:
+<ul>
+<li><em>CPU Usage</em>. Percentage of CPU being used by the Geode member process.</li>
+<li><em>Threads</em>. Number of threads running on the member.</li>
+<li><em>JVM Pauses</em>. Number of times the JVM used by the member process has paused due to garbage collection or excessive CPU usage.</li>
+<li><em>Regions</em>. Number of regions hosted on the member process.</li>
+<li><em>Clients</em>. Number of clients currently connected to the member process.</li>
+<li><em>Gateway Sender</em>. Number of gateway senders configured on the member.</li>
+<li><em>Port</em>. Server port of the cache server member where clients can connect and perform cache operations.</li>
+<li><em>GemFire Version</em>. The version of the Geode member.</li>
+</ul></td>
+</tr>
+<tr class="odd">
+<td>Member</td>
+<td>In List View, the following data fields are displayed for each member:
+<ul>
+<li><em>ID</em>. DistributedMember Id of the member.</li>
+<li><em>Name</em>. Name of the member.</li>
+<li><em>Host</em>. Hostname or IP address where the member is running.</li>
+<li><em>Heap Usage</em>. Amount of JVM heap memory being used by the member process.</li>
+<li><em>CPU Usage</em>. Percentage of CPU being used by the Geode member process.</li>
+<li><em>Uptime</em>. How long the member has been up and running.</li>
+<li><em>Clients</em>. Number of clients currently connected to the member. It will have a value only if the member acts as a CacheServer.</li>
+</ul></td>
+</tr>
+<tr class="even">
+<td><strong>Key Statistics</strong></td>
+<td>Displays a few key performance measurements of the distributed system (over the last 15 minutes).</td>
+</tr>
+<tr class="odd">
+<td>Write/Sec</td>
+<td>Number of write operations per second that have occurred across the cluster. Each put/putAll operation counts as a write; for example, a putAll of 50 entries is counted as one write.</td>
+</tr>
+<tr class="even">
+<td>Read/Sec</td>
+<td>Number of read operations per second that have occurred across the cluster.</td>
+</tr>
+<tr class="odd">
+<td>Queries/Sec</td>
+<td>Number of queries per second that have been executed across the cluster.</td>
+</tr>
+<tr class="even">
+<td><strong>No. of JVM Pauses</strong></td>
+<td>Number of times the JVM has paused during the last five minutes to perform garbage collection.</td>
+</tr>
+<tr class="odd">
+<td><strong>WAN Information</strong></td>
+<td>If you have configured gateway senders or receivers for a multi-site (WAN) deployment, this box displays whether the remote cluster is reachable (working connectivity represented by a green triangle).</td>
+</tr>
+<tr class="even">
+<td><strong>Disk Throughput</strong></td>
+<td>Total disk throughput for all disks in cluster.</td>
+</tr>
+<tr class="odd">
+<td><strong>Alerts View</strong></td>
+<td>Displays alerts for the cluster.</td>
+</tr>
+</tbody>
+</table>
+
+# <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF" class="no-quick-link"></a>Member View
+
+When you select an individual Geode member in Cluster View, Pulse displays the regions available on that member, as well as member-specific information such as the configured listen ports.
+
+<img src="../../images/pulse_member_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_EDBD3D333B2741DCAA5CB94719B507B7" class="image imageleft" width="624" />
+
+Use these basic controls while in Member View:
+
+1.  Click the display icons to display regions using block view or table view.
+2.  Use the drop down menu to select a specific member or search for specific members by name.
+3.  Click **Cluster View** to return to Cluster View. See [Cluster View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8).
+4.  Click **Data Browser** to query region data. See [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser).
+
+**Member View Screen Components**
+
+The following table describes the data elements displayed on the Member View screen.
+
+<table>
+<colgroup>
+<col width="50%" />
+<col width="50%" />
+</colgroup>
+<thead>
+<tr class="header">
+<th>Screen Component</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr class="odd">
+<td><strong>Member Status</strong></td>
+<td>Overall status of the member being monitored. Possible statuses include Normal, Warning, or Severe.</td>
+</tr>
+<tr class="even">
+<td>Regions</td>
+<td>Total number of regions hosted on the member.</td>
+</tr>
+<tr class="odd">
+<td>Threads</td>
+<td>Total number of threads being executed on the member.</td>
+</tr>
+<tr class="even">
+<td>Sockets</td>
+<td>Total number of sockets currently open on the member.</td>
+</tr>
+<tr class="odd">
+<td>Load Avg.</td>
+<td>Average number of threads on the member that are in the run queue or are waiting for disk I/O over the last minute. Corresponds to the Linux System statistic loadAverage1. If the load average is not available, a negative value is shown.</td>
+</tr>
+<tr class="even">
+<td>Clients</td>
+<td>Current number of client connections to the member.</td>
+</tr>
+<tr class="odd">
+<td><strong>Member Regions</strong></td>
+<td>Block or table view of the regions hosted on the member.</td>
+</tr>
+<tr class="even">
+<td>Regions</td>
+<td>When you mouse over a region in block view, a pop-up appears with the following data fields:
+<ul>
+<li><em>Name</em>. Region name.</li>
+<li><em>Type</em>. For example, REPLICATE, PARTITION.</li>
+<li><em>EntryCount</em>. Number of entries in the region.</li>
+<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries. For replicated regions this field will only provide a value if the eviction algorithm has been set to EvictionAlgorithm#LRU_MEMORY. All partition regions will have this value. However, the value includes redundant entries and will also count the size of all the secondary entries on the node.</li>
+</ul></td>
+</tr>
+<tr class="odd">
+<td>Regions</td>
+<td>In table view, the following fields are listed for each region:
+<ul>
+<li><em>Name</em>. Region name.</li>
+<li><em>Type</em>. For example, REPLICATE, PARTITION.</li>
+<li><em>EntryCount</em>. Number of entries in the region.</li>
+<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries. For replicated regions this field will only provide a value if the eviction algorithm has been set to EvictionAlgorithm#LRU_MEMORY. All partition regions will have this value. However, the value includes redundant entries and will also count the size of all the secondary entries on the node.</li>
+<li><em>Scope</em>. Scope configured for the region.</li>
+<li><em>Disk Store Name</em>. Name of disk stores (if any) associated with the region.</li>
+<li><em>Disk Synchronous</em>. True if writes to disk are set to synchronous and false if not. This field reflects the configured disk-synchronous region attribute.</li>
+<li><em>Gateway Enabled</em>. Whether gateway sender and receiver configurations have been defined on members hosting this region.</li>
+</ul></td>
+</tr>
+<tr class="even">
+<td><strong>Member Clients</strong></td>
+<td>In table view, the following fields are listed for each client:
+<ul>
+<li><em>Id</em>. DistributedMember ID of the client process.</li>
+<li><em>Name</em>. Name of the client process.</li>
+<li><em>Host</em>. Hostname or IP address of the client process.</li>
+<li><em>Connected</em>. Whether the client process is currently connected to the member.</li>
+<li><em>Queue Size</em>. The size of the queue that the server uses to send events to a subscription-enabled client or to a client that has continuous queries running on the server.</li>
+<li><em>CPU Usage</em>. Percentage of CPU being used by the client process.</li>
+<li><em>Uptime</em>. Amount of time the client process has been running.</li>
+<li><em>Threads</em>. Number of threads in use by the client process.</li>
+<li><em>Gets</em>. Total number of successful get requests completed.</li>
+<li><em>Puts</em>. Total number of successful put requests completed.</li>
+</ul></td>
+</tr>
+<tr class="odd">
+<td><strong>Key Statistics</strong></td>
+<td>Displays a few key performance measurements for the member (over the last 15 minutes).</td>
+</tr>
+<tr class="even">
+<td>% CPU Usage</td>
+<td>Percentage of CPU used by the member.</td>
+</tr>
+<tr class="odd">
+<td>Read/Sec</td>
+<td>Number of read operations per second that have occurred on the member.</td>
+</tr>
+<tr class="even">
+<td>Write/Sec</td>
+<td>Number of write operations per second that have occurred on the member. Each put/putAll operation counts as a write; for example, a putAll of 50 entries is counted as one write.</td>
+</tr>
+<tr class="odd">
+<td><strong>Memory Usage</strong></td>
+<td>Total memory used on the member in MB.</td>
+</tr>
+<tr class="even">
+<td><strong>No. of JVM Pauses</strong></td>
+<td>Number of times the JVM has paused during the last five minutes due to garbage collection or excessive CPU usage.</td>
+</tr>
+<tr class="odd">
+<td><strong>WAN Information</strong></td>
+<td>Displays cluster information. This dialog box only appears if you have configured WAN functionality (gateway senders and gateway receivers).</td>
+</tr>
+<tr class="even">
+<td><strong>Disk Throughput</strong></td>
+<td>Rate of disk writes on the member.</td>
+</tr>
+</tbody>
+</table>
+
+# <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D" class="no-quick-link"></a>Region View
+
+The Pulse Region View provides a comprehensive overview of all regions in the Geode distributed system:
+
+<img src="../../images/pulse_data_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_A533852E38654E79BE5628E938E170EB" class="image imageleft" width="624" />
+
+Use these basic controls while in Region View:
+
+1.  Click the display icons to display all members that host the region using block view or table view.
+
+    (Click the name of a member to change to that member's [Member View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF).)
+
+2.  Search for specific members that host the current region.
+3.  Hover over a member name to display information such as the region entry count, entry size, and throughput on that member.
+4.  Click [Cluster View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8) or [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser) to go to those screens.
+
+**Region View Screen Components**
+
+The following table describes the data elements displayed on the Region View screen.
+
+<table>
+<colgroup>
+<col width="50%" />
+<col width="50%" />
+</colgroup>
+<thead>
+<tr class="header">
+<th>Screen Component</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr class="odd">
+<td><strong>Region Members</strong></td>
+<td>Lists information about Geode members that host the region, either in block view or table view.</td>
+</tr>
+<tr class="even">
+<td>Region Member (Detail View)</td>
+<td>When you hover over a region member in block view, a pop-up appears with the following data fields:
+<ul>
+<li><em>Member Name</em>. The name of the Geode member hosting the region.</li>
+<li><em>EntryCount</em>. Number of entries for the region on that member.</li>
+<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries on that member. For replicated regions this field will only provide a value if the eviction algorithm has been set to EvictionAlgorithm#LRU_MEMORY. All partition regions will have this value. However, the value includes redundant entries and will also count the size of all the secondary entries on the node.</li>
+<li><em>Accessor</em>. Indicates whether the member is an accessor member.</li>
+<li><em>Reads/Writes</em>. Summary of reads and writes served from memory and from disk stores over the last 15 minutes.</li>
+</ul></td>
+</tr>
+<tr class="odd">
+<td>Region Member (Table View)</td>
+<td>In table view, the following fields are listed for each region member:
+<ul>
+<li><em>ID</em>. The unique member ID.</li>
+<li><em>Name</em>. Region name.</li>
+<li><em>Host</em>. Member hostname.</li>
+<li><em>Heap Usage</em>. The total amount of heap used on the member in MB.</li>
+<li><em>CPU Usage</em>. CPU usage as a percent of available CPU.</li>
+<li><em>Uptime</em>. The amount of time elapsed since the member started.</li>
+<li><em>Accessor</em>. Indicates whether the member is an accessor member.</li>
+</ul></td>
+</tr>
+<tr class="even">
+<td><strong>Region Detail</strong></td>
+<td>When you have selected a region, the right hand pane displays the following information about the region:
+<ul>
+<li><em>Name</em>. Name of the region.</li>
+<li><em>Region Path</em>. Path for the region.</li>
+<li><em>Type</em>. For example, REPLICATE, PARTITION.</li>
+<li><em>Members</em>. Number of members that are hosting the region.</li>
+<li><em>Empty Nodes</em>. Nodes where the region DataPolicy is defined as EMPTY or where LocalMaxMemory is set to 0.</li>
+<li><em>Entry Count</em>. Total number of entries in the region.</li>
+<li><em>Disk Usage</em>. Persistent data usage.</li>
+<li><em>Persistence</em>. Indicates whether the region's data is persisted to disk.</li>
+<li><em>Memory Usage</em>. The amount of memory used and total available memory (also shown as a percentage).</li>
+<li><em>Reads/Writes</em>. Summary of reads and writes served from memory and from disk stores over the last 15 minutes.</li>
+</ul></td>
+</tr>
+</tbody>
+</table>
+
+# <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser" class="no-quick-link"></a>Data Browser
+
+The Pulse Data Browser enables you to query region data. Note that there are two key attributes available on DistributedSystemMXBean (see [List of Geode JMX MBeans](../../managing/management/list_of_mbeans.html#topic_4BCF867697C3456D96066BAD7F39FC8B)) that you can use to configure limits for the result sets displayed in Data Browser:
+
+-   `QueryResultSetLimit` limits the number of rows that Data Browser queries return. 1000 rows are displayed by default.
+-   `QueryCollectionsDepth` limits the number of elements of a collection that Data Browser queries return. This attribute applies to query results that contain collections such as Map, List, and so forth. The default value is 100 elements.
+
+See the `org.apache.geode.management.DistributedSystemMXBean` JavaDocs for information on available MBean methods and attributes.
+
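+For illustration, the following is a hedged Java sketch of adjusting these limits over JMX before
+browsing large result sets. It assumes the JMX Manager exposes RMI at localhost:1099, that no JMX
+credentials are required, and that the standard DistributedSystemMXBean object name shown below is
+used; adapt the URL, credentials, and limit values to your deployment.
+
+``` pre
+import javax.management.Attribute;
+import javax.management.MBeanServerConnection;
+import javax.management.ObjectName;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+
+public class TuneDataBrowserLimits {
+  public static void main(String[] args) throws Exception {
+    JMXServiceURL url =
+        new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi");
+    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
+      MBeanServerConnection mbs = connector.getMBeanServerConnection();
+      // Object name of the DistributedSystemMXBean (assumed form).
+      ObjectName dsBean = new ObjectName("GemFire:service=System,type=Distributed");
+      // Cap Data Browser queries at 500 rows and 50 collection elements.
+      mbs.setAttribute(dsBean, new Attribute("QueryResultSetLimit", 500));
+      mbs.setAttribute(dsBean, new Attribute("QueryCollectionsDepth", 50));
+    }
+  }
+}
+```
+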
+The following shows an example Data Browser view:
+
+<img src="../../images/pulse-data-browser.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_hhp_dz2_44" class="image imageleft" width="624" />
+
+Use these basic controls while in Data Browser view:
+
+1.  Search for the name of a specific region.
+2.  Select one or more regions to display the Geode members that host those regions. The hosting Geode members appear in the Region Members section.
+3.  Select one or more members from the Region Members section to restrict query results to those members.
+4.  Type in the text of a query to execute. See [Querying](../../developing/querying_basics/chapter_overview.html).
+5.  Display a list of previously-executed queries. Double-click on a query from the history list to copy it to the Query Editor, or delete the query from your history.
+6.  Execute your query or clear the contents of the Query Editor.
+7.  View the current query results.
+8.  Export the query results to a text file.
+9.  Return to [Cluster View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8).
+
+# <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_bfk_sc3_wn" class="no-quick-link"></a>Alerts Widget
+
+The Alerts Widget appears in the right portion of the screen and displays a list of alerts.
+
+The alerts displayed for the cluster are determined by the alert level set on the DistributedSystemMXBean. By default, log messages with the level of SEVERE are shown as alerts. You can modify the level by using the `DistributedSystemMXBean.changeAlertLevel` method. See [System Alert Notifications](../../managing/management/notification_federation_and_alerts.html#topic_212EE5A2ABAB4E8E8EF71807C9ECEF1A__section_7463D13112D54406953416356835E290) for more information.
+
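+Following the same JMX connection pattern as the Data Browser sketch earlier on this page, a hedged
+fragment for lowering the alert level so that WARNING messages also appear as alerts (the object
+name, the `mbs` connection, and the operation signature are carried-over assumptions):
+
+``` pre
+ObjectName dsBean = new ObjectName("GemFire:service=System,type=Distributed");
+// Assumed operation signature: changeAlertLevel(String level).
+mbs.invoke(dsBean, "changeAlertLevel",
+    new Object[] {"warning"}, new String[] {"java.lang.String"});
+```
+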
+<img src="../../images/pulse_alerts_widget.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_jrc_smt_qn" class="image" />
+
+Use these basic controls in the Alerts Widget:
+
+1.  Select an alert level to view only alerts with a specific severity.
+2.  Enter text in the search box to filter the list of alerts.
+3.  Select an alert and click Clear to remove it from the alert list.
+4.  Click **Clear All** to remove all alerts from the widget.
+5.  Double-click an alert to open a pop-up window that displays the full text of the alert message.
+6.  Click the check mark in an alert pop-up window to acknowledge the alert. Acknowledged alerts display a check mark in the list of alerts.
+7.  Triple-click the alert in the pop-up or in the alert list to select the message text. You can then copy and paste the text into another application.
+8.  Click the **X** to close the pop-up alert window.
+


[03/43] geode git commit: Undoing spark connector changes related to geode 1.2

Posted by kl...@apache.org.
Undoing spark connector changes related to geode 1.2

The spark connector builds against geode 1.0.0-incubating. The spark
connector was no longer compiling with these changes.


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/0dae918d
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/0dae918d
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/0dae918d

Branch: refs/heads/feature/GEODE-2632-17
Commit: 0dae918df3b4c7bc53abdbf57c92dddba8e814f2
Parents: e79d27d
Author: Lynn Hughes-Godfrey <lh...@pivotal.io>
Authored: Wed May 24 15:31:09 2017 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu May 25 10:36:36 2017 -0700

----------------------------------------------------------------------
 .../geodefunctions/RetrieveRegionFunction.java  | 20 ++++++++++----------
 .../internal/DefaultGeodeConnection.scala       |  4 ++--
 2 files changed, 12 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/0dae918d/geode-spark-connector/geode-functions/src/main/java/org/apache/geode/spark/connector/internal/geodefunctions/RetrieveRegionFunction.java
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-functions/src/main/java/org/apache/geode/spark/connector/internal/geodefunctions/RetrieveRegionFunction.java b/geode-spark-connector/geode-functions/src/main/java/org/apache/geode/spark/connector/internal/geodefunctions/RetrieveRegionFunction.java
index 7407cc8..096e4d5 100644
--- a/geode-spark-connector/geode-functions/src/main/java/org/apache/geode/spark/connector/internal/geodefunctions/RetrieveRegionFunction.java
+++ b/geode-spark-connector/geode-functions/src/main/java/org/apache/geode/spark/connector/internal/geodefunctions/RetrieveRegionFunction.java
@@ -16,24 +16,25 @@
  */
 package org.apache.geode.spark.connector.internal.geodefunctions;
 
+import java.util.Iterator;
+import org.apache.logging.log4j.Logger;
+
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.execute.Function;
-import org.apache.geode.cache.execute.FunctionContext;
 import org.apache.geode.cache.execute.FunctionException;
-import org.apache.geode.cache.partition.PartitionRegionHelper;
 import org.apache.geode.cache.query.Query;
 import org.apache.geode.cache.query.QueryService;
 import org.apache.geode.cache.query.SelectResults;
 import org.apache.geode.cache.query.Struct;
+import org.apache.geode.internal.cache.*;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.execute.Function;
+import org.apache.geode.cache.execute.FunctionContext;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
 import org.apache.geode.internal.cache.execute.InternalRegionFunctionContext;
 import org.apache.geode.internal.cache.execute.InternalResultSender;
 import org.apache.geode.internal.cache.partitioned.PREntriesIterator;
 import org.apache.geode.internal.logging.LogService;
-import org.apache.logging.log4j.Logger;
-
-import java.util.Iterator;
 
 /**
  * GemFire function that is used by `SparkContext.geodeRegion(regionPath, whereClause)`
@@ -84,11 +85,10 @@ public class RetrieveRegionFunction implements Function {
     InternalRegionFunctionContext irfc = (InternalRegionFunctionContext) context;
     LocalRegion localRegion = (LocalRegion) irfc.getDataSet();
     boolean partitioned = localRegion.getDataPolicy().withPartitioning();
-    if (StringUtils.isBlank(where)) {
+    if (where.trim().isEmpty())
       retrieveFullRegion(irfc, partitioned, taskDesc);
-    } else {
+    else
       retrieveRegionWithWhereClause(irfc, localRegion, partitioned, where, taskDesc);
-    }
   }
 
   /** ------------------------------------------ */

http://git-wip-us.apache.org/repos/asf/geode/blob/0dae918d/geode-spark-connector/geode-spark-connector/src/main/scala/org/apache/geode/spark/connector/internal/DefaultGeodeConnection.scala
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-spark-connector/src/main/scala/org/apache/geode/spark/connector/internal/DefaultGeodeConnection.scala b/geode-spark-connector/geode-spark-connector/src/main/scala/org/apache/geode/spark/connector/internal/DefaultGeodeConnection.scala
index b5dcf1d..670a3f8 100644
--- a/geode-spark-connector/geode-spark-connector/src/main/scala/org/apache/geode/spark/connector/internal/DefaultGeodeConnection.scala
+++ b/geode-spark-connector/geode-spark-connector/src/main/scala/org/apache/geode/spark/connector/internal/DefaultGeodeConnection.scala
@@ -129,7 +129,7 @@ private[connector] class DefaultGeodeConnection (
     val collector = new StructStreamingResultCollector(desc)
         // RetrieveRegionResultCollector[(K, V)]
     import scala.collection.JavaConversions.setAsJavaSet
-    val exec = FunctionService.onRegion(region).setArguments(args).withCollector(collector).asInstanceOf[InternalExecution]
+    val exec = FunctionService.onRegion(region).withArgs(args).withCollector(collector).asInstanceOf[InternalExecution]
       .withBucketFilter(split.bucketSet.map(Integer.valueOf))
     exec.setWaitOnExceptionFlag(true)
     exec.execute(RetrieveRegionFunction.ID)
@@ -144,7 +144,7 @@ private[connector] class DefaultGeodeConnection (
     val args: Array[String] = Array[String](queryString, bucketSet.toString)
     val exec = FunctionService.onRegion(region).withCollector(collector).asInstanceOf[InternalExecution]
       .withBucketFilter(bucketSet.map(Integer.valueOf))
-      .setArguments(args)
+      .withArgs(args)
     exec.execute(QueryFunction.ID)
     collector.getResult
   }


[05/43] geode git commit: GEODE-2944: Added __REGION_VALUE_FIELD explanation to lucene create index help

Posted by kl...@apache.org.
GEODE-2944: Added __REGION_VALUE_FIELD explanation to lucene create index help

	This closes #533


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/b7faa083
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/b7faa083
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/b7faa083

Branch: refs/heads/feature/GEODE-2632-17
Commit: b7faa083aa9da5a9e7c3c579584be8f28faae02d
Parents: c793f74
Author: David Anuta <da...@gmail.com>
Authored: Wed May 24 17:05:33 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Thu May 25 11:23:30 2017 -0700

----------------------------------------------------------------------
 .../apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/b7faa083/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
index d0a2999..db9f7b9 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
@@ -40,7 +40,7 @@ public class LuceneCliStrings {
       "Name/Path of the region on which to create the lucene index.";
   public static final String LUCENE_CREATE_INDEX__FIELD = "field";
   public static final String LUCENE_CREATE_INDEX__FIELD_HELP =
-      "fields on the region values which are stored in the lucene index.";
+      "Fields on the region values which are stored in the lucene index.\nUse __REGION_VALUE_FIELD if the entire region value should be indexed.\n__REGION_VALUE_FIELD is valid only if the region values are strings or numbers.";
   public static final String LUCENE_CREATE_INDEX__ANALYZER = "analyzer";
   public static final String LUCENE_CREATE_INDEX__ANALYZER_HELP =
       "Type of the analyzer for each field.";


[10/43] geode git commit: GEODE-2950: Updated error messages

Posted by kl...@apache.org.
GEODE-2950: Updated error messages

	* Different error messages are displayed when validating region names versus indexes.
	* Added flexible enum allowing for expanded error messages and name validation.

	This closes #540


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/5ab4a693
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/5ab4a693
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/5ab4a693

Branch: refs/heads/feature/GEODE-2632-17
Commit: 5ab4a69378c697fdf050048165e2945a4b028eb7
Parents: 0fe0a10
Author: David Anuta <da...@gmail.com>
Authored: Thu May 25 15:18:40 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Thu May 25 16:14:30 2017 -0700

----------------------------------------------------------------------
 .../lucene/internal/LuceneServiceImpl.java      | 58 +++++++++++---------
 .../functions/LuceneCreateIndexFunction.java    |  7 ++-
 .../cli/LuceneIndexCommandsDUnitTest.java       | 10 ++--
 3 files changed, 43 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/5ab4a693/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
index c0d6266..23b6925 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
@@ -128,34 +128,42 @@ public class LuceneServiceImpl implements InternalLuceneService {
     return getUniqueIndexName(indexName, regionPath) + regionSuffix;
   }
 
-  public static void validateCreateIndexCommandParams(String name, boolean isRegionPath) {
-    if (name == null) {
-      throw new IllegalArgumentException(
-          LocalizedStrings.LocalRegion_NAME_CANNOT_BE_NULL.toLocalizedString());
-    }
-    if (name.isEmpty()) {
-      throw new IllegalArgumentException(
-          LocalizedStrings.LocalRegion_NAME_CANNOT_BE_EMPTY.toLocalizedString());
-    }
+  public enum validateCommandParameters {
+    REGION_PATH, INDEX_NAME;
 
-    if (name.startsWith("__")) {
-      throw new IllegalArgumentException(
-          "Parameter names may not begin with a double-underscore: " + name);
-    }
+    public void validateName(String name) {
+      if (name == null) {
+        throw new IllegalArgumentException(
+            LocalizedStrings.LocalRegion_NAME_CANNOT_BE_NULL.toLocalizedString());
+      }
+      if (name.isEmpty()) {
+        throw new IllegalArgumentException(
+            LocalizedStrings.LocalRegion_NAME_CANNOT_BE_EMPTY.toLocalizedString());
+      }
 
-    final Pattern NAME_PATTERN;
-    if (isRegionPath) {
-      NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_./]+");
-    } else {
-      NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_.]+");
-    }
+      boolean iae = false;
+      String msg =
+          " names may only be alphanumeric, must not begin with double-underscores, but can contain hyphens";
+      Matcher matcher = null;
+      switch (this) {
+        case REGION_PATH:
+          matcher = Pattern.compile("[aA-zZ0-9-_./]+").matcher(name);
+          msg = "Region" + msg + ", underscores, or forward slashes: ";
+          iae = name.startsWith("__") || !matcher.matches();
+          break;
+        case INDEX_NAME:
+          matcher = Pattern.compile("[aA-zZ0-9-_.]+").matcher(name);
+          msg = "Index" + msg + " or underscores: ";
+          iae = name.startsWith("__") || !matcher.matches();
+          break;
+        default:
+          throw new IllegalArgumentException("Illegal option for validateName function");
+      }
 
-    // Ensure the region only contains valid characters
-    Matcher matcher = NAME_PATTERN.matcher(name);
-    if (!matcher.matches()) {
-      throw new IllegalArgumentException(
-          "Parameter names may only be alphanumeric, though they can contain hyphens or underscores: "
-              + name);
+      // Ensure the region only contains valid characters
+      if (iae) {
+        throw new IllegalArgumentException(msg + name);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/5ab4a693/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
index 26ac0e2..d49f7f9 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
@@ -15,7 +15,8 @@
 
 package org.apache.geode.cache.lucene.internal.cli.functions;
 
-import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateCreateIndexCommandParams;
+import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateCommandParameters.INDEX_NAME;
+import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateCommandParameters.REGION_PATH;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.geode.cache.Cache;
@@ -67,7 +68,7 @@ public class LuceneCreateIndexFunction extends FunctionAdapter implements Intern
       memberId = cache.getDistributedSystem().getDistributedMember().getId();
       LuceneService service = LuceneServiceProvider.get(cache);
 
-      validateCreateIndexCommandParams(indexInfo.getIndexName(), false);
+      INDEX_NAME.validateName(indexInfo.getIndexName());
 
       String[] fields = indexInfo.getSearchableFieldNames();
       String[] analyzerName = indexInfo.getFieldAnalyzers();
@@ -86,7 +87,7 @@ public class LuceneCreateIndexFunction extends FunctionAdapter implements Intern
         }
       }
 
-      validateCreateIndexCommandParams(indexInfo.getRegionPath(), true);
+      REGION_PATH.validateName(indexInfo.getRegionPath());
       indexFactory.create(indexInfo.getIndexName(), indexInfo.getRegionPath());
 
       // TODO - update cluster configuration by returning a valid XmlEntity

http://git-wip-us.apache.org/repos/asf/geode/blob/5ab4a693/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
index 5e9c4f9..5cbe31c 100755
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
@@ -210,7 +210,8 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
     csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
 
     String resultAsString = executeCommandAndLogResult(csb);
-    assertTrue(resultAsString.contains("Parameter names may not begin with a double-underscore:"));
+    assertTrue(resultAsString.contains(
+        "Region names may only be alphanumeric, must not begin with double-underscores, but can contain hyphens, underscores, or forward slashes:"));
 
     csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
     csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, INDEX_NAME);
@@ -219,7 +220,7 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
 
     resultAsString = executeCommandAndLogResult(csb);
     assertTrue(resultAsString.contains(
-        "Parameter names may only be alphanumeric, though they can contain hyphens or underscores:"));
+        "Region names may only be alphanumeric, must not begin with double-underscores, but can contain hyphens, underscores, or forward slashes:"));
 
     csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
     csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "\'__\'");
@@ -227,7 +228,8 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
     csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
 
     resultAsString = executeCommandAndLogResult(csb);
-    assertTrue(resultAsString.contains("Parameter names may not begin with a double-underscore:"));
+    assertTrue(resultAsString.contains(
+        "Index names may only be alphanumeric, must not begin with double-underscores, but can contain hyphens or underscores:"));
 
     csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
     csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "\' @@@*%\'");
@@ -236,7 +238,7 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
 
     resultAsString = executeCommandAndLogResult(csb);
     assertTrue(resultAsString.contains(
-        "Parameter names may only be alphanumeric, though they can contain hyphens or underscores:"));
+        "Index names may only be alphanumeric, must not begin with double-underscores, but can contain hyphens or underscores:"));
   }
 
   @Test

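The same double-underscore rule can also be checked without the DUnit/gfsh round trip used above. Here is a minimal JUnit 4 sketch, assuming direct access to the validateCommandParameters enum; the test class itself is illustrative and not part of the commit.

    import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateCommandParameters.INDEX_NAME;
    import static org.junit.Assert.assertTrue;
    import static org.junit.Assert.fail;

    import org.junit.Test;

    public class IndexNameValidationSketchTest {
      @Test
      public void doubleUnderscoreNamesAreRejected() {
        try {
          INDEX_NAME.validateName("__myIndex");
          fail("expected IllegalArgumentException for a name starting with __");
        } catch (IllegalArgumentException expected) {
          assertTrue(expected.getMessage().contains("must not begin with double-underscores"));
        }
      }
    }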

[17/43] geode git commit: Cleanup CacheClientNotifier

Posted by kl...@apache.org.
Cleanup CacheClientNotifier


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/d66e51d0
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/d66e51d0
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/d66e51d0

Branch: refs/heads/feature/GEODE-2632-17
Commit: d66e51d05087169eb0f29080abf5a0e1678054d4
Parents: d3543d2
Author: Kirk Lund <kl...@apache.org>
Authored: Thu May 18 13:28:38 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:07 2017 -0700

----------------------------------------------------------------------
 .../cache/tier/sockets/CacheClientNotifier.java | 1160 ++++++++----------
 .../cache/tier/sockets/CacheClientProxy.java    |    4 +-
 2 files changed, 542 insertions(+), 622 deletions(-)
----------------------------------------------------------------------



[25/43] geode git commit: Cleanup BaseCommand

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
index 4bd4970..5631184 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
@@ -17,9 +17,7 @@ package org.apache.geode.internal.cache.tier.sockets;
 import static org.apache.geode.distributed.ConfigurationProperties.*;
 
 import java.io.BufferedOutputStream;
-import java.io.DataInput;
 import java.io.DataInputStream;
-import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.lang.reflect.Method;
@@ -70,12 +68,7 @@ import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.distributed.internal.DM;
 import org.apache.geode.distributed.internal.DistributionConfig;
-import org.apache.geode.distributed.internal.DistributionManager;
-import org.apache.geode.distributed.internal.HighPriorityDistributionMessage;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.distributed.internal.MessageWithReply;
-import org.apache.geode.distributed.internal.ReplyMessage;
-import org.apache.geode.distributed.internal.ReplyProcessor21;
 import org.apache.geode.internal.ClassLoadUtil;
 import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.internal.statistics.DummyStatisticsFactory;
@@ -127,6 +120,22 @@ import org.apache.geode.security.AuthenticationRequiredException;
 public class CacheClientNotifier {
   private static final Logger logger = LogService.getLogger();
 
+  /**
+   * The size of the server-to-client communication socket buffers. This can be modified using the
+   * BridgeServer.SOCKET_BUFFER_SIZE system property.
+   */
+  private static final int socketBufferSize =
+    Integer.getInteger("BridgeServer.SOCKET_BUFFER_SIZE", 32768);
+
+  private static final long CLIENT_PING_TASK_PERIOD =
+    Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingPeriod", 60000);
+
+  /**
+   * package-private to avoid synthetic accessor
+   */
+  static final long CLIENT_PING_TASK_COUNTER =
+    Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingCounter", 3);
+
   private static volatile CacheClientNotifier ccnSingleton;
 
   /**
@@ -149,20 +158,6 @@ public class CacheClientNotifier {
 
   private final Set<ClientProxyMembershipID> timedOutDurableClientProxies = new HashSet<>();
 
-  /**
-   * The GemFire {@code InternalCache}. Note that since this is a singleton class you should not use
-   * a direct reference to cache in CacheClientNotifier code. Instead, you should always use
-   * {@code getCache()}
-   */
-  private InternalCache cache; // TODO: fix synchronization of cache
-
-  private InternalLogWriter logWriter;
-
-  /**
-   * The GemFire security {@code LogWriter}
-   */
-  private InternalLogWriter securityLogWriter;
-
   /** the maximum number of messages that can be enqueued in a client-queue. */
   private final int maximumMessageCount;
 
@@ -179,24 +174,9 @@ public class CacheClientNotifier {
   private final CacheServerStats acceptorStats;
 
   /**
-   * haContainer can hold either the name of the client-messages-region (in case of eviction
-   * policies "mem" or "entry") or an instance of HashMap (in case of eviction policy "none"). In
-   * both the cases, it'll store HAEventWrapper as its key and ClientUpdateMessage as its value.
-   */
-  private volatile HAContainerWrapper haContainer;
-
-  /**
-   * The size of the server-to-client communication socket buffers. This can be modified using the
-   * BridgeServer.SOCKET_BUFFER_SIZE system property.
-   */
-  private static final int socketBufferSize =
-      Integer.getInteger("BridgeServer.SOCKET_BUFFER_SIZE", 32768);
-
-  /**
    * The statistics for this notifier
    */
-  final CacheClientNotifierStats statistics; // TODO: pass statistics into CacheClientProxy then
-                                             // make private
+  final CacheClientNotifierStats statistics; // TODO: pass statistics into CacheClientProxy
 
   /**
    * The {@code InterestRegistrationListener} instances registered in this VM. This is used when
@@ -209,55 +189,41 @@ public class CacheClientNotifier {
    * provide a read-only {@code Set} of listeners.
    */
   private final Set readableInterestRegistrationListeners =
-      Collections.unmodifiableSet(this.writableInterestRegistrationListeners);
-
-  /**
-   * System property name for indicating how much frequently the "Queue full" message should be
-   * logged.
-   */
-  private static final String MAX_QUEUE_LOG_FREQUENCY =
-      DistributionConfig.GEMFIRE_PREFIX + "logFrequency.clientQueueReachedMaxLimit";
+    Collections.unmodifiableSet(this.writableInterestRegistrationListeners);
 
-  public static final long DEFAULT_LOG_FREQUENCY = 1000;
+  private final Map<String, DefaultQuery> compiledQueries = new ConcurrentHashMap<>();
 
-  private static final String EVENT_ENQUEUE_WAIT_TIME_NAME =
-      DistributionConfig.GEMFIRE_PREFIX + "subscription.EVENT_ENQUEUE_WAIT_TIME";
+  private final Object lockIsCompiledQueryCleanupThreadStarted = new Object();
 
-  private static final int DEFAULT_EVENT_ENQUEUE_WAIT_TIME = 100;
+  private final SocketCloser socketCloser;
 
-  /**
-   * System property value denoting the time in milliseconds. Any thread putting an event into a
-   * subscription queue, which is full, will wait this much time for the queue to make space. It'll
-   * then enque the event possibly causing the queue to grow beyond its capacity/max-size. See
-   * #51400.
-   */
-  public static int eventEnqueueWaitTime; // TODO: encapsulate eventEnqueueWaitTime
+  /** package-private to avoid synthetic accessor */
+  final Set blackListedClients = new CopyOnWriteArraySet();
 
   /**
-   * The frequency of logging the "Queue full" message.
+   * haContainer can hold either the name of the client-messages-region (in case of eviction
+   * policies "mem" or "entry") or an instance of HashMap (in case of eviction policy "none"). In
+   * both the cases, it'll store HAEventWrapper as its key and ClientUpdateMessage as its value.
    */
-  private long logFrequency = DEFAULT_LOG_FREQUENCY;
-
-  private final Map<String, DefaultQuery> compiledQueries = new ConcurrentHashMap<>();
+  private volatile HAContainerWrapper haContainer;
 
   private volatile boolean isCompiledQueryCleanupThreadStarted = false;
 
-  private final Object lockIsCompiledQueryCleanupThreadStarted = new Object();
-
-  private SystemTimer.SystemTimerTask clientPingTask; // TODO: fix synchronization of clientPingTask
-
-  private final SocketCloser socketCloser;
+  /**
+   * The GemFire {@code InternalCache}. Note that since this is a singleton class you should not use
+   * a direct reference to cache in CacheClientNotifier code. Instead, you should always use
+   * {@code getCache()}
+   */
+  private InternalCache cache; // TODO: fix synchronization of cache
 
-  private static final long CLIENT_PING_TASK_PERIOD =
-      Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingPeriod", 60000);
+  private InternalLogWriter logWriter;
 
   /**
-   * package-private to avoid synthetic accessor
+   * The GemFire security {@code LogWriter}
    */
-  static final long CLIENT_PING_TASK_COUNTER =
-      Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingCounter", 3);
+  private InternalLogWriter securityLogWriter;
 
-  private final Set blackListedClients = new CopyOnWriteArraySet();
+  private SystemTimer.SystemTimerTask clientPingTask; // TODO: fix synchronization of clientPingTask
 
   /**
    * Factory method to construct a CacheClientNotifier {@code CacheClientNotifier} instance.
@@ -319,21 +285,6 @@ public class CacheClientNotifier {
     }
     this.statistics = new CacheClientNotifierStats(factory);
 
-    try {
-      this.logFrequency = Long.valueOf(System.getProperty(MAX_QUEUE_LOG_FREQUENCY));
-      if (this.logFrequency <= 0) {
-        this.logFrequency = DEFAULT_LOG_FREQUENCY;
-      }
-    } catch (Exception e) {
-      this.logFrequency = DEFAULT_LOG_FREQUENCY;
-    }
-
-    eventEnqueueWaitTime =
-        Integer.getInteger(EVENT_ENQUEUE_WAIT_TIME_NAME, DEFAULT_EVENT_ENQUEUE_WAIT_TIME);
-    if (eventEnqueueWaitTime < 0) {
-      eventEnqueueWaitTime = DEFAULT_EVENT_ENQUEUE_WAIT_TIME;
-    }
-
     // Schedule task to periodically ping clients.
     scheduleClientPingTask();
   }
@@ -923,7 +874,7 @@ public class CacheClientNotifier {
    * in it that determines which clients will receive the event.
    */
   public static void notifyClients(InternalCacheEvent event) {
-    CacheClientNotifier instance = ccnSingleton;
+    CacheClientNotifier instance = getInstance();
     if (instance != null) {
       instance.singletonNotifyClients(event, null);
     }
@@ -935,7 +886,7 @@ public class CacheClientNotifier {
    */
   public static void notifyClients(InternalCacheEvent event,
       ClientUpdateMessage clientUpdateMessage) {
-    CacheClientNotifier instance = ccnSingleton;
+    CacheClientNotifier instance = getInstance();
     if (instance != null) {
       instance.singletonNotifyClients(event, clientUpdateMessage);
     }
@@ -1094,7 +1045,7 @@ public class CacheClientNotifier {
    * interest established, or override the isClientInterested method to implement its own routing
    */
   public static void routeClientMessage(Conflatable clientMessage) {
-    CacheClientNotifier instance = ccnSingleton;
+    CacheClientNotifier instance = getInstance();
     if (instance != null) {
       // ok to use keySet here because all we do is call getClientProxy with these keys
       instance.singletonRouteClientMessage(clientMessage, instance.clientProxies.keySet());
@@ -1106,7 +1057,7 @@ public class CacheClientNotifier {
    */
   static void routeSingleClientMessage(ClientUpdateMessage clientMessage,
       ClientProxyMembershipID clientProxyMembershipId) {
-    CacheClientNotifier instance = ccnSingleton;
+    CacheClientNotifier instance = getInstance();
     if (instance != null) {
       instance.singletonRouteClientMessage(clientMessage,
           Collections.singleton(clientProxyMembershipId));
@@ -1589,7 +1540,7 @@ public class CacheClientNotifier {
       }
     }
 
-    if (noActiveServer() && ccnSingleton != null) {
+    if (noActiveServer() && getInstance() != null) {
       ccnSingleton = null;
       if (this.haContainer != null) {
         this.haContainer.cleanUp();
@@ -1814,7 +1765,7 @@ public class CacheClientNotifier {
   /**
    * Shuts down durable client proxy
    */
-  public boolean closeDurableClientProxy(String durableClientId) throws CacheException {
+  public boolean closeDurableClientProxy(String durableClientId) {
     CacheClientProxy ccp = getClientProxy(durableClientId);
     if (ccp == null) {
       return false;
@@ -1828,8 +1779,7 @@ public class CacheClientNotifier {
       if (logger.isDebugEnabled()) {
         logger.debug("Cannot close running durable client: {}", durableClientId);
       }
-      // TODO: never throw an anonymous inner class
-      throw new CacheException("Cannot close a running durable client : " + durableClientId) {};
+      throw new IllegalStateException("Cannot close a running durable client : " + durableClientId);
     }
   }
 
@@ -2114,10 +2064,6 @@ public class CacheClientNotifier {
         CLIENT_PING_TASK_PERIOD, CLIENT_PING_TASK_PERIOD);
   }
 
-  public long getLogFrequency() {
-    return this.logFrequency;
-  }
-
   /**
    * @return the haContainer
    */
@@ -2182,93 +2128,4 @@ public class CacheClientNotifier {
     }
   }
 
-  /**
-   * Static inner-class ServerInterestRegistrationMessage
-   * <p>
-   * this message is used to send interest registration to another server. Since interest
-   * registration performs a state-flush operation this message must not transmitted on an ordered
-   * socket
-   */
-  public static class ServerInterestRegistrationMessage extends HighPriorityDistributionMessage
-      implements MessageWithReply {
-
-    ClientProxyMembershipID clientId;
-    ClientInterestMessageImpl clientMessage;
-    int processorId;
-
-    ServerInterestRegistrationMessage(ClientProxyMembershipID clientID,
-        ClientInterestMessageImpl msg) {
-      this.clientId = clientID;
-      this.clientMessage = msg;
-    }
-
-    public ServerInterestRegistrationMessage() {
-      // nothing
-    }
-
-    static void sendInterestChange(DM dm, ClientProxyMembershipID clientID,
-        ClientInterestMessageImpl msg) {
-      ServerInterestRegistrationMessage registrationMessage =
-          new ServerInterestRegistrationMessage(clientID, msg);
-      Set recipients = dm.getOtherDistributionManagerIds();
-      registrationMessage.setRecipients(recipients);
-      ReplyProcessor21 rp = new ReplyProcessor21(dm, recipients);
-      registrationMessage.processorId = rp.getProcessorId();
-      dm.putOutgoing(registrationMessage);
-      try {
-        rp.waitForReplies();
-      } catch (InterruptedException ignore) {
-        Thread.currentThread().interrupt();
-      }
-    }
-
-    @Override
-    protected void process(DistributionManager dm) {
-      // Get the proxy for the proxy id
-      try {
-        CacheClientNotifier clientNotifier = CacheClientNotifier.getInstance();
-        if (clientNotifier != null) {
-          CacheClientProxy proxy = clientNotifier.getClientProxy(this.clientId);
-          // If this VM contains a proxy for the requested proxy id, forward the
-          // message on to the proxy for processing
-          if (proxy != null) {
-            proxy.processInterestMessage(this.clientMessage);
-          }
-        }
-      } finally {
-        ReplyMessage reply = new ReplyMessage();
-        reply.setProcessorId(this.processorId);
-        reply.setRecipient(getSender());
-        try {
-          dm.putOutgoing(reply);
-        } catch (CancelException ignore) {
-          // can't send a reply, so ignore the exception
-        }
-      }
-    }
-
-    @Override
-    public int getDSFID() {
-      return SERVER_INTEREST_REGISTRATION_MESSAGE;
-    }
-
-    @Override
-    public void toData(DataOutput out) throws IOException {
-      super.toData(out);
-      out.writeInt(this.processorId);
-      InternalDataSerializer.invokeToData(this.clientId, out);
-      InternalDataSerializer.invokeToData(this.clientMessage, out);
-    }
-
-    @Override
-    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-      super.fromData(in);
-      this.processorId = in.readInt();
-      this.clientId = new ClientProxyMembershipID();
-      InternalDataSerializer.invokeFromData(this.clientId, in);
-      this.clientMessage = new ClientInterestMessageImpl();
-      InternalDataSerializer.invokeFromData(this.clientMessage, in);
-    }
-  }
 }
-

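Beyond moving fields around, the CacheClientNotifier cleanup standardizes how static entry points reach the singleton: always through getInstance() with a null check, never by reading ccnSingleton directly. A minimal sketch of that pattern with generic placeholder names (this is not the real CacheClientNotifier API):

    // Placeholder class illustrating the null-safe singleton delegation the
    // cleanup applies to notifyClients, routeClientMessage, and friends.
    public class NotifierSketch {
      private static volatile NotifierSketch singleton;

      static NotifierSketch getInstance() {
        return singleton;
      }

      public static void notifyClients(String event) {
        NotifierSketch instance = getInstance(); // single accessor, no direct field reads
        if (instance != null) {                  // a member without cache servers has no notifier
          instance.doNotify(event);
        }
      }

      private void doNotify(String event) {
        System.out.println("notify: " + event);
      }
    }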
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerInterestRegistrationMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerInterestRegistrationMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerInterestRegistrationMessage.java
new file mode 100644
index 0000000..5860982
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerInterestRegistrationMessage.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.tier.sockets;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.geode.CancelException;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.distributed.internal.HighPriorityDistributionMessage;
+import org.apache.geode.distributed.internal.MessageWithReply;
+import org.apache.geode.distributed.internal.ReplyMessage;
+import org.apache.geode.distributed.internal.ReplyProcessor21;
+import org.apache.geode.internal.InternalDataSerializer;
+
+/**
+ * Send interest registration to another server. Since interest registration performs a state-flush operation, this message must not be transmitted on an ordered socket.
+ * <p>
+ * Extracted from CacheClientNotifier
+ */
+public class ServerInterestRegistrationMessage extends HighPriorityDistributionMessage
+    implements MessageWithReply {
+
+  private ClientProxyMembershipID clientId;
+  private ClientInterestMessageImpl clientMessage;
+  private int processorId;
+
+  ServerInterestRegistrationMessage(ClientProxyMembershipID clientId, ClientInterestMessageImpl clientInterestMessage) {
+    this.clientId = clientId;
+    this.clientMessage = clientInterestMessage;
+  }
+
+  public ServerInterestRegistrationMessage() {
+    // deserializing in fromData
+  }
+
+  static void sendInterestChange(DM dm, ClientProxyMembershipID clientId, ClientInterestMessageImpl clientInterestMessage) {
+    ServerInterestRegistrationMessage registrationMessage =
+        new ServerInterestRegistrationMessage(clientId, clientInterestMessage);
+
+    Set recipients = dm.getOtherDistributionManagerIds();
+    registrationMessage.setRecipients(recipients);
+
+    ReplyProcessor21 replyProcessor = new ReplyProcessor21(dm, recipients);
+    registrationMessage.processorId = replyProcessor.getProcessorId();
+
+    dm.putOutgoing(registrationMessage);
+
+    try {
+      replyProcessor.waitForReplies();
+    } catch (InterruptedException ignore) {
+      Thread.currentThread().interrupt();
+    }
+  }
+
+  @Override
+  protected void process(DistributionManager dm) {
+    // Get the proxy for the proxy id
+    try {
+      CacheClientNotifier clientNotifier = CacheClientNotifier.getInstance();
+      if (clientNotifier != null) {
+        CacheClientProxy proxy = clientNotifier.getClientProxy(this.clientId);
+        // If this VM contains a proxy for the requested proxy id, forward the
+        // message on to the proxy for processing
+        if (proxy != null) {
+          proxy.processInterestMessage(this.clientMessage);
+        }
+      }
+    } finally {
+      ReplyMessage reply = new ReplyMessage();
+      reply.setProcessorId(this.processorId);
+      reply.setRecipient(getSender());
+      try {
+        dm.putOutgoing(reply);
+      } catch (CancelException ignore) {
+        // can't send a reply, so ignore the exception
+      }
+    }
+  }
+
+  @Override
+  public int getDSFID() {
+    return SERVER_INTEREST_REGISTRATION_MESSAGE;
+  }
+
+  @Override
+  public void toData(DataOutput out) throws IOException {
+    super.toData(out);
+    out.writeInt(this.processorId);
+    InternalDataSerializer.invokeToData(this.clientId, out);
+    InternalDataSerializer.invokeToData(this.clientMessage, out);
+  }
+
+  @Override
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    super.fromData(in);
+    this.processorId = in.readInt();
+    this.clientId = new ClientProxyMembershipID();
+    InternalDataSerializer.invokeFromData(this.clientId, in);
+    this.clientMessage = new ClientInterestMessageImpl();
+    InternalDataSerializer.invokeFromData(this.clientMessage, in);
+  }
+}

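ServerInterestRegistrationMessage depends on toData and fromData writing and reading the same fields in the same order. The standalone sketch below shows that round-trip symmetry with plain JDK streams standing in for Geode's InternalDataSerializer; the payload and class name are illustrative only.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class RoundTripSketch {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
          out.writeInt(42);                // processorId
          out.writeUTF("client-proxy-id"); // stands in for the serialized clientId
        }
        try (DataInputStream in =
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
          int processorId = in.readInt();  // read back in the same order as written
          String clientId = in.readUTF();
          System.out.println(processorId + " / " + clientId);
        }
      }
    }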
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxEnum.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxEnum.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxEnum.java
index 1b599e9..fb0bd50 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxEnum.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxEnum.java
@@ -39,29 +39,29 @@ public class AddPdxEnum extends BaseCommand {
   private AddPdxEnum() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received get pdx id for enum request ({} parts) from {}",
-          servConn.getName(), msg.getNumberOfParts(), servConn.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
     }
-    int noOfParts = msg.getNumberOfParts();
+    int noOfParts = clientMessage.getNumberOfParts();
 
-    EnumInfo enumInfo = (EnumInfo) msg.getPart(0).getObject();
-    int enumId = msg.getPart(1).getInt();
+    EnumInfo enumInfo = (EnumInfo) clientMessage.getPart(0).getObject();
+    int enumId = clientMessage.getPart(1).getInt();
 
     try {
-      InternalCache cache = servConn.getCache();
+      InternalCache cache = serverConnection.getCache();
       TypeRegistry registry = cache.getPdxRegistry();
       registry.addRemoteEnum(enumId, enumInfo);
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    writeReply(msg, servConn);
-    servConn.setAsTrue(RESPONDED);
+    writeReply(clientMessage, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
   }
 }

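AddPdxEnum above, and the command classes that follow, all share the same cmdExecute shape once the parameters are renamed: read parts from the client message, attempt the operation, answer with either a reply or an exception, and mark the connection as responded. Below is a generic skeleton of that control flow; every name in it is a placeholder, not Geode's internal BaseCommand API.

    // Placeholder skeleton of the request/reply control flow shared by these
    // command classes; Message and Connection are stand-ins, not Geode types.
    abstract class CommandSketch {

      static class Message { /* parsed client request parts */ }

      interface Connection {
        void setRequiresResponse();
        void writeReply(Message clientMessage);
        void writeException(Message clientMessage, Exception e);
        void setResponded();
      }

      final void execute(Message clientMessage, Connection serverConnection) {
        serverConnection.setRequiresResponse();
        try {
          process(clientMessage);                             // e.g. registry.addRemoteEnum(...)
        } catch (Exception e) {
          serverConnection.writeException(clientMessage, e);  // keep the connection usable
          serverConnection.setResponded();
          return;
        }
        serverConnection.writeReply(clientMessage);
        serverConnection.setResponded();
      }

      abstract void process(Message clientMessage) throws Exception;
    }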
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxType.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxType.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxType.java
index 9b8302e..10a065c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxType.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/AddPdxType.java
@@ -39,33 +39,33 @@ public class AddPdxType extends BaseCommand {
   private AddPdxType() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, ClassNotFoundException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received get pdx id for type request ({} parts) from {}",
-          servConn.getName(), msg.getNumberOfParts(), servConn.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
     }
-    int noOfParts = msg.getNumberOfParts();
+    int noOfParts = clientMessage.getNumberOfParts();
 
-    PdxType type = (PdxType) msg.getPart(0).getObject();
-    int typeId = msg.getPart(1).getInt();
+    PdxType type = (PdxType) clientMessage.getPart(0).getObject();
+    int typeId = clientMessage.getPart(1).getInt();
 
     // The native client needs this line
     // because it doesn't set the type id on the
     // client side.
     type.setTypeId(typeId);
     try {
-      InternalCache cache = servConn.getCache();
+      InternalCache cache = serverConnection.getCache();
       TypeRegistry registry = cache.getPdxRegistry();
       registry.addRemoteType(typeId, type);
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    writeReply(msg, servConn);
-    servConn.setAsTrue(RESPONDED);
+    writeReply(clientMessage, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClearRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClearRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClearRegion.java
index 959430c..c9c5a9d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClearRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClearRegion.java
@@ -47,15 +47,15 @@ public class ClearRegion extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart = null, callbackArgPart = null;
     String regionName = null;
     Object callbackArg = null;
     Part eventPart = null;
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
 
     {
       long oldStart = start;
@@ -63,36 +63,36 @@ public class ClearRegion extends BaseCommand {
       stats.incReadClearRegionRequestTime(start - oldStart);
     }
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
-    eventPart = msg.getPart(1);
+    regionNamePart = clientMessage.getPart(0);
+    eventPart = clientMessage.getPart(1);
     // callbackArgPart = null; (redundant assignment)
-    if (msg.getNumberOfParts() > 2) {
-      callbackArgPart = msg.getPart(2);
+    if (clientMessage.getNumberOfParts() > 2) {
+      callbackArgPart = clientMessage.getPart(2);
       try {
         callbackArg = callbackArgPart.getObject();
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
     regionName = regionNamePart.getString();
     if (logger.isDebugEnabled()) {
-      logger.debug(servConn.getName() + ": Received clear region request (" + msg.getPayloadLength()
-          + " bytes) from " + servConn.getSocketString() + " for region " + regionName);
+      logger.debug(serverConnection.getName() + ": Received clear region request (" + clientMessage.getPayloadLength()
+                   + " bytes) from " + serverConnection.getSocketString() + " for region " + regionName);
     }
 
     // Process the clear region request
     if (regionName == null) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.ClearRegion_0_THE_INPUT_REGION_NAME_FOR_THE_CLEAR_REGION_REQUEST_IS_NULL,
-          servConn.getName()));
+          serverConnection.getName()));
       String errMessage =
           LocalizedStrings.ClearRegion_THE_INPUT_REGION_NAME_FOR_THE_CLEAR_REGION_REQUEST_IS_NULL
               .toLocalizedString();
 
-      writeErrorResponse(msg, MessageType.CLEAR_REGION_DATA_ERROR, errMessage, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.CLEAR_REGION_DATA_ERROR, errMessage, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -100,35 +100,35 @@ public class ClearRegion extends BaseCommand {
     if (region == null) {
       String reason = LocalizedStrings.ClearRegion_WAS_NOT_FOUND_DURING_CLEAR_REGION_REGUEST
           .toLocalizedString();
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     try {
       // Clear the region
       this.securityService.authorizeRegionWrite(regionName);
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         RegionClearOperationContext clearContext =
             authzRequest.clearAuthorize(regionName, callbackArg);
         callbackArg = clearContext.getCallbackArg();
       }
-      region.basicBridgeClear(callbackArg, servConn.getProxyID(),
+      region.basicBridgeClear(callbackArg, serverConnection.getProxyID(),
           true /* boolean from cache Client */, eventId);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
 
       // If an exception occurs during the clear, preserve the connection
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -138,10 +138,10 @@ public class ClearRegion extends BaseCommand {
       start = DistributionStats.getStatTime();
       stats.incProcessClearRegionTime(start - oldStart);
     }
-    writeReply(msg, servConn);
-    servConn.setAsTrue(RESPONDED);
+    writeReply(clientMessage, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug(servConn.getName() + ": Sent clear region response for region " + regionName);
+      logger.debug(serverConnection.getName() + ": Sent clear region response for region " + regionName);
     }
     stats.incWriteClearRegionResponseTime(DistributionStats.getStatTime() - start);
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClientReady.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClientReady.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClientReady.java
index d50e522..053ef8a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClientReady.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ClientReady.java
@@ -35,34 +35,34 @@ public class ClientReady extends BaseCommand {
   private ClientReady() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
-    CacheServerStats stats = servConn.getCacheServerStats();
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+    CacheServerStats stats = serverConnection.getCacheServerStats();
     {
       long oldStart = start;
       start = DistributionStats.getStatTime();
       stats.incReadClientReadyRequestTime(start - oldStart);
     }
     try {
-      String clientHost = servConn.getSocketHost();
-      int clientPort = servConn.getSocketPort();
+      String clientHost = serverConnection.getSocketHost();
+      int clientPort = serverConnection.getSocketPort();
       if (logger.isDebugEnabled()) {
         logger.debug("{}: Received client ready request ({} bytes) from {} on {}:{}",
-            servConn.getName(), msg.getPayloadLength(), servConn.getProxyID(), clientHost,
+            serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getProxyID(), clientHost,
             clientPort);
       }
 
-      servConn.getAcceptor().getCacheClientNotifier().readyForEvents(servConn.getProxyID());
+      serverConnection.getAcceptor().getCacheClientNotifier().readyForEvents(serverConnection.getProxyID());
 
       long oldStart = start;
       start = DistributionStats.getStatTime();
       stats.incProcessClientReadyTime(start - oldStart);
 
-      writeReply(msg, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeReply(clientMessage, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
 
       if (logger.isDebugEnabled()) {
-        logger.debug(servConn.getName() + ": Processed client ready request from "
-            + servConn.getProxyID() + " on " + clientHost + ":" + clientPort);
+        logger.debug(serverConnection.getName() + ": Processed client ready request from "
+                     + serverConnection.getProxyID() + " on " + clientHost + ":" + clientPort);
       }
     } finally {
       stats.incWriteClientReadyResponseTime(DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseConnection.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseConnection.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseConnection.java
index 66045aa..378a322 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseConnection.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseConnection.java
@@ -39,43 +39,43 @@ public class CloseConnection extends BaseCommand {
   private CloseConnection() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
-    CacheServerStats stats = servConn.getCacheServerStats();
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+    CacheServerStats stats = serverConnection.getCacheServerStats();
     long oldStart = start;
-    boolean respondToClient = servConn.getClientVersion().compareTo(Version.GFE_90) >= 0;
+    boolean respondToClient = serverConnection.getClientVersion().compareTo(Version.GFE_90) >= 0;
     start = DistributionStats.getStatTime();
     stats.incReadCloseConnectionRequestTime(start - oldStart);
 
     if (respondToClient) {
       // newer clients will wait for a response or EOFException
-      servConn.setAsTrue(REQUIRES_RESPONSE);
+      serverConnection.setAsTrue(REQUIRES_RESPONSE);
     }
 
     try {
-      servConn.setClientDisconnectCleanly();
-      String clientHost = servConn.getSocketHost();
-      int clientPort = servConn.getSocketPort();
+      serverConnection.setClientDisconnectCleanly();
+      String clientHost = serverConnection.getSocketHost();
+      int clientPort = serverConnection.getSocketPort();
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Received close request ({} bytes) from {}:{}", servConn.getName(),
-            msg.getPayloadLength(), clientHost, clientPort);
+        logger.debug("{}: Received close request ({} bytes) from {}:{}", serverConnection.getName(),
+            clientMessage.getPayloadLength(), clientHost, clientPort);
       }
 
-      Part keepalivePart = msg.getPart(0);
+      Part keepalivePart = clientMessage.getPart(0);
       byte[] keepaliveByte = keepalivePart.getSerializedForm();
       boolean keepalive = (keepaliveByte == null || keepaliveByte[0] == 0) ? false : true;
 
-      servConn.getAcceptor().getCacheClientNotifier().setKeepAlive(servConn.getProxyID(),
+      serverConnection.getAcceptor().getCacheClientNotifier().setKeepAlive(serverConnection.getProxyID(),
           keepalive);
 
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Processed close request from {}:{}, keepAlive: {}", servConn.getName(),
+        logger.debug("{}: Processed close request from {}:{}, keepAlive: {}", serverConnection.getName(),
             clientHost, clientPort, keepalive);
       }
     } finally {
       if (respondToClient) {
-        writeReply(msg, servConn);
+        writeReply(clientMessage, serverConnection);
       }
-      servConn.setFlagProcessMessagesAsFalse();
+      serverConnection.setFlagProcessMessagesAsFalse();
 
       stats.incProcessCloseConnectionTime(DistributionStats.getStatTime() - start);
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CommitCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CommitCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CommitCommand.java
index 55ef09b..b2bba4f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CommitCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CommitCommand.java
@@ -50,12 +50,12 @@ public class CommitCommand extends BaseCommand {
   private CommitCommand() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
-    servConn.setAsTrue(REQUIRES_RESPONSE);
-    TXManagerImpl txMgr = (TXManagerImpl) servConn.getCache().getCacheTransactionManager();
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
+    TXManagerImpl txMgr = (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
     InternalDistributedMember client =
-        (InternalDistributedMember) servConn.getProxyID().getDistributedMember();
-    int uniqId = msg.getTransactionId();
+        (InternalDistributedMember) serverConnection.getProxyID().getDistributedMember();
+    int uniqId = clientMessage.getTransactionId();
     TXId txId = new TXId(client, uniqId);
     TXCommitMessage commitMsg = null;
     if (txMgr.isHostedTxRecentlyCompleted(txId)) {
@@ -64,11 +64,11 @@ public class CommitCommand extends BaseCommand {
         logger.debug("TX: returning a recently committed txMessage for tx: {}", txId);
       }
       if (!txMgr.isExceptionToken(commitMsg)) {
-        writeCommitResponse(commitMsg, msg, servConn);
+        writeCommitResponse(commitMsg, clientMessage, serverConnection);
         commitMsg.setClientVersion(null); // fixes bug 46529
-        servConn.setAsTrue(RESPONDED);
+        serverConnection.setAsTrue(RESPONDED);
       } else {
-        sendException(msg, servConn, txMgr.getExceptionForToken(commitMsg, txId));
+        sendException(clientMessage, serverConnection, txMgr.getExceptionForToken(commitMsg, txId));
       }
       txMgr.removeHostedTXState(txId);
       return;
@@ -87,10 +87,10 @@ public class CommitCommand extends BaseCommand {
       txMgr.commit();
 
       commitMsg = txProxy.getCommitMessage();
-      writeCommitResponse(commitMsg, msg, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeCommitResponse(commitMsg, clientMessage, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
     } catch (Exception e) {
-      sendException(msg, servConn, e);
+      sendException(clientMessage, serverConnection, e);
     } finally {
       if (txId != null) {
         txMgr.removeHostedTXState(txId);
@@ -115,7 +115,7 @@ public class CommitCommand extends BaseCommand {
     if (response != null) {
       response.setClientVersion(servConn.getClientVersion());
     }
-    responseMsg.addObjPart(response, zipValues);
+    responseMsg.addObjPart(response, false);
     servConn.getCache().getCancelCriterion().checkCancelInProgress(null);
     if (logger.isDebugEnabled()) {
       logger.debug("TX: sending a nonNull response for transaction: {}",

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey.java
index c1b67e1..50d1197 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey.java
@@ -51,34 +51,34 @@ public class ContainsKey extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
     Part regionNamePart = null;
     Part keyPart = null;
     String regionName = null;
     Object key = null;
 
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     {
       long oldStart = start;
       start = DistributionStats.getStatTime();
       stats.incReadContainsKeyRequestTime(start - oldStart);
     }
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
-    keyPart = msg.getPart(1);
+    regionNamePart = clientMessage.getPart(0);
+    keyPart = clientMessage.getPart(1);
     regionName = regionNamePart.getString();
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received containsKey request ({} bytes) from {} for region {} key {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
     }
 
     // Process the containsKey request
@@ -87,47 +87,47 @@ public class ContainsKey extends BaseCommand {
       if (key == null) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.ContainsKey_0_THE_INPUT_KEY_FOR_THE_CONTAINSKEY_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
         errMessage = LocalizedStrings.ContainsKey_THE_INPUT_KEY_FOR_THE_CONTAINSKEY_REQUEST_IS_NULL
             .toLocalizedString();
       }
       if (regionName == null) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.ContainsKey_0_THE_INPUT_REGION_NAME_FOR_THE_CONTAINSKEY_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
         errMessage =
             LocalizedStrings.ContainsKey_THE_INPUT_REGION_NAME_FOR_THE_CONTAINSKEY_REQUEST_IS_NULL
                 .toLocalizedString();
       }
-      writeErrorResponse(msg, MessageType.CONTAINS_KEY_DATA_ERROR, errMessage, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.CONTAINS_KEY_DATA_ERROR, errMessage, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason =
           LocalizedStrings.ContainsKey_WAS_NOT_FOUND_DURING_CONTAINSKEY_REQUEST.toLocalizedString();
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     try {
       this.securityService.authorizeRegionRead(regionName, key.toString());
     } catch (NotAuthorizedException ex) {
-      writeException(msg, ex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, ex, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+    AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
     if (authzRequest != null) {
       try {
         authzRequest.containsKeyAuthorize(regionName, key);
       } catch (NotAuthorizedException ex) {
-        writeException(msg, ex, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, ex, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -140,10 +140,10 @@ public class ContainsKey extends BaseCommand {
       start = DistributionStats.getStatTime();
       stats.incProcessContainsKeyTime(start - oldStart);
     }
-    writeContainsKeyResponse(containsKey, msg, servConn);
-    servConn.setAsTrue(RESPONDED);
+    writeContainsKeyResponse(containsKey, clientMessage, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent containsKey response for region {} key {}", servConn.getName(),
+      logger.debug("{}: Sent containsKey response for region {} key {}", serverConnection.getName(),
           regionName, key);
     }
     stats.incWriteContainsKeyResponseTime(DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey66.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey66.java
index dc8f9eb..53bb414 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey66.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ContainsKey66.java
@@ -55,34 +55,34 @@ public class ContainsKey66 extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
     Part regionNamePart = null, keyPart = null;
     String regionName = null;
     Object key = null;
     ContainsKeyOp.MODE mode;
-    CacheServerStats stats = servConn.getCacheServerStats();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
 
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     {
       long oldStart = start;
       start = DistributionStats.getStatTime();
       stats.incReadContainsKeyRequestTime(start - oldStart);
     }
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
-    keyPart = msg.getPart(1);
-    mode = ContainsKeyOp.MODE.values()[(msg.getPart(2).getInt())];
+    regionNamePart = clientMessage.getPart(0);
+    keyPart = clientMessage.getPart(1);
+    mode = ContainsKeyOp.MODE.values()[(clientMessage.getPart(2).getInt())];
     regionName = regionNamePart.getString();
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received containsKey request ({} bytes) from {} for region {} key {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
     }
 
     // Process the containsKey request
@@ -91,46 +91,46 @@ public class ContainsKey66 extends BaseCommand {
       if (key == null) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.ContainsKey_0_THE_INPUT_KEY_FOR_THE_CONTAINSKEY_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
         errMessage = LocalizedStrings.ContainsKey_THE_INPUT_KEY_FOR_THE_CONTAINSKEY_REQUEST_IS_NULL
             .toLocalizedString();
       }
       if (regionName == null) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.ContainsKey_0_THE_INPUT_REGION_NAME_FOR_THE_CONTAINSKEY_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
         errMessage =
             LocalizedStrings.ContainsKey_THE_INPUT_REGION_NAME_FOR_THE_CONTAINSKEY_REQUEST_IS_NULL
                 .toLocalizedString();
       }
-      writeErrorResponse(msg, MessageType.CONTAINS_KEY_DATA_ERROR, errMessage, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.CONTAINS_KEY_DATA_ERROR, errMessage, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason =
           LocalizedStrings.ContainsKey_WAS_NOT_FOUND_DURING_CONTAINSKEY_REQUEST.toLocalizedString();
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     try {
       this.securityService.authorizeRegionRead(regionName, key.toString());
     } catch (NotAuthorizedException ex) {
-      writeException(msg, ex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, ex, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+    AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
     if (authzRequest != null) {
       try {
         authzRequest.containsKeyAuthorize(regionName, key);
       } catch (NotAuthorizedException ex) {
-        writeException(msg, ex, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, ex, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -157,10 +157,10 @@ public class ContainsKey66 extends BaseCommand {
       start = DistributionStats.getStatTime();
       stats.incProcessContainsKeyTime(start - oldStart);
     }
-    writeContainsKeyResponse(containsKey, msg, servConn);
-    servConn.setAsTrue(RESPONDED);
+    writeContainsKeyResponse(containsKey, clientMessage, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent containsKey response for region {} key {}", servConn.getName(),
+      logger.debug("{}: Sent containsKey response for region {} key {}", serverConnection.getName(),
           regionName, key);
     }
     stats.incWriteContainsKeyResponseTime(DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CreateRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CreateRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CreateRegion.java
index d84dc62..b7ab01b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CreateRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CreateRegion.java
@@ -41,25 +41,25 @@ public class CreateRegion extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
     Part regionNamePart = null;
     String regionName = null;
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
     // bserverStats.incLong(readDestroyRequestTimeId,
     // DistributionStats.getStatTime() - start);
     // bserverStats.incInt(destroyRequestsId, 1);
     // start = DistributionStats.getStatTime();
     // Retrieve the data from the message parts
-    Part parentRegionNamePart = msg.getPart(0);
+    Part parentRegionNamePart = clientMessage.getPart(0);
     String parentRegionName = parentRegionNamePart.getString();
 
-    regionNamePart = msg.getPart(1);
+    regionNamePart = clientMessage.getPart(1);
     regionName = regionNamePart.getString();
 
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received create region request ({} bytes) from {} for parent region {} region {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), parentRegionName,
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), parentRegionName,
           regionName);
     }
 
@@ -69,7 +69,7 @@ public class CreateRegion extends BaseCommand {
       if (parentRegionName == null) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.CreateRegion_0_THE_INPUT_PARENT_REGION_NAME_FOR_THE_CREATE_REGION_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
         errMessage =
             LocalizedStrings.CreateRegion_THE_INPUT_PARENT_REGION_NAME_FOR_THE_CREATE_REGION_REQUEST_IS_NULL
                 .toLocalizedString();
@@ -77,41 +77,41 @@ public class CreateRegion extends BaseCommand {
       if (regionName == null) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.CreateRegion_0_THE_INPUT_REGION_NAME_FOR_THE_CREATE_REGION_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
         errMessage =
             LocalizedStrings.CreateRegion_THE_INPUT_REGION_NAME_FOR_THE_CREATE_REGION_REQUEST_IS_NULL
                 .toLocalizedString();
       }
-      writeErrorResponse(msg, MessageType.CREATE_REGION_DATA_ERROR, errMessage, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.CREATE_REGION_DATA_ERROR, errMessage, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    Region parentRegion = servConn.getCache().getRegion(parentRegionName);
+    Region parentRegion = serverConnection.getCache().getRegion(parentRegionName);
     if (parentRegion == null) {
       String reason =
           LocalizedStrings.CreateRegion__0_WAS_NOT_FOUND_DURING_SUBREGION_CREATION_REQUEST
               .toLocalizedString(parentRegionName);
-      writeRegionDestroyedEx(msg, parentRegionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, parentRegionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
     try {
       this.securityService.authorizeDataManage();
     } catch (NotAuthorizedException ex) {
-      writeException(msg, ex, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, ex, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+    AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
     if (authzRequest != null) {
       try {
         authzRequest.createRegionAuthorize(parentRegionName + '/' + regionName);
       } catch (NotAuthorizedException ex) {
-        writeException(msg, ex, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, ex, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -121,11 +121,11 @@ public class CreateRegion extends BaseCommand {
       AttributesFactory factory = new AttributesFactory(parentRegion.getAttributes());
       region = parentRegion.createSubregion(regionName, factory.create());
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Created region {}", servConn.getName(), region);
+        logger.debug("{}: Created region {}", serverConnection.getName(), region);
       }
     } else {
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Retrieved region {}", servConn.getName(), region);
+        logger.debug("{}: Retrieved region {}", serverConnection.getName(), region);
       }
     }
 
@@ -134,11 +134,11 @@ public class CreateRegion extends BaseCommand {
     // NOT USING IT
     // bserverStats.incLong(processDestroyTimeId,
     // DistributionStats.getStatTime() - start);
-    writeReply(msg, servConn);
-    servConn.setAsTrue(RESPONDED);
+    writeReply(clientMessage, serverConnection);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Sent create region response for parent region {} region {}",
-          servConn.getName(), parentRegionName, regionName);
+          serverConnection.getName(), parentRegionName, regionName);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Default.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Default.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Default.java
index 1497044..359e1b4 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Default.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Default.java
@@ -37,14 +37,15 @@ public class Default extends BaseCommand {
   private Default() {}
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
     // requiresResponse = true; NOT NEEDED... ALWAYS SEND ERROR RESPONSE
 
     logger.fatal(
         LocalizedMessage.create(LocalizedStrings.Default_0_UNKNOWN_MESSAGE_TYPE_1_WITH_TX_2_FROM_3,
-            new Object[] {servConn.getName(), MessageType.getString(msg.getMessageType()),
-                Integer.valueOf(msg.getTransactionId()), servConn.getSocketString()}));
-    writeErrorResponse(msg, MessageType.UNKNOWN_MESSAGE_TYPE_ERROR, servConn);
+            new Object[] {
+              serverConnection.getName(), MessageType.getString(clientMessage.getMessageType()),
+                Integer.valueOf(clientMessage.getTransactionId()), serverConnection.getSocketString()}));
+    writeErrorResponse(clientMessage, MessageType.UNKNOWN_MESSAGE_TYPE_ERROR, serverConnection);
     // responded = true; NOT NEEDED... ALWAYS SEND ERROR RESPONSE
   }
 

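The hunks above apply the same mechanical rename to every command class: the Message parameter becomes clientMessage and the ServerConnection parameter becomes serverConnection. Apart from the trace-message typo fix (REFRESH_METADAT to REFRESH_METADATA) and the switch from the OK_BYTES constant to the okBytes() accessor, the hunks are a parameter rename. As a condensed, hypothetical illustration (ExampleCommand is not part of this commit; only the cmdExecute signature and the helper calls that appear in the hunks are assumed), a command implementation now reads:

    import java.io.IOException;

    import org.apache.geode.internal.cache.LocalRegion;
    import org.apache.geode.internal.cache.tier.sockets.BaseCommand;
    import org.apache.geode.internal.cache.tier.sockets.Message;
    import org.apache.geode.internal.cache.tier.sockets.Part;
    import org.apache.geode.internal.cache.tier.sockets.ServerConnection;

    public class ExampleCommand extends BaseCommand {

      @Override
      public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
          throws IOException {
        // Every command first flags that a response is required.
        serverConnection.setAsTrue(REQUIRES_RESPONSE);

        // The client's request data arrives as numbered message parts.
        Part regionNamePart = clientMessage.getPart(0);
        String regionName = regionNamePart.getString();

        // Server-side state (cache, statistics, authorization) is reached through the connection.
        LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
        // ... validation, authorization and the actual operation on 'region' go here ...

        // Reply to the client and mark the connection as responded.
        writeReply(clientMessage, serverConnection);
        serverConnection.setAsTrue(RESPONDED);
      }
    }

The individual commands differ only in which message parts they read and which write* helper they call to send the response.
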
http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy.java
index 5996984..0699c8b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy.java
@@ -48,7 +48,7 @@ public class Destroy extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long startparam)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long startparam)
       throws IOException, InterruptedException {
     long start = startparam;
 
@@ -57,8 +57,8 @@ public class Destroy extends BaseCommand {
     Object callbackArg = null, key = null;
     Part eventPart = null;
     StringBuffer errMessage = new StringBuffer();
-    CacheServerStats stats = servConn.getCacheServerStats();
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    CacheServerStats stats = serverConnection.getCacheServerStats();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
 
     {
       long oldStart = start;
@@ -66,17 +66,17 @@ public class Destroy extends BaseCommand {
       stats.incReadDestroyRequestTime(start - oldStart);
     }
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
-    keyPart = msg.getPart(1);
-    eventPart = msg.getPart(2);
+    regionNamePart = clientMessage.getPart(0);
+    keyPart = clientMessage.getPart(1);
+    eventPart = clientMessage.getPart(2);
     // callbackArgPart = null; (redundant assignment)
-    if (msg.getNumberOfParts() > 3) {
-      callbackArgPart = msg.getPart(3);
+    if (clientMessage.getNumberOfParts() > 3) {
+      callbackArgPart = clientMessage.getPart(3);
       try {
         callbackArg = callbackArgPart.getObject();
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -84,13 +84,13 @@ public class Destroy extends BaseCommand {
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received destroy request ({} bytes) from {} for region {} key {}",
-          servConn.getName(), msg.getPayloadLength(), servConn.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
     }
 
     // Process the destroy request
@@ -98,29 +98,29 @@ public class Destroy extends BaseCommand {
       if (key == null) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.Destroy_0_THE_INPUT_KEY_FOR_THE_DESTROY_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
         errMessage.append(LocalizedStrings.Destroy__THE_INPUT_KEY_FOR_THE_DESTROY_REQUEST_IS_NULL
             .toLocalizedString());
       }
       if (regionName == null) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.Destroy_0_THE_INPUT_REGION_NAME_FOR_THE_DESTROY_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
         errMessage
             .append(LocalizedStrings.Destroy__THE_INPUT_REGION_NAME_FOR_THE_DESTROY_REQUEST_IS_NULL
                 .toLocalizedString());
       }
-      writeErrorResponse(msg, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason = LocalizedStrings.Destroy__0_WAS_NOT_FOUND_DURING_DESTROY_REQUEST
           .toLocalizedString(regionName);
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -128,13 +128,13 @@ public class Destroy extends BaseCommand {
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     try {
       // for integrated security
       this.securityService.authorizeRegionWrite(regionName, key.toString());
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           RegionDestroyOperationContext destroyContext =
@@ -146,35 +146,35 @@ public class Destroy extends BaseCommand {
           callbackArg = destroyContext.getCallbackArg();
         }
       }
-      region.basicBridgeDestroy(key, callbackArg, servConn.getProxyID(), true,
+      region.basicBridgeDestroy(key, callbackArg, serverConnection.getProxyID(), true,
           new EventIDHolder(eventId));
-      servConn.setModificationInfo(true, regionName, key);
+      serverConnection.setModificationInfo(true, regionName, key);
     } catch (EntryNotFoundException e) {
       // Don't send an exception back to the client if this
       // exception happens. Just log it and continue.
       logger.info(LocalizedMessage.create(
           LocalizedStrings.Destroy_0_DURING_ENTRY_DESTROY_NO_ENTRY_WAS_FOUND_FOR_KEY_1,
-          new Object[] {servConn.getName(), key}));
+          new Object[] { serverConnection.getName(), key}));
     } catch (RegionDestroyedException rde) {
-      writeException(msg, rde, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, rde, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
 
       // If an exception occurs during the destroy, preserve the connection
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       if (e instanceof GemFireSecurityException) {
         // Fine logging for security exceptions since these are already
         // logged by the security logger
         if (logger.isDebugEnabled()) {
-          logger.debug("{}: Unexpected Security exception", servConn.getName(), e);
+          logger.debug("{}: Unexpected Security exception", serverConnection.getName(), e);
         }
       } else {
         logger.warn(LocalizedMessage.create(LocalizedStrings.Destroy_0_UNEXPECTED_EXCEPTION,
-            servConn.getName()), e);
+            serverConnection.getName()), e);
       }
       return;
     }
@@ -188,17 +188,17 @@ public class Destroy extends BaseCommand {
     if (region instanceof PartitionedRegion) {
       PartitionedRegion pr = (PartitionedRegion) region;
       if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-        writeReplyWithRefreshMetadata(msg, servConn, pr, pr.getNetworkHopType());
+        writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, pr.getNetworkHopType());
         pr.clearNetworkHopData();
       } else {
-        writeReply(msg, servConn);
+        writeReply(clientMessage, serverConnection);
       }
     } else {
-      writeReply(msg, servConn);
+      writeReply(clientMessage, serverConnection);
     }
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent destroy response for region {} key {}", servConn.getName(), regionName,
+      logger.debug("{}: Sent destroy response for region {} key {}", serverConnection.getName(), regionName,
           key);
     }
     stats.incWriteDestroyResponseTime(DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy65.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy65.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy65.java
index 585f57d..0ee0fc4 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy65.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy65.java
@@ -54,7 +54,7 @@ public class Destroy65 extends BaseCommand {
   }
 
   @Override
-  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection servConn,
+  protected void writeReplyWithRefreshMetadata(Message origMsg, ServerConnection serverConnection,
       PartitionedRegion pr, byte nwHop) throws IOException {
     throw new UnsupportedOperationException();
   }
@@ -72,7 +72,7 @@ public class Destroy65 extends BaseCommand {
     replyMsg.addIntPart(entryNotFoundForRemove ? 1 : 0);
     replyMsg.send(servConn);
     if (logger.isTraceEnabled()) {
-      logger.trace("{}: rpl with REFRESH_METADAT tx: {}", servConn.getName(),
+      logger.trace("{}: rpl with REFRESH_METADATA tx: {}", servConn.getName(),
           origMsg.getTransactionId());
     }
   }
@@ -84,7 +84,7 @@ public class Destroy65 extends BaseCommand {
     replyMsg.setMessageType(MessageType.REPLY);
     replyMsg.setNumberOfParts(2);
     replyMsg.setTransactionId(origMsg.getTransactionId());
-    replyMsg.addBytesPart(OK_BYTES);
+    replyMsg.addBytesPart(okBytes());
     replyMsg.addIntPart(entryNotFound ? 1 : 0);
     replyMsg.send(servConn);
     if (logger.isTraceEnabled()) {
@@ -94,7 +94,7 @@ public class Destroy65 extends BaseCommand {
   }
 
   @Override
-  public void cmdExecute(Message msg, ServerConnection servConn, long start)
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
       throws IOException, InterruptedException {
     Part regionNamePart;
     Part keyPart;
@@ -108,20 +108,20 @@ public class Destroy65 extends BaseCommand {
     String regionName = null;
     Object callbackArg = null, key = null;
     StringBuffer errMessage = new StringBuffer();
-    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
-    CacheServerStats stats = servConn.getCacheServerStats();
-    servConn.setAsTrue(REQUIRES_RESPONSE);
+    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
+    CacheServerStats stats = serverConnection.getCacheServerStats();
+    serverConnection.setAsTrue(REQUIRES_RESPONSE);
 
     long now = DistributionStats.getStatTime();
     stats.incReadDestroyRequestTime(now - start);
 
     // Retrieve the data from the message parts
-    regionNamePart = msg.getPart(0);
-    keyPart = msg.getPart(1);
-    expectedOldValuePart = msg.getPart(2);
+    regionNamePart = clientMessage.getPart(0);
+    keyPart = clientMessage.getPart(1);
+    expectedOldValuePart = clientMessage.getPart(2);
     try {
 
-      operation = msg.getPart(3).getObject();
+      operation = clientMessage.getPart(3).getObject();
 
       if (((operation instanceof Operation) && ((Operation) operation == Operation.REMOVE))
           || ((operation instanceof Byte) && (Byte) operation == OpType.DESTROY))
@@ -130,20 +130,20 @@ public class Destroy65 extends BaseCommand {
         expectedOldValue = expectedOldValuePart.getObject();
       }
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    eventPart = msg.getPart(4);
+    eventPart = clientMessage.getPart(4);
 
-    if (msg.getNumberOfParts() > 5) {
-      callbackArgPart = msg.getPart(5);
+    if (clientMessage.getNumberOfParts() > 5) {
+      callbackArgPart = clientMessage.getPart(5);
       try {
         callbackArg = callbackArgPart.getObject();
       } catch (Exception e) {
-        writeException(msg, e, false, servConn);
-        servConn.setAsTrue(RESPONDED);
+        writeException(clientMessage, e, false, serverConnection);
+        serverConnection.setAsTrue(RESPONDED);
         return;
       }
     }
@@ -151,16 +151,16 @@ public class Destroy65 extends BaseCommand {
     try {
       key = keyPart.getStringOrObject();
     } catch (Exception e) {
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received destroy65 request ({} bytes; op={}) from {} for region {} key {}{} txId {}",
-          servConn.getName(), msg.getPayloadLength(), operation, servConn.getSocketString(),
+          serverConnection.getName(), clientMessage.getPayloadLength(), operation, serverConnection.getSocketString(),
           regionName, key, (operation == Operation.REMOVE ? " value=" + expectedOldValue : ""),
-          msg.getTransactionId());
+          clientMessage.getTransactionId());
     }
     boolean entryNotFoundForRemove = false;
 
@@ -169,29 +169,29 @@ public class Destroy65 extends BaseCommand {
       if (key == null) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.Destroy_0_THE_INPUT_KEY_FOR_THE_DESTROY_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
         errMessage.append(LocalizedStrings.Destroy__THE_INPUT_KEY_FOR_THE_DESTROY_REQUEST_IS_NULL
             .toLocalizedString());
       }
       if (regionName == null) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.Destroy_0_THE_INPUT_REGION_NAME_FOR_THE_DESTROY_REQUEST_IS_NULL,
-            servConn.getName()));
+            serverConnection.getName()));
         errMessage
             .append(LocalizedStrings.Destroy__THE_INPUT_REGION_NAME_FOR_THE_DESTROY_REQUEST_IS_NULL
                 .toLocalizedString());
       }
-      writeErrorResponse(msg, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeErrorResponse(clientMessage, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
-    LocalRegion region = (LocalRegion) servConn.getCache().getRegion(regionName);
+    LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
     if (region == null) {
       String reason = LocalizedStrings.Destroy__0_WAS_NOT_FOUND_DURING_DESTROY_REQUEST
           .toLocalizedString(regionName);
-      writeRegionDestroyedEx(msg, regionName, reason, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     }
 
@@ -199,13 +199,13 @@ public class Destroy65 extends BaseCommand {
     ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(servConn.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
     EventIDHolder clientEvent = new EventIDHolder(eventId);
 
     Breadcrumbs.setEventId(eventId);
 
     // msg.isRetry might be set by v7.0 and later clients
-    if (msg.isRetry()) {
+    if (clientMessage.isRetry()) {
       // if (logger.isDebugEnabled()) {
       // logger.debug("DEBUG: encountered isRetry in Destroy65");
       // }
@@ -223,7 +223,7 @@ public class Destroy65 extends BaseCommand {
       // for integrated security
       this.securityService.authorizeRegionWrite(regionName, key.toString());
 
-      AuthorizeRequest authzRequest = servConn.getAuthzRequest();
+      AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
       if (authzRequest != null) {
         if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
           RegionDestroyOperationContext destroyContext =
@@ -236,14 +236,14 @@ public class Destroy65 extends BaseCommand {
         }
       }
       if (operation == null || operation == Operation.DESTROY) {
-        region.basicBridgeDestroy(key, callbackArg, servConn.getProxyID(), true, clientEvent);
+        region.basicBridgeDestroy(key, callbackArg, serverConnection.getProxyID(), true, clientEvent);
       } else {
         // this throws exceptions if expectedOldValue checks fail
         try {
           if (expectedOldValue == null) {
             expectedOldValue = Token.INVALID;
           }
-          if (operation == Operation.REMOVE && msg.isRetry()
+          if (operation == Operation.REMOVE && clientMessage.isRetry()
               && clientEvent.getVersionTag() != null) {
             // the operation was successful last time it was tried, so there's
             // no need to perform it again. Just return the version tag and
@@ -254,55 +254,55 @@ public class Destroy65 extends BaseCommand {
             }
             // try the operation anyway to ensure that it's been distributed to all servers
             try {
-              region.basicBridgeRemove(key, expectedOldValue, callbackArg, servConn.getProxyID(),
+              region.basicBridgeRemove(key, expectedOldValue, callbackArg, serverConnection.getProxyID(),
                   true, clientEvent);
             } catch (EntryNotFoundException e) {
               // ignore, and don't set entryNotFoundForRemove because this was a successful
               // operation - bug #51664
             }
           } else {
-            region.basicBridgeRemove(key, expectedOldValue, callbackArg, servConn.getProxyID(),
+            region.basicBridgeRemove(key, expectedOldValue, callbackArg, serverConnection.getProxyID(),
                 true, clientEvent);
             if (logger.isDebugEnabled()) {
               logger.debug("region.remove succeeded");
             }
           }
         } catch (EntryNotFoundException e) {
-          servConn.setModificationInfo(true, regionName, key);
+          serverConnection.setModificationInfo(true, regionName, key);
           if (logger.isDebugEnabled()) {
             logger.debug("writing entryNotFound response");
           }
           entryNotFoundForRemove = true;
         }
       }
-      servConn.setModificationInfo(true, regionName, key);
+      serverConnection.setModificationInfo(true, regionName, key);
     } catch (EntryNotFoundException e) {
       // Don't send an exception back to the client if this
       // exception happens. Just log it and continue.
       logger.info(LocalizedMessage.create(
           LocalizedStrings.Destroy_0_DURING_ENTRY_DESTROY_NO_ENTRY_WAS_FOUND_FOR_KEY_1,
-          new Object[] {servConn.getName(), key}));
+          new Object[] { serverConnection.getName(), key}));
       entryNotFoundForRemove = true;
     } catch (RegionDestroyedException rde) {
-      writeException(msg, rde, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, rde, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       return;
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
-      checkForInterrupt(servConn, e);
+      checkForInterrupt(serverConnection, e);
 
       // If an exception occurs during the destroy, preserve the connection
-      writeException(msg, e, false, servConn);
-      servConn.setAsTrue(RESPONDED);
+      writeException(clientMessage, e, false, serverConnection);
+      serverConnection.setAsTrue(RESPONDED);
       if (e instanceof GemFireSecurityException) {
         // Fine logging for security exceptions since these are already
         // logged by the security logger
         if (logger.isDebugEnabled()) {
-          logger.debug("{}: Unexpected Security exception", servConn.getName(), e);
+          logger.debug("{}: Unexpected Security exception", serverConnection.getName(), e);
         }
       } else {
         logger.warn(LocalizedMessage.create(LocalizedStrings.Destroy_0_UNEXPECTED_EXCEPTION,
-            servConn.getName()), e);
+            serverConnection.getName()), e);
       }
       return;
     }
@@ -314,20 +314,20 @@ public class Destroy65 extends BaseCommand {
     if (region instanceof PartitionedRegion) {
       PartitionedRegion pr = (PartitionedRegion) region;
       if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-        writeReplyWithRefreshMetadata(msg, servConn, pr, entryNotFoundForRemove,
+        writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, entryNotFoundForRemove,
             pr.getNetworkHopType(), clientEvent.getVersionTag());
         pr.clearNetworkHopData();
       } else {
-        writeReply(msg, servConn, entryNotFoundForRemove | clientEvent.getIsRedestroyedEntry(),
+        writeReply(clientMessage, serverConnection, entryNotFoundForRemove | clientEvent.getIsRedestroyedEntry(),
             clientEvent.getVersionTag());
       }
     } else {
-      writeReply(msg, servConn, entryNotFoundForRemove | clientEvent.getIsRedestroyedEntry(),
+      writeReply(clientMessage, serverConnection, entryNotFoundForRemove | clientEvent.getIsRedestroyedEntry(),
           clientEvent.getVersionTag());
     }
-    servConn.setAsTrue(RESPONDED);
+    serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent destroy response for region {} key {}", servConn.getName(), regionName,
+      logger.debug("{}: Sent destroy response for region {} key {}", serverConnection.getName(), regionName,
           key);
     }
     stats.incWriteDestroyResponseTime(DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/c5031d12/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy70.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy70.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy70.java
index 59a7233..7c07c72 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy70.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Destroy70.java
@@ -67,7 +67,7 @@ public class Destroy70 extends Destroy65 {
     pr.getPrStats().incPRMetaDataSentCount();
     replyMsg.send(servConn);
     if (logger.isTraceEnabled()) {
-      logger.trace("{}: rpl with REFRESH_METADAT tx: {}", servConn.getName(),
+      logger.trace("{}: rpl with REFRESH_METADATA tx: {}", servConn.getName(),
           origMsg.getTransactionId());
     }
   }
@@ -104,7 +104,7 @@ public class Destroy70 extends Destroy65 {
       // logger.fine("response has no version tag");
       // }
     }
-    replyMsg.addBytesPart(OK_BYTES); // make old single-hop code happy by puting byte[]{0} here
+    replyMsg.addBytesPart(okBytes()); // make old single-hop code happy by puting byte[]{0} here
     replyMsg.addIntPart(entryNotFound ? 1 : 0);
     replyMsg.send(servConn);
     if (logger.isTraceEnabled()) {


[04/43] geode git commit: GEODE-2950: Adding validation checks on create lucene index parameter names

Posted by kl...@apache.org.
GEODE-2950: Adding validation checks on create lucene index parameter names

	This closes #532


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/c793f74c
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/c793f74c
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/c793f74c

Branch: refs/heads/feature/GEODE-2632-17
Commit: c793f74c07c3488ba188ed927144be688bd50b19
Parents: 0dae918
Author: David Anuta <da...@gmail.com>
Authored: Wed May 24 16:21:33 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Thu May 25 11:20:56 2017 -0700

----------------------------------------------------------------------
 .../lucene/internal/LuceneServiceImpl.java      | 15 ++++++++----
 .../functions/LuceneCreateIndexFunction.java    |  6 +++--
 .../cli/LuceneIndexCommandsDUnitTest.java       | 25 ++++++++++++++++----
 3 files changed, 36 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/c793f74c/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
index 3859804..c0d6266 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
@@ -128,7 +128,7 @@ public class LuceneServiceImpl implements InternalLuceneService {
     return getUniqueIndexName(indexName, regionPath) + regionSuffix;
   }
 
-  public static void validateRegionName(String name) {
+  public static void validateCreateIndexCommandParams(String name, boolean isRegionPath) {
     if (name == null) {
       throw new IllegalArgumentException(
           LocalizedStrings.LocalRegion_NAME_CANNOT_BE_NULL.toLocalizedString());
@@ -140,15 +140,22 @@ public class LuceneServiceImpl implements InternalLuceneService {
 
     if (name.startsWith("__")) {
       throw new IllegalArgumentException(
-          "Region names may not begin with a double-underscore: " + name);
+          "Parameter names may not begin with a double-underscore: " + name);
+    }
+
+    final Pattern NAME_PATTERN;
+    if (isRegionPath) {
+      NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_./]+");
+    } else {
+      NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_.]+");
     }
 
-    final Pattern NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_./]+");
     // Ensure the region only contains valid characters
     Matcher matcher = NAME_PATTERN.matcher(name);
     if (!matcher.matches()) {
       throw new IllegalArgumentException(
-          "Region names may only be alphanumeric and may contain hyphens or underscores: " + name);
+          "Parameter names may only be alphanumeric, though they can contain hyphens or underscores: "
+              + name);
     }
   }
 

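The same validation is now applied to both the index name and the region path; the only difference is whether '/' is accepted. The following self-contained sketch (LuceneNameCheckDemo and isAcceptable are illustrative names, not part of the patch; the real method throws IllegalArgumentException with localized messages rather than returning a boolean) shows the effect of the two patterns copied from the hunk above:

    import java.util.regex.Pattern;

    public class LuceneNameCheckDemo {
      // Patterns copied from validateCreateIndexCommandParams(); region paths may also contain '/'.
      private static final Pattern INDEX_NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_.]+");
      private static final Pattern REGION_PATH_PATTERN = Pattern.compile("[aA-zZ0-9-_./]+");

      static boolean isAcceptable(String name, boolean isRegionPath) {
        if (name == null || name.startsWith("__")) {
          return false; // the real code throws IllegalArgumentException for these cases
        }
        Pattern pattern = isRegionPath ? REGION_PATH_PATTERN : INDEX_NAME_PATTERN;
        return pattern.matcher(name).matches();
      }

      public static void main(String[] args) {
        System.out.println(isAcceptable("personIndex", false));    // true
        System.out.println(isAcceptable("__personIndex", false));  // false: double-underscore prefix
        System.out.println(isAcceptable("bad*name", false));       // false: illegal character
        System.out.println(isAcceptable("/Person/address", true)); // true: '/' allowed in region paths
      }
    }
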
http://git-wip-us.apache.org/repos/asf/geode/blob/c793f74c/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
index 422b1ef..26ac0e2 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
@@ -15,7 +15,7 @@
 
 package org.apache.geode.cache.lucene.internal.cli.functions;
 
-import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateRegionName;
+import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateCreateIndexCommandParams;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.geode.cache.Cache;
@@ -67,6 +67,8 @@ public class LuceneCreateIndexFunction extends FunctionAdapter implements Intern
       memberId = cache.getDistributedSystem().getDistributedMember().getId();
       LuceneService service = LuceneServiceProvider.get(cache);
 
+      validateCreateIndexCommandParams(indexInfo.getIndexName(), false);
+
       String[] fields = indexInfo.getSearchableFieldNames();
       String[] analyzerName = indexInfo.getFieldAnalyzers();
 
@@ -84,7 +86,7 @@ public class LuceneCreateIndexFunction extends FunctionAdapter implements Intern
         }
       }
 
-      validateRegionName(indexInfo.getRegionPath());
+      validateCreateIndexCommandParams(indexInfo.getRegionPath(), true);
       indexFactory.create(indexInfo.getIndexName(), indexInfo.getRegionPath());
 
       // TODO - update cluster configuration by returning a valid XmlEntity

http://git-wip-us.apache.org/repos/asf/geode/blob/c793f74c/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
index 04359a3..5e9c4f9 100755
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
@@ -198,7 +198,7 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Test
-  public void createIndexShouldNotAcceptEmptyRegionNames() {
+  public void createIndexShouldNotAcceptBadIndexOrRegionNames() {
     final VM vm1 = Host.getHost(0).getVM(-1);
     vm1.invoke(() -> {
       getCache();
@@ -210,7 +210,7 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
     csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
 
     String resultAsString = executeCommandAndLogResult(csb);
-    assertTrue(resultAsString.contains("Region names may not begin with a double-underscore:"));
+    assertTrue(resultAsString.contains("Parameter names may not begin with a double-underscore:"));
 
     csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
     csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, INDEX_NAME);
@@ -218,8 +218,25 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
     csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
 
     resultAsString = executeCommandAndLogResult(csb);
-    assertTrue(resultAsString
-        .contains("Region names may only be alphanumeric and may contain hyphens or underscores:"));
+    assertTrue(resultAsString.contains(
+        "Parameter names may only be alphanumeric, though they can contain hyphens or underscores:"));
+
+    csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "\'__\'");
+    csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, REGION_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
+
+    resultAsString = executeCommandAndLogResult(csb);
+    assertTrue(resultAsString.contains("Parameter names may not begin with a double-underscore:"));
+
+    csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "\' @@@*%\'");
+    csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, REGION_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
+
+    resultAsString = executeCommandAndLogResult(csb);
+    assertTrue(resultAsString.contains(
+        "Parameter names may only be alphanumeric, though they can contain hyphens or underscores:"));
   }
 
   @Test

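At the gfsh level these checks mean that both the index name and the region path passed to create lucene index are validated on the members, and an invalid value is reported back in the command result. As an illustration only (assuming the usual gfsh option names name, region, and field for the CLI string constants used in the test; the expected messages are taken from the assertions above, and exact result formatting may differ):

    create lucene index --name=__index --region=/Person --field=field1,field2,field3
        fails with "Parameter names may not begin with a double-underscore: __index"

    create lucene index --name=bad*name --region=/Person --field=field1,field2,field3
        fails with "Parameter names may only be alphanumeric, though they can contain
        hyphens or underscores: bad*name"
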

[16/43] geode git commit: Cleanup CacheClientNotifier

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/d66e51d0/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
index e79bfbd..4bd4970 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientNotifier.java
@@ -93,7 +93,6 @@ import org.apache.geode.internal.cache.ClientRegionEventImpl;
 import org.apache.geode.internal.cache.ClientServerObserver;
 import org.apache.geode.internal.cache.ClientServerObserverHolder;
 import org.apache.geode.internal.cache.Conflatable;
-import org.apache.geode.internal.cache.DistributedRegion;
 import org.apache.geode.internal.cache.EntryEventImpl;
 import org.apache.geode.internal.cache.EnumListenerEvent;
 import org.apache.geode.internal.cache.EventID;
@@ -120,29 +119,158 @@ import org.apache.geode.security.AuthenticationFailedException;
 import org.apache.geode.security.AuthenticationRequiredException;
 
 /**
- * Class <code>CacheClientNotifier</code> works on the server and manages client socket connections
- * to clients requesting notification of updates and notifies them when updates occur.
+ * Class {@code CacheClientNotifier} works on the server and manages client socket connections to
+ * clients requesting notification of updates and notifies them when updates occur.
  *
  * @since GemFire 3.2
  */
-@SuppressWarnings({"synthetic-access", "deprecation"})
 public class CacheClientNotifier {
   private static final Logger logger = LogService.getLogger();
 
   private static volatile CacheClientNotifier ccnSingleton;
 
   /**
-   * Factory method to construct a CacheClientNotifier <code>CacheClientNotifier</code> instance.
+   * The map of known {@code CacheClientProxy} instances. Maps ClientProxyMembershipID to
+   * CacheClientProxy. Note that the keys in this map are not updated when a durable client
+   * reconnects. To make sure you get the updated ClientProxyMembershipID use this map to lookup the
+   * CacheClientProxy and then call getProxyID on it.
+   * <p>
+   * NOTE: package-private to avoid synthetic accessor
+   */
+  final ConcurrentMap/* <ClientProxyMembershipID, CacheClientProxy> */ clientProxies =
+      new ConcurrentHashMap();
+
+  /**
+   * The map of {@code CacheClientProxy} instances which are getting initialized. Maps
+   * ClientProxyMembershipID to CacheClientProxy.
+   */
+  private final ConcurrentMap/* <ClientProxyMembershipID, CacheClientProxy> */ initClientProxies =
+      new ConcurrentHashMap();
+
+  private final Set<ClientProxyMembershipID> timedOutDurableClientProxies = new HashSet<>();
+
+  /**
+   * The GemFire {@code InternalCache}. Note that since this is a singleton class you should not use
+   * a direct reference to cache in CacheClientNotifier code. Instead, you should always use
+   * {@code getCache()}
+   */
+  private InternalCache cache; // TODO: fix synchronization of cache
+
+  private InternalLogWriter logWriter;
+
+  /**
+   * The GemFire security {@code LogWriter}
+   */
+  private InternalLogWriter securityLogWriter;
+
+  /** the maximum number of messages that can be enqueued in a client-queue. */
+  private final int maximumMessageCount;
+
+  /**
+   * the time (in seconds) after which a message in the client queue will expire.
+   */
+  private final int messageTimeToLive;
+
+  /**
+   * A listener which receives notifications about queues that are added or removed
+   */
+  private final ConnectionListener connectionListener;
+
+  private final CacheServerStats acceptorStats;
+
+  /**
+   * haContainer can hold either the name of the client-messages-region (in case of eviction
+   * policies "mem" or "entry") or an instance of HashMap (in case of eviction policy "none"). In
+   * both the cases, it'll store HAEventWrapper as its key and ClientUpdateMessage as its value.
+   */
+  private volatile HAContainerWrapper haContainer;
+
+  /**
+   * The size of the server-to-client communication socket buffers. This can be modified using the
+   * BridgeServer.SOCKET_BUFFER_SIZE system property.
+   */
+  private static final int socketBufferSize =
+      Integer.getInteger("BridgeServer.SOCKET_BUFFER_SIZE", 32768);
+
+  /**
+   * The statistics for this notifier
+   */
+  final CacheClientNotifierStats statistics; // TODO: pass statistics into CacheClientProxy then
+                                             // make private
+
+  /**
+   * The {@code InterestRegistrationListener} instances registered in this VM. This is used when
+   * modifying the set of listeners.
+   */
+  private final Set writableInterestRegistrationListeners = new CopyOnWriteArraySet();
+
+  /**
+   * The {@code InterestRegistrationListener} instances registered in this VM. This is used to
+   * provide a read-only {@code Set} of listeners.
+   */
+  private final Set readableInterestRegistrationListeners =
+      Collections.unmodifiableSet(this.writableInterestRegistrationListeners);
+
+  /**
+   * System property name for indicating how much frequently the "Queue full" message should be
+   * logged.
+   */
+  private static final String MAX_QUEUE_LOG_FREQUENCY =
+      DistributionConfig.GEMFIRE_PREFIX + "logFrequency.clientQueueReachedMaxLimit";
+
+  public static final long DEFAULT_LOG_FREQUENCY = 1000;
+
+  private static final String EVENT_ENQUEUE_WAIT_TIME_NAME =
+      DistributionConfig.GEMFIRE_PREFIX + "subscription.EVENT_ENQUEUE_WAIT_TIME";
+
+  private static final int DEFAULT_EVENT_ENQUEUE_WAIT_TIME = 100;
+
+  /**
+   * System property value denoting the time in milliseconds. Any thread putting an event into a
+   * subscription queue, which is full, will wait this much time for the queue to make space. It'll
+   * then enqueue the event, possibly causing the queue to grow beyond its capacity/max-size. See
+   * #51400.
+   */
+  public static int eventEnqueueWaitTime; // TODO: encapsulate eventEnqueueWaitTime
+
+  /**
+   * The frequency of logging the "Queue full" message.
+   */
+  private long logFrequency = DEFAULT_LOG_FREQUENCY;
+
+  private final Map<String, DefaultQuery> compiledQueries = new ConcurrentHashMap<>();
+
+  private volatile boolean isCompiledQueryCleanupThreadStarted = false;
+
+  private final Object lockIsCompiledQueryCleanupThreadStarted = new Object();
+
+  private SystemTimer.SystemTimerTask clientPingTask; // TODO: fix synchronization of clientPingTask
+
+  private final SocketCloser socketCloser;
+
+  private static final long CLIENT_PING_TASK_PERIOD =
+      Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingPeriod", 60000);
+
+  /**
+   * package-private to avoid synthetic accessor
+   */
+  static final long CLIENT_PING_TASK_COUNTER =
+      Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingCounter", 3);
+
+  private final Set blackListedClients = new CopyOnWriteArraySet();
+
+  /**
+   * Factory method to construct a {@code CacheClientNotifier} instance.
    *
-   * @param cache The GemFire <code>InternalCache</code>
-   * @return A <code>CacheClientNotifier</code> instance
+   * @param cache The GemFire {@code InternalCache}
+   * @return A {@code CacheClientNotifier} instance
    */
   public static synchronized CacheClientNotifier getInstance(InternalCache cache,
       CacheServerStats acceptorStats, int maximumMessageCount, int messageTimeToLive,
       ConnectionListener listener, List overflowAttributesList, boolean isGatewayReceiver) {
     if (ccnSingleton == null) {
       ccnSingleton = new CacheClientNotifier(cache, acceptorStats, maximumMessageCount,
-          messageTimeToLive, listener, overflowAttributesList, isGatewayReceiver);
+          messageTimeToLive, listener, isGatewayReceiver);
     }
 
     if (!isGatewayReceiver && ccnSingleton.getHaContainer() == null) {
@@ -158,20 +286,72 @@ public class CacheClientNotifier {
   }
 
   /**
+   * @param cache The GemFire {@code InternalCache}
+   * @param listener a listener which should receive notifications about queues being added or
+   *        removed.
+   */
+  private CacheClientNotifier(InternalCache cache, CacheServerStats acceptorStats,
+      int maximumMessageCount, int messageTimeToLive, ConnectionListener listener,
+      boolean isGatewayReceiver) {
+    // Set the Cache
+    setCache(cache);
+    this.acceptorStats = acceptorStats;
+    // we only need one thread per client and wait 50ms for close
+    this.socketCloser = new SocketCloser(1, 50);
+
+    // Set the LogWriter
+    this.logWriter = (InternalLogWriter) cache.getLogger();
+
+    this.connectionListener = listener;
+
+    // Set the security LogWriter
+    this.securityLogWriter = (InternalLogWriter) cache.getSecurityLogger();
+
+    this.maximumMessageCount = maximumMessageCount;
+    this.messageTimeToLive = messageTimeToLive;
+
+    // Initialize the statistics
+    StatisticsFactory factory;
+    if (isGatewayReceiver) {
+      factory = new DummyStatisticsFactory();
+    } else {
+      factory = getCache().getDistributedSystem();
+    }
+    this.statistics = new CacheClientNotifierStats(factory);
+
+    try {
+      this.logFrequency = Long.valueOf(System.getProperty(MAX_QUEUE_LOG_FREQUENCY));
+      if (this.logFrequency <= 0) {
+        this.logFrequency = DEFAULT_LOG_FREQUENCY;
+      }
+    } catch (Exception e) {
+      this.logFrequency = DEFAULT_LOG_FREQUENCY;
+    }
+
+    eventEnqueueWaitTime =
+        Integer.getInteger(EVENT_ENQUEUE_WAIT_TIME_NAME, DEFAULT_EVENT_ENQUEUE_WAIT_TIME);
+    if (eventEnqueueWaitTime < 0) {
+      eventEnqueueWaitTime = DEFAULT_EVENT_ENQUEUE_WAIT_TIME;
+    }
+
+    // Schedule task to periodically ping clients.
+    scheduleClientPingTask();
+  }
+
+  /**
    * Writes a given message to the output stream
    *
-   * @param dos the <code>DataOutputStream</code> to use for writing the message
+   * @param dos the {@code DataOutputStream} to use for writing the message
    * @param type a byte representing the message type
-   * @param p_msg the message to be written; can be null
+   * @param message the message to be written; can be null
    */
-  private void writeMessage(DataOutputStream dos, byte type, String p_msg, Version clientVersion)
+  private void writeMessage(DataOutputStream dos, byte type, String message, Version clientVersion)
       throws IOException {
-    writeMessage(dos, type, p_msg, clientVersion, (byte) 0x00, 0);
+    writeMessage(dos, type, message, clientVersion, (byte) 0x00, 0);
   }
 
-  private void writeMessage(DataOutputStream dos, byte type, String p_msg, Version clientVersion,
+  private void writeMessage(DataOutputStream dos, byte type, String message, Version clientVersion,
       byte epType, int qSize) throws IOException {
-    String msg = p_msg;
 
     // write the message type
     dos.writeByte(type);
@@ -181,6 +361,7 @@ public class CacheClientNotifier {
     // dummy qSize
     dos.writeInt(qSize);
 
+    String msg = message;
     if (msg == null) {
       msg = "";
     }
@@ -188,10 +369,10 @@ public class CacheClientNotifier {
     if (clientVersion != null && clientVersion.compareTo(Version.GFE_61) >= 0) {
       // get all the instantiators.
       Instantiator[] instantiators = InternalInstantiator.getInstantiators();
-      HashMap instantiatorMap = new HashMap();
+      Map instantiatorMap = new HashMap();
       if (instantiators != null && instantiators.length > 0) {
         for (Instantiator instantiator : instantiators) {
-          ArrayList instantiatorAttributes = new ArrayList();
+          List<String> instantiatorAttributes = new ArrayList<>();
           instantiatorAttributes.add(instantiator.getClass().toString().substring(6));
           instantiatorAttributes.add(instantiator.getInstantiatedClass().toString().substring(6));
           instantiatorMap.put(instantiator.getId(), instantiatorAttributes);
@@ -201,15 +382,14 @@ public class CacheClientNotifier {
 
       // get all the dataserializers.
       DataSerializer[] dataSerializers = InternalDataSerializer.getSerializers();
-      HashMap<Integer, ArrayList<String>> dsToSupportedClasses =
-          new HashMap<Integer, ArrayList<String>>();
-      HashMap<Integer, String> dataSerializersMap = new HashMap<Integer, String>();
+      Map<Integer, List<String>> dsToSupportedClasses = new HashMap<>();
+      Map<Integer, String> dataSerializersMap = new HashMap<>();
       if (dataSerializers != null && dataSerializers.length > 0) {
         for (DataSerializer dataSerializer : dataSerializers) {
           dataSerializersMap.put(dataSerializer.getId(),
               dataSerializer.getClass().toString().substring(6));
           if (clientVersion.compareTo(Version.GFE_6516) >= 0) {
-            ArrayList<String> supportedClassNames = new ArrayList<String>();
+            List<String> supportedClassNames = new ArrayList<>();
             for (Class clazz : dataSerializer.getSupportedClasses()) {
               supportedClassNames.add(clazz.getName());
             }
@@ -228,7 +408,7 @@ public class CacheClientNotifier {
   /**
    * Writes an exception message to the socket
    *
-   * @param dos the <code>DataOutputStream</code> to use for writing the message
+   * @param dos the {@code DataOutputStream} to use for writing the message
    * @param type a byte representing the exception type
    * @param ex the exception to be written; should not be null
    */
@@ -245,7 +425,7 @@ public class CacheClientNotifier {
   public void registerClient(Socket socket, boolean isPrimary, long acceptorId,
       boolean notifyBySubscription) throws IOException {
     // Since no remote ports were specified in the message, wait for them.
-    long startTime = this._statistics.startTime();
+    long startTime = this.statistics.startTime();
     DataInputStream dis = new DataInputStream(socket.getInputStream());
     DataOutputStream dos = new DataOutputStream(socket.getOutputStream());
 
@@ -261,7 +441,7 @@ public class CacheClientNotifier {
       SocketAddress sa = socket.getRemoteSocketAddress();
       UnsupportedVersionException uve = e;
       if (sa != null) {
-        String sInfo = " Client: " + sa.toString() + ".";
+        String sInfo = " Client: " + sa + ".";
         uve = new UnsupportedVersionException(e.getMessage() + sInfo);
       }
       logger.warn(
@@ -272,8 +452,7 @@ public class CacheClientNotifier {
       return;
     }
 
-    // Read and ignore the reply code. This is used on the client to server
-    // handshake.
+    // Read and ignore the reply code. This is used on the client to server handshake.
     dis.readByte(); // replyCode
 
     if (Version.GFE_57.compareTo(clientVersion) <= 0) {
@@ -289,7 +468,7 @@ public class CacheClientNotifier {
     }
   }
 
-  protected void registerGFEClient(DataInputStream dis, DataOutputStream dos, Socket socket,
+  private void registerGFEClient(DataInputStream dis, DataOutputStream dos, Socket socket,
       boolean isPrimary, long startTime, Version clientVersion, long acceptorId,
       boolean notifyBySubscription) throws IOException {
     // Read the ports and throw them away. We no longer need them
@@ -299,9 +478,6 @@ public class CacheClientNotifier {
     }
     // Read the handshake identifier and convert it to a string member id
     ClientProxyMembershipID proxyID = null;
-    CacheClientProxy proxy;
-    AccessControl authzCallback = null;
-    byte clientConflation = HandShake.CONFLATION_DEFAULT;
     try {
       proxyID = ClientProxyMembershipID.readCanonicalized(dis);
       if (getBlacklistedClient().contains(proxyID)) {
@@ -309,13 +485,14 @@ public class CacheClientNotifier {
             new Exception("This client is blacklisted by server"), clientVersion);
         return;
       }
-      proxy = getClientProxy(proxyID);
+      CacheClientProxy proxy = getClientProxy(proxyID);
       DistributedMember member = proxyID.getDistributedMember();
 
-      DistributedSystem system = this.getCache().getDistributedSystem();
+      DistributedSystem system = getCache().getDistributedSystem();
       Properties sysProps = system.getProperties();
       String authenticator = sysProps.getProperty(SECURITY_CLIENT_AUTHENTICATOR);
 
+      byte clientConflation;
       if (clientVersion.compareTo(Version.GFE_603) >= 0) {
         byte[] overrides = HandShake.extractOverrides(new byte[] {(byte) dis.read()});
         clientConflation = overrides[0];
@@ -339,27 +516,23 @@ public class CacheClientNotifier {
 
       Properties credentials = HandShake.readCredentials(dis, dos, system);
       if (credentials != null && proxy != null) {
-        if (securityLogWriter.fineEnabled()) {
-          securityLogWriter
+        if (this.securityLogWriter.fineEnabled()) {
+          this.securityLogWriter
               .fine("CacheClientNotifier: verifying credentials for proxyID: " + proxyID);
         }
         Object subject = HandShake.verifyCredentials(authenticator, credentials,
             system.getSecurityProperties(), this.logWriter, this.securityLogWriter, member);
         if (subject instanceof Principal) {
           Principal principal = (Principal) subject;
-          if (securityLogWriter.fineEnabled()) {
-            securityLogWriter
+          if (this.securityLogWriter.fineEnabled()) {
+            this.securityLogWriter
                 .fine("CacheClientNotifier: successfully verified credentials for proxyID: "
                     + proxyID + " having principal: " + principal.getName());
           }
 
           String postAuthzFactoryName = sysProps.getProperty(SECURITY_CLIENT_ACCESSOR_PP);
-          if (postAuthzFactoryName != null && postAuthzFactoryName.length() > 0) {
-            if (principal == null) {
-              securityLogWriter.warning(
-                  LocalizedStrings.CacheClientNotifier_CACHECLIENTNOTIFIER_POST_PROCESS_AUTHORIZATION_CALLBACK_ENABLED_BUT_AUTHENTICATION_CALLBACK_0_RETURNED_WITH_NULL_CREDENTIALS_FOR_PROXYID_1,
-                  new Object[] {SECURITY_CLIENT_AUTHENTICATOR, proxyID});
-            }
+          AccessControl authzCallback = null;
+          if (postAuthzFactoryName != null && !postAuthzFactoryName.isEmpty()) {
             Method authzMethod = ClassLoadUtil.methodFromName(postAuthzFactoryName);
             authzCallback = (AccessControl) authzMethod.invoke(null, (Object[]) null);
             authzCallback.init(principal, member, this.getCache());
@@ -374,13 +547,13 @@ public class CacheClientNotifier {
           LocalizedStrings.CacheClientNotifier_CLIENTPROXYMEMBERSHIPID_OBJECT_COULD_NOT_BE_CREATED_EXCEPTION_OCCURRED_WAS_0
               .toLocalizedString(e));
     } catch (AuthenticationRequiredException ex) {
-      securityLogWriter.warning(
+      this.securityLogWriter.warning(
           LocalizedStrings.CacheClientNotifier_AN_EXCEPTION_WAS_THROWN_FOR_CLIENT_0_1,
           new Object[] {proxyID, ex});
       writeException(dos, HandShake.REPLY_EXCEPTION_AUTHENTICATION_REQUIRED, ex, clientVersion);
       return;
     } catch (AuthenticationFailedException ex) {
-      securityLogWriter.warning(
+      this.securityLogWriter.warning(
           LocalizedStrings.CacheClientNotifier_AN_EXCEPTION_WAS_THROWN_FOR_CLIENT_0_1,
           new Object[] {proxyID, ex});
       writeException(dos, HandShake.REPLY_EXCEPTION_AUTHENTICATION_FAILED, ex, clientVersion);
@@ -389,11 +562,10 @@ public class CacheClientNotifier {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.CacheClientNotifier_0_REGISTERCLIENT_EXCEPTION_ENCOUNTERED_IN_REGISTRATION_1,
           new Object[] {this, e}), e);
-      IOException io = new IOException(
+      throw new IOException(
           LocalizedStrings.CacheClientNotifier_EXCEPTION_OCCURRED_WHILE_TRYING_TO_REGISTER_INTEREST_DUE_TO_0
-              .toLocalizedString(e.getMessage()));
-      io.initCause(e);
-      throw io;
+              .toLocalizedString(e.getMessage()),
+          e);
     } catch (Exception ex) {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.CacheClientNotifier_AN_EXCEPTION_WAS_THROWN_FOR_CLIENT_0_1,
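
As an illustrative aside (not part of the diff itself): the hunk above replaces the older construct-then-initCause pattern with the IOException(String, Throwable) constructor available since Java 6. Both forms attach the same cause, as this small sketch with made-up messages shows:

    import java.io.IOException;

    public class ChainedExceptionSketch {
      public static void main(String[] args) {
        Exception cause = new IllegalStateException("registration failed");

        // Older idiom: construct the exception, then attach the cause in a second step.
        IOException oldStyle = new IOException("could not register interest: " + cause.getMessage());
        oldStyle.initCause(cause);

        // Newer idiom: pass the cause directly to the constructor.
        IOException newStyle =
            new IOException("could not register interest: " + cause.getMessage(), cause);

        System.out.println(oldStyle.getCause() == cause); // true
        System.out.println(newStyle.getCause() == cause); // true
      }
    }
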
@@ -402,7 +574,7 @@ public class CacheClientNotifier {
       return;
     }
 
-    this._statistics.endClientRegistration(startTime);
+    this.statistics.endClientRegistration(startTime);
   }
 
   /**
@@ -410,14 +582,13 @@ public class CacheClientNotifier {
    *
    * @param socket The socket over which the server communicates with the client.
    * @param proxyId The distributed member id of the client being registered
-   * @param proxy The <code>CacheClientProxy</code> of the given <code>proxyId</code>
+   * @param proxy The {@code CacheClientProxy} of the given {@code proxyId}
    *
    * @return CacheClientProxy for the registered client
    */
   private CacheClientProxy registerClient(Socket socket, ClientProxyMembershipID proxyId,
       CacheClientProxy proxy, boolean isPrimary, byte clientConflation, Version clientVersion,
       long acceptorId, boolean notifyBySubscription) throws IOException, CacheException {
-    CacheClientProxy l_proxy = proxy;
 
     // Initialize the socket
     socket.setTcpNoDelay(true);
@@ -431,9 +602,6 @@ public class CacheClientNotifier {
     }
 
     // Determine whether the client is durable or not.
-    byte responseByte = Acceptor.SUCCESSFUL_SERVER_TO_CLIENT;
-    String unsuccessfulMsg = null;
-    boolean successful = true;
     boolean clientIsDurable = proxyId.isDurable();
     if (logger.isDebugEnabled()) {
       if (clientIsDurable) {
@@ -446,8 +614,11 @@ public class CacheClientNotifier {
 
     byte epType = 0x00;
     int qSize = 0;
+    byte responseByte = Acceptor.SUCCESSFUL_SERVER_TO_CLIENT;
+    String unsuccessfulMsg = null;
+    boolean successful = true;
     if (clientIsDurable) {
-      if (l_proxy == null) {
+      if (proxy == null) {
         if (isTimedOut(proxyId)) {
           qSize = PoolImpl.PRIMARY_QUEUE_TIMED_OUT;
         } else {
@@ -459,9 +630,9 @@ public class CacheClientNotifier {
               "CacheClientNotifier: No proxy exists for durable client with id {}. It must be created.",
               proxyId.getDurableId());
         }
-        l_proxy = new CacheClientProxy(this, socket, proxyId, isPrimary, clientConflation,
+        proxy = new CacheClientProxy(this, socket, proxyId, isPrimary, clientConflation,
             clientVersion, acceptorId, notifyBySubscription);
-        successful = this.initializeProxy(l_proxy);
+        successful = this.initializeProxy(proxy);
       } else {
         if (proxy.isPrimary()) {
           epType = (byte) 2;
@@ -470,27 +641,27 @@ public class CacheClientNotifier {
         }
         qSize = proxy.getQueueSize();
         // A proxy exists for this durable client. It must be reinitialized.
-        if (l_proxy.isPaused()) {
+        if (proxy.isPaused()) {
           if (CacheClientProxy.testHook != null) {
             CacheClientProxy.testHook.doTestHook("CLIENT_PRE_RECONNECT");
           }
-          if (l_proxy.lockDrain()) {
+          if (proxy.lockDrain()) {
             try {
               if (logger.isDebugEnabled()) {
                 logger.debug(
                     "CacheClientNotifier: A proxy exists for durable client with id {}. This proxy will be reinitialized: {}",
-                    proxyId.getDurableId(), l_proxy);
+                    proxyId.getDurableId(), proxy);
               }
-              this._statistics.incDurableReconnectionCount();
-              l_proxy.getProxyID().updateDurableTimeout(proxyId.getDurableTimeout());
-              l_proxy.reinitialize(socket, proxyId, this.getCache(), isPrimary, clientConflation,
+              this.statistics.incDurableReconnectionCount();
+              proxy.getProxyID().updateDurableTimeout(proxyId.getDurableTimeout());
+              proxy.reinitialize(socket, proxyId, this.getCache(), isPrimary, clientConflation,
                   clientVersion);
-              l_proxy.setMarkerEnqueued(true);
+              proxy.setMarkerEnqueued(true);
               if (CacheClientProxy.testHook != null) {
                 CacheClientProxy.testHook.doTestHook("CLIENT_RECONNECTED");
               }
             } finally {
-              l_proxy.unlockDrain();
+              proxy.unlockDrain();
             }
           } else {
             unsuccessfulMsg =
@@ -507,7 +678,7 @@ public class CacheClientNotifier {
           // client is already using this durable id.
           unsuccessfulMsg =
               LocalizedStrings.CacheClientNotifier_CACHECLIENTNOTIFIER_THE_REQUESTED_DURABLE_CLIENT_HAS_THE_SAME_IDENTIFIER__0__AS_AN_EXISTING_DURABLE_CLIENT__1__DUPLICATE_DURABLE_CLIENTS_ARE_NOT_ALLOWED
-                  .toLocalizedString(new Object[] {proxyId.getDurableId(), proxy});
+                  .toLocalizedString(proxyId.getDurableId(), proxy);
           logger.warn(unsuccessfulMsg);
           // Set the unsuccessful response byte.
           responseByte = HandShake.REPLY_EXCEPTION_DUPLICATE_DURABLE_CLIENT;
@@ -537,18 +708,18 @@ public class CacheClientNotifier {
 
       if (toCreateNewProxy) {
         // Create the new proxy for this non-durable client
-        l_proxy = new CacheClientProxy(this, socket, proxyId, isPrimary, clientConflation,
+        proxy = new CacheClientProxy(this, socket, proxyId, isPrimary, clientConflation,
             clientVersion, acceptorId, notifyBySubscription);
-        successful = this.initializeProxy(l_proxy);
+        successful = this.initializeProxy(proxy);
       }
     }
 
     if (!successful) {
-      l_proxy = null;
+      proxy = null;
       responseByte = HandShake.REPLY_REFUSED;
       unsuccessfulMsg =
           LocalizedStrings.CacheClientNotifier_CACHECLIENTNOTIFIER_A_PREVIOUS_CONNECTION_ATTEMPT_FROM_THIS_CLIENT_IS_STILL_BEING_PROCESSED__0
-              .toLocalizedString(new Object[] {proxyId});
+              .toLocalizedString(proxyId);
     }
 
     // Tell the client that the proxy has been registered using the response
@@ -562,10 +733,10 @@ public class CacheClientNotifier {
       // write the message type, message length and the error message (if any)
       writeMessage(dos, responseByte, unsuccessfulMsg, clientVersion, epType, qSize);
     } catch (IOException ioe) {// remove the added proxy if we get IOException.
-      if (l_proxy != null) {
-        boolean keepProxy = l_proxy.close(false, false); // do not check for queue, just close it
+      if (proxy != null) {
+        boolean keepProxy = proxy.close(false, false); // do not check for queue, just close it
         if (!keepProxy) {
-          removeClientProxy(l_proxy);
+          removeClientProxy(proxy);
         }
       }
       throw ioe;
@@ -580,41 +751,39 @@ public class CacheClientNotifier {
     // will ensure that the response byte is sent to the client before
     // the marker message. If the client is durable, the message processor
     // is not started until the clientReady message is received.
-    if (!clientIsDurable && l_proxy != null
-        && responseByte == Acceptor.SUCCESSFUL_SERVER_TO_CLIENT) {
+    if (!clientIsDurable && proxy != null && responseByte == Acceptor.SUCCESSFUL_SERVER_TO_CLIENT) {
       // The startOrResumeMessageDispatcher tests if the proxy is a primary.
       // If this is a secondary proxy, the dispatcher is not started.
       // The false parameter signifies that a marker message has not already been
       // processed. This will generate and send one.
-      l_proxy.startOrResumeMessageDispatcher(false);
+      proxy.startOrResumeMessageDispatcher(false);
     }
 
     if (responseByte == Acceptor.SUCCESSFUL_SERVER_TO_CLIENT) {
       if (logger.isDebugEnabled()) {
-        logger.debug("CacheClientNotifier: Successfully registered {}", l_proxy);
+        logger.debug("CacheClientNotifier: Successfully registered {}", proxy);
       }
     } else {
       logger.warn(LocalizedMessage.create(
           LocalizedStrings.CacheClientNotifier_CACHECLIENTNOTIFIER_UNSUCCESSFULLY_REGISTERED_CLIENT_WITH_IDENTIFIER__0,
           proxyId));
     }
-    return l_proxy;
+    return proxy;
   }
 
-  private boolean initializeProxy(CacheClientProxy l_proxy) throws IOException, CacheException {
-    boolean status = false;
-    if (!this.isProxyInInitializationMode(l_proxy)) {
+  private boolean initializeProxy(CacheClientProxy proxy) throws CacheException {
+    if (!this.isProxyInInitializationMode(proxy)) {
       if (logger.isDebugEnabled()) {
-        logger.debug("Initializing proxy: {}", l_proxy);
+        logger.debug("Initializing proxy: {}", proxy);
       }
       try {
         // Add client proxy to initialization list. This has to be done before
         // the queue is created so that events can be buffered here for delivery
         // to the queue once it's initialized (bug #41681 and others)
-        addClientInitProxy(l_proxy);
-        l_proxy.initializeMessageDispatcher();
+        addClientInitProxy(proxy);
+        proxy.initializeMessageDispatcher();
         // Initialization success. Add to client proxy list.
-        addClientProxy(l_proxy);
+        addClientProxy(proxy);
         return true;
       } catch (RegionExistsException ree) {
         if (logger.isDebugEnabled()) {
@@ -624,10 +793,10 @@ public class CacheClientNotifier {
         }
         // This will return false;
       } finally {
-        removeClientInitProxy(l_proxy);
+        removeClientInitProxy(proxy);
       }
     }
-    return status;
+    return false;
   }
 
   /**
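
As an illustrative aside (not part of the diff itself): the reworked initializeProxy above returns true from inside the try block and false at the end of the method, relying on the finally block to perform cleanup in every case. A minimal sketch of that control flow, with hypothetical method names:

    public class TryFinallyReturnSketch {
      private static boolean initialize() {
        try {
          System.out.println("initializing");
          return true; // the finally block still executes before this value is returned
        } finally {
          System.out.println("cleanup always runs");
        }
      }

      public static void main(String[] args) {
        // Prints "initializing", then "cleanup always runs", then "true".
        System.out.println(initialize());
      }
    }
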
@@ -670,9 +839,9 @@ public class CacheClientNotifier {
     boolean success = false;
     CacheClientProxy proxy = getClientProxy(proxyId);
     if (proxy != null) {
-      HARegionQueue harq = proxy.getHARegionQueue();
-      harq.addDispatchedMessage(new ThreadIdentifier(eid.getMembershipID(), eid.getThreadID()),
-          eid.getSequenceID());
+      HARegionQueue haRegionQueue = proxy.getHARegionQueue();
+      haRegionQueue.addDispatchedMessage(
+          new ThreadIdentifier(eid.getMembershipID(), eid.getThreadID()), eid.getSequenceID());
       success = true;
     }
     return success;
@@ -690,11 +859,6 @@ public class CacheClientNotifier {
     }
     CacheClientProxy proxy = getClientProxy(membershipID);
     if (proxy != null) {
-      // Close the port if the proxy represents the client and contains the
-      // port)
-      // // If so, remove the port from the client's remote ports
-      // proxy.removePort(clientPort);
-      // Set the keepalive flag
       proxy.setKeepAlive(keepAlive);
     }
   }
@@ -704,7 +868,7 @@ public class CacheClientNotifier {
    *
    * @param memberId Uniquely identifies the client
    */
-  public void unregisterClient(ClientProxyMembershipID memberId, boolean normalShutdown) {
+  void unregisterClient(ClientProxyMembershipID memberId, boolean normalShutdown) {
     if (logger.isDebugEnabled()) {
       logger.debug("CacheClientNotifier: Unregistering all clients with member id: {}", memberId);
     }
@@ -769,14 +933,16 @@ public class CacheClientNotifier {
    * notify interested clients of the given cache event using the given update message. The event
    * should have routing information in it that determines which clients will receive the event.
    */
-  public static void notifyClients(InternalCacheEvent event, ClientUpdateMessage cmsg) {
+  public static void notifyClients(InternalCacheEvent event,
+      ClientUpdateMessage clientUpdateMessage) {
     CacheClientNotifier instance = ccnSingleton;
     if (instance != null) {
-      instance.singletonNotifyClients(event, cmsg);
+      instance.singletonNotifyClients(event, clientUpdateMessage);
     }
   }
 
-  private void singletonNotifyClients(InternalCacheEvent event, ClientUpdateMessage cmsg) {
+  private void singletonNotifyClients(InternalCacheEvent event,
+      ClientUpdateMessage clientUpdateMessage) {
     final boolean isDebugEnabled = logger.isDebugEnabled();
     final boolean isTraceEnabled = logger.isTraceEnabled();
 
@@ -796,20 +962,20 @@ public class CacheClientNotifier {
       return;
     }
 
-    long startTime = this._statistics.startTime();
+    long startTime = this.statistics.startTime();
 
     ClientUpdateMessageImpl clientMessage;
-    if (cmsg == null) {
+    if (clientUpdateMessage == null) {
       clientMessage = constructClientMessage(event);
     } else {
-      clientMessage = (ClientUpdateMessageImpl) cmsg;
+      clientMessage = (ClientUpdateMessageImpl) clientUpdateMessage;
     }
     if (clientMessage == null) {
       return;
     }
 
     // Holds the clientIds to which filter message needs to be sent.
-    Set<ClientProxyMembershipID> filterClients = new HashSet();
+    Set<ClientProxyMembershipID> filterClients = new HashSet<>();
 
     // Add CQ info.
     if (filterInfo.getCQs() != null) {
@@ -868,7 +1034,7 @@ public class CacheClientNotifier {
       }
     }
 
-    Conflatable conflatable = null;
+    Conflatable conflatable;
 
     if (clientMessage instanceof ClientTombstoneMessage) {
       // bug #46832 - HAEventWrapper deserialization can't handle subclasses
@@ -893,7 +1059,7 @@ public class CacheClientNotifier {
 
     singletonRouteClientMessage(conflatable, filterClients);
 
-    this._statistics.endEvent(startTime);
+    this.statistics.endEvent(startTime);
 
     // Cleanup destroyed events in CQ result cache.
     // While maintaining the CQ results key caching. the destroy event
@@ -915,7 +1081,7 @@ public class CacheClientNotifier {
         String cqName = regionProfile.getRealCqID(cqID);
         if (cqName != null) {
           ServerCQ cq = regionProfile.getCq(cqName);
-          if (cq != null && e.getValue().equals(Integer.valueOf(MessageType.LOCAL_DESTROY))) {
+          if (cq != null && e.getValue().equals(MessageType.LOCAL_DESTROY)) {
             cq.removeFromCqResultKeys(entryEvent.getKey(), true);
           }
         }
@@ -930,28 +1096,15 @@ public class CacheClientNotifier {
   public static void routeClientMessage(Conflatable clientMessage) {
     CacheClientNotifier instance = ccnSingleton;
     if (instance != null) {
-      instance.singletonRouteClientMessage(clientMessage, instance._clientProxies.keySet()); // ok
-                                                                                             // to
-                                                                                             // use
-                                                                                             // keySet
-                                                                                             // here
-                                                                                             // because
-                                                                                             // all
-                                                                                             // we
-                                                                                             // do
-                                                                                             // is
-                                                                                             // call
-                                                                                             // getClientProxy
-                                                                                             // with
-                                                                                             // these
-                                                                                             // keys
+      // ok to use keySet here because all we do is call getClientProxy with these keys
+      instance.singletonRouteClientMessage(clientMessage, instance.clientProxies.keySet());
     }
   }
 
   /**
    * this is for server side registration of client queue
    */
-  public static void routeSingleClientMessage(ClientUpdateMessage clientMessage,
+  static void routeSingleClientMessage(ClientUpdateMessage clientMessage,
       ClientProxyMembershipID clientProxyMembershipId) {
     CacheClientNotifier instance = ccnSingleton;
     if (instance != null) {
@@ -963,27 +1116,25 @@ public class CacheClientNotifier {
   private void singletonRouteClientMessage(Conflatable conflatable,
       Collection<ClientProxyMembershipID> filterClients) {
 
-    this._cache.getCancelCriterion().checkCancelInProgress(null); // bug #43942 - client notified
-                                                                  // but no p2p distribution
+    this.cache.getCancelCriterion().checkCancelInProgress(null);
 
     List<CacheClientProxy> deadProxies = null;
     for (ClientProxyMembershipID clientId : filterClients) {
-      CacheClientProxy proxy;
-      proxy = this.getClientProxy(clientId, true);
+      CacheClientProxy proxy = this.getClientProxy(clientId, true);
       if (proxy != null) {
         if (proxy.isAlive() || proxy.isPaused() || proxy.isConnected() || proxy.isDurable()) {
           proxy.deliverMessage(conflatable);
         } else {
           proxy.getStatistics().incMessagesFailedQueued();
           if (deadProxies == null) {
-            deadProxies = new ArrayList<CacheClientProxy>();
+            deadProxies = new ArrayList<>();
           }
           deadProxies.add(proxy);
         }
-        this.blackListSlowReciever(proxy);
+        this.blackListSlowReceiver(proxy);
       }
     }
-    checkAndRemoveFromClientMsgsRegion(conflatable);
+    checkAndRemoveFromClientMessagesRegion(conflatable);
     // Remove any dead clients from the clients to notify
     if (deadProxies != null) {
       closeDeadProxies(deadProxies, false);
@@ -994,7 +1145,7 @@ public class CacheClientNotifier {
    * processes the given collection of durable and non-durable client identifiers, returning a
    * collection of non-durable identifiers of clients connected to this VM
    */
-  public Set<ClientProxyMembershipID> getProxyIDs(Set mixedDurableAndNonDurableIDs) {
+  Set<ClientProxyMembershipID> getProxyIDs(Set mixedDurableAndNonDurableIDs) {
     return getProxyIDs(mixedDurableAndNonDurableIDs, false);
   }
 
@@ -1003,52 +1154,44 @@ public class CacheClientNotifier {
    * collection of non-durable identifiers of clients connected to this VM. This version can check
    * for proxies in initialization as well as fully initialized proxies.
    */
-  public Set<ClientProxyMembershipID> getProxyIDs(Set mixedDurableAndNonDurableIDs,
+  private Set<ClientProxyMembershipID> getProxyIDs(Set mixedDurableAndNonDurableIDs,
       boolean proxyInInitMode) {
-    Set<ClientProxyMembershipID> result = new HashSet();
+    Set<ClientProxyMembershipID> result = new HashSet<>();
     for (Object id : mixedDurableAndNonDurableIDs) {
       if (id instanceof String) {
         CacheClientProxy clientProxy = getClientProxy((String) id, true);
         if (clientProxy != null) {
           result.add(clientProxy.getProxyID());
         }
-        // else { we don't have a proxy for the given durable ID }
       } else {
         // try to canonicalize the ID.
         CacheClientProxy proxy = getClientProxy((ClientProxyMembershipID) id, true);
         if (proxy != null) {
-          // this._logger.info(LocalizedStrings.DEBUG, "BRUCE: found match for " + id + ": " +
-          // proxy.getProxyID());
           result.add(proxy.getProxyID());
-        } else {
-          // this._logger.info(LocalizedStrings.DEBUG, "BRUCE: did not find match for " + id);
-          // this was causing OOMEs in HARegion initial image processing because
-          // messages had routing for clients unknown to this server
-          // result.add((ClientProxyMembershipID)id);
         }
       }
     }
     return result;
   }
 
-  private void blackListSlowReciever(CacheClientProxy clientProxy) {
+  private void blackListSlowReceiver(CacheClientProxy clientProxy) {
     final CacheClientProxy proxy = clientProxy;
-    if ((proxy.getHARegionQueue() != null && proxy.getHARegionQueue().isClientSlowReciever())
-        && !blackListedClients.contains(proxy.getProxyID())) {
+    if (proxy.getHARegionQueue() != null && proxy.getHARegionQueue().isClientSlowReciever()
+        && !this.blackListedClients.contains(proxy.getProxyID())) {
       // log alert with client info.
       logger.warn(
           LocalizedMessage.create(LocalizedStrings.CacheClientNotifier_CLIENT_0_IS_A_SLOW_RECEIVER,
               new Object[] {proxy.getProxyID()}));
       addToBlacklistedClient(proxy.getProxyID());
-      InternalDistributedSystem ids =
-          (InternalDistributedSystem) this.getCache().getDistributedSystem();
-      final DM dm = ids.getDistributionManager();
+      InternalDistributedSystem system = getCache().getInternalDistributedSystem();
+      final DM dm = system.getDistributionManager();
+
       dm.getWaitingThreadPool().execute(new Runnable() {
+        @Override
         public void run() {
 
           CacheDistributionAdvisor advisor =
-              ((DistributedRegion) proxy.getHARegionQueue().getRegion())
-                  .getCacheDistributionAdvisor();
+              proxy.getHARegionQueue().getRegion().getCacheDistributionAdvisor();
           Set members = advisor.adviseCacheOp();
 
           // Send client blacklist message
@@ -1074,25 +1217,24 @@ public class CacheClientNotifier {
   }
 
   /**
-   * Initializes a <code>ClientUpdateMessage</code> from an operation and event
+   * Initializes a {@code ClientUpdateMessage} from an operation and event
    *
    * @param operation The operation that occurred (e.g. AFTER_CREATE)
    * @param event The event containing the data to be updated
-   * @return a <code>ClientUpdateMessage</code>
+   * @return a {@code ClientUpdateMessage}
    */
-  private ClientUpdateMessageImpl initializeMessage(EnumListenerEvent operation, CacheEvent event)
-      throws Exception {
+  private ClientUpdateMessageImpl initializeMessage(EnumListenerEvent operation, CacheEvent event) {
     if (!supportsOperation(operation)) {
-      throw new Exception(
+      throw new UnsupportedOperationException(
           LocalizedStrings.CacheClientNotifier_THE_CACHE_CLIENT_NOTIFIER_DOES_NOT_SUPPORT_OPERATIONS_OF_TYPE_0
               .toLocalizedString(operation));
     }
-    // String regionName = event.getRegion().getFullPath();
+
     Object keyOfInterest = null;
     final EventID eventIdentifier;
     ClientProxyMembershipID membershipID = null;
     boolean isNetLoad = false;
-    Object callbackArgument = null;
+    Object callbackArgument;
     byte[] delta = null;
     VersionTag versionTag = null;
 
@@ -1129,19 +1271,19 @@ public class CacheClientNotifier {
     }
 
     if (isNetLoad) {
-      clientUpdateMsg.setIsNetLoad(isNetLoad);
+      clientUpdateMsg.setIsNetLoad(true);
     }
 
     return clientUpdateMsg;
   }
 
   /**
-   * Returns whether the <code>CacheClientNotifier</code> supports the input operation.
+   * Returns whether the {@code CacheClientNotifier} supports the input operation.
    *
    * @param operation The operation that occurred (e.g. AFTER_CREATE)
-   * @return whether the <code>CacheClientNotifier</code> supports the input operation
+   * @return whether the {@code CacheClientNotifier} supports the input operation
    */
-  protected boolean supportsOperation(EnumListenerEvent operation) {
+  private boolean supportsOperation(EnumListenerEvent operation) {
     return operation == EnumListenerEvent.AFTER_CREATE
         || operation == EnumListenerEvent.AFTER_UPDATE
         || operation == EnumListenerEvent.AFTER_DESTROY
@@ -1211,7 +1353,7 @@ public class CacheClientNotifier {
       int regionDataPolicy) {
     if (regionDataPolicy == 0) {
       if (!regionsWithEmptyDataPolicy.containsKey(regionName)) {
-        regionsWithEmptyDataPolicy.put(regionName, Integer.valueOf(0));
+        regionsWithEmptyDataPolicy.put(regionName, 0);
       }
     }
   }
@@ -1222,8 +1364,8 @@ public class CacheClientNotifier {
    * @param regionName The name of the region of interest
    * @param keyOfInterest The name of the key of interest
    * @param isClosing Whether the caller is closing
-   * @param membershipID The <code>ClientProxyMembershipID</code> of the client no longer interested
-   *        in this <code>Region</code> and key
+   * @param membershipID The {@code ClientProxyMembershipID} of the client no longer interested in
+   *        this {@code Region} and key
    */
   public void unregisterClientInterest(String regionName, Object keyOfInterest, int interestType,
       boolean isClosing, ClientProxyMembershipID membershipID, boolean keepalive) {
@@ -1244,8 +1386,8 @@ public class CacheClientNotifier {
    *
    * @param regionName The name of the region of interest
    * @param keysOfInterest The list of keys of interest
-   * @param membershipID The <code>ClientProxyMembershipID</code> of the client no longer interested
-   *        in this <code>Region</code> and key
+   * @param membershipID The {@code ClientProxyMembershipID} of the client no longer interested in
+   *        this {@code Region} and key
    */
   public void registerClientInterest(String regionName, List keysOfInterest,
       ClientProxyMembershipID membershipID, boolean isDurable, boolean sendUpdatesAsInvalidates,
@@ -1278,8 +1420,8 @@ public class CacheClientNotifier {
    * @param regionName The name of the region of interest
    * @param keysOfInterest The list of keys of interest
    * @param isClosing Whether the caller is closing
-   * @param membershipID The <code>ClientProxyMembershipID</code> of the client no longer interested
-   *        in this <code>Region</code> and key
+   * @param membershipID The {@code ClientProxyMembershipID} of the client no longer interested in
+   *        this {@code Region} and key
    */
   public void unregisterClientInterest(String regionName, List keysOfInterest, boolean isClosing,
       ClientProxyMembershipID membershipID, boolean keepalive) {
@@ -1301,21 +1443,22 @@ public class CacheClientNotifier {
    * 
    * @since GemFire 5.7
    */
-  private void checkAndRemoveFromClientMsgsRegion(Conflatable conflatable) {
-    if (haContainer == null) {
+  private void checkAndRemoveFromClientMessagesRegion(Conflatable conflatable) {
+    if (this.haContainer == null) {
       return;
     }
+
     if (conflatable instanceof HAEventWrapper) {
       HAEventWrapper wrapper = (HAEventWrapper) conflatable;
       if (!wrapper.getIsRefFromHAContainer()) {
-        wrapper = (HAEventWrapper) haContainer.getKey(wrapper);
+        wrapper = (HAEventWrapper) this.haContainer.getKey(wrapper);
         if (wrapper != null && !wrapper.getPutInProgress()) {
           synchronized (wrapper) {
             if (wrapper.getReferenceCount() == 0L) {
               if (logger.isDebugEnabled()) {
                 logger.debug("Removing event from haContainer: {}", wrapper);
               }
-              haContainer.remove(wrapper);
+              this.haContainer.remove(wrapper);
             }
           }
         }
@@ -1328,7 +1471,7 @@ public class CacheClientNotifier {
             if (logger.isDebugEnabled()) {
               logger.debug("Removing event from haContainer: {}", wrapper);
             }
-            haContainer.remove(wrapper);
+            this.haContainer.remove(wrapper);
           }
         }
       }
@@ -1336,12 +1479,12 @@ public class CacheClientNotifier {
   }
 
   /**
-   * Returns the <code>CacheClientProxy</code> associated to the membershipID *
+   * Returns the {@code CacheClientProxy} associated to the membershipID *
    *
-   * @return the <code>CacheClientProxy</code> associated to the membershipID
+   * @return the {@code CacheClientProxy} associated to the membershipID
    */
   public CacheClientProxy getClientProxy(ClientProxyMembershipID membershipID) {
-    return (CacheClientProxy) this._clientProxies.get(membershipID);
+    return (CacheClientProxy) this.clientProxies.get(membershipID);
   }
 
   /**
@@ -1352,25 +1495,25 @@ public class CacheClientNotifier {
       boolean proxyInInitMode) {
     CacheClientProxy proxy = getClientProxy(membershipID);
     if (proxyInInitMode && proxy == null) {
-      proxy = (CacheClientProxy) this._initClientProxies.get(membershipID);
+      proxy = (CacheClientProxy) this.initClientProxies.get(membershipID);
     }
     return proxy;
   }
 
   /**
-   * Returns the <code>CacheClientProxy</code> associated to the durableClientId
+   * Returns the {@code CacheClientProxy} associated to the durableClientId
    * 
-   * @return the <code>CacheClientProxy</code> associated to the durableClientId
+   * @return the {@code CacheClientProxy} associated to the durableClientId
    */
   public CacheClientProxy getClientProxy(String durableClientId) {
     return getClientProxy(durableClientId, false);
   }
 
   /**
-   * Returns the <code>CacheClientProxy</code> associated to the durableClientId. This version of
-   * the method can check for initializing proxies as well as fully initialized proxies.
+   * Returns the {@code CacheClientProxy} associated to the durableClientId. This version of the
+   * method can check for initializing proxies as well as fully initialized proxies.
    * 
-   * @return the <code>CacheClientProxy</code> associated to the durableClientId
+   * @return the {@code CacheClientProxy} associated to the durableClientId
    */
   public CacheClientProxy getClientProxy(String durableClientId, boolean proxyInInitMode) {
     final boolean isDebugEnabled = logger.isDebugEnabled();
@@ -1379,9 +1522,9 @@ public class CacheClientNotifier {
     if (isDebugEnabled) {
       logger.debug("CacheClientNotifier: Determining client for {}", durableClientId);
     }
+
     CacheClientProxy proxy = null;
-    for (Iterator i = getClientProxies().iterator(); i.hasNext();) {
-      CacheClientProxy clientProxy = (CacheClientProxy) i.next();
+    for (CacheClientProxy clientProxy : getClientProxies()) {
       if (isTraceEnabled) {
         logger.trace("CacheClientNotifier: Checking client {}", clientProxy);
       }
@@ -1394,9 +1537,10 @@ public class CacheClientNotifier {
         break;
       }
     }
+
     if (proxy == null && proxyInInitMode) {
-      for (Iterator i = this._initClientProxies.values().iterator(); i.hasNext();) {
-        CacheClientProxy clientProxy = (CacheClientProxy) i.next();
+      for (Object clientProxyObject : this.initClientProxies.values()) {
+        CacheClientProxy clientProxy = (CacheClientProxy) clientProxyObject;
         if (isTraceEnabled) {
           logger.trace("CacheClientNotifier: Checking initializing client {}", clientProxy);
         }
@@ -1415,37 +1559,6 @@ public class CacheClientNotifier {
   }
 
   /**
-   * Returns the <code>CacheClientProxySameDS</code> associated to the membershipID *
-   * 
-   * @return the <code>CacheClientProxy</code> associated to the same distributed system
-   */
-  public CacheClientProxy getClientProxySameDS(ClientProxyMembershipID membershipID) {
-    final boolean isDebugEnabled = logger.isDebugEnabled();
-    if (isDebugEnabled) {
-      logger.debug("{}::getClientProxySameDS(), Determining client for host {}", this,
-          membershipID);
-      logger.debug("{}::getClientProxySameDS(), Number of proxies in the Cache Clinet Notifier: {}",
-          this, getClientProxies().size());
-    }
-    CacheClientProxy proxy = null;
-    for (Iterator i = getClientProxies().iterator(); i.hasNext();) {
-      CacheClientProxy clientProxy = (CacheClientProxy) i.next();
-      if (isDebugEnabled) {
-        logger.debug("CacheClientNotifier: Checking client {}", clientProxy);
-      }
-      if (clientProxy.isSameDSMember(membershipID)) {
-        proxy = clientProxy;
-        if (isDebugEnabled) {
-          logger.debug("CacheClientNotifier: {} represents the client running on host {}", proxy,
-              membershipID);
-        }
-        break;
-      }
-    }
-    return proxy;
-  }
-
-  /**
    * It will remove the clients connected to the passed acceptorId. If its the only server, shuts
    * down this instance.
    */
@@ -1453,10 +1566,10 @@ public class CacheClientNotifier {
     final boolean isDebugEnabled = logger.isDebugEnabled();
     if (isDebugEnabled) {
       logger.debug("At cache server shutdown time, the number of cache servers in the cache is {}",
-          this.getCache().getCacheServers().size());
+          getCache().getCacheServers().size());
     }
 
-    Iterator it = this._clientProxies.values().iterator();
+    Iterator it = this.clientProxies.values().iterator();
     // Close all the client proxies
     while (it.hasNext()) {
       CacheClientProxy proxy = (CacheClientProxy) it.next();
@@ -1478,27 +1591,27 @@ public class CacheClientNotifier {
 
     if (noActiveServer() && ccnSingleton != null) {
       ccnSingleton = null;
-      if (haContainer != null) {
-        haContainer.cleanUp();
+      if (this.haContainer != null) {
+        this.haContainer.cleanUp();
         if (isDebugEnabled) {
-          logger.debug("haContainer ({}) is now cleaned up.", haContainer.getName());
+          logger.debug("haContainer ({}) is now cleaned up.", this.haContainer.getName());
         }
       }
       this.clearCompiledQueries();
-      blackListedClients.clear();
+      this.blackListedClients.clear();
 
       // cancel the ping task
       this.clientPingTask.cancel();
 
       // Close the statistics
-      this._statistics.close();
+      this.statistics.close();
 
       this.socketCloser.close();
     }
   }
 
   private boolean noActiveServer() {
-    for (CacheServer server : this.getCache().getCacheServers()) {
+    for (CacheServer server : getCache().getCacheServers()) {
       if (server.isRunning()) {
         return false;
       }
@@ -1507,41 +1620,40 @@ public class CacheClientNotifier {
   }
 
   /**
-   * Adds a new <code>CacheClientProxy</code> to the list of known client proxies
+   * Adds a new {@code CacheClientProxy} to the list of known client proxies
    *
-   * @param proxy The <code>CacheClientProxy</code> to add
+   * @param proxy The {@code CacheClientProxy} to add
    */
-  protected void addClientProxy(CacheClientProxy proxy) throws IOException {
-    // this._logger.info(LocalizedStrings.DEBUG, "adding client proxy " + proxy);
+  void addClientProxy(CacheClientProxy proxy) {
     getCache(); // ensure cache reference is up to date so firstclient state is correct
-    this._clientProxies.put(proxy.getProxyID(), proxy);
+    this.clientProxies.put(proxy.getProxyID(), proxy);
     // Remove this proxy from the init proxy list.
     removeClientInitProxy(proxy);
-    this._connectionListener.queueAdded(proxy.getProxyID());
-    if (!(proxy.clientConflation == HandShake.CONFLATION_ON)) {
+    this.connectionListener.queueAdded(proxy.getProxyID());
+    if (proxy.clientConflation != HandShake.CONFLATION_ON) {
       // Delta not supported with conflation ON
-      ClientHealthMonitor chm = ClientHealthMonitor.getInstance();
+      ClientHealthMonitor clientHealthMonitor = ClientHealthMonitor.getInstance();
       /*
        * #41788 - If the client connection init starts while cache/member is shutting down,
        * ClientHealthMonitor.getInstance() might return null.
        */
-      if (chm != null) {
-        chm.numOfClientsPerVersion.incrementAndGet(proxy.getVersion().ordinal());
+      if (clientHealthMonitor != null) {
+        clientHealthMonitor.numOfClientsPerVersion.incrementAndGet(proxy.getVersion().ordinal());
       }
     }
     this.timedOutDurableClientProxies.remove(proxy.getProxyID());
   }
 
-  protected void addClientInitProxy(CacheClientProxy proxy) throws IOException {
-    this._initClientProxies.put(proxy.getProxyID(), proxy);
+  private void addClientInitProxy(CacheClientProxy proxy) {
+    this.initClientProxies.put(proxy.getProxyID(), proxy);
   }
 
-  protected void removeClientInitProxy(CacheClientProxy proxy) throws IOException {
-    this._initClientProxies.remove(proxy.getProxyID());
+  private void removeClientInitProxy(CacheClientProxy proxy) {
+    this.initClientProxies.remove(proxy.getProxyID());
   }
 
-  protected boolean isProxyInInitializationMode(CacheClientProxy proxy) throws IOException {
-    return this._initClientProxies.containsKey(proxy.getProxyID());
+  private boolean isProxyInInitializationMode(CacheClientProxy proxy) {
+    return this.initClientProxies.containsKey(proxy.getProxyID());
   }
 
   /**
@@ -1552,8 +1664,7 @@ public class CacheClientNotifier {
    */
   public Set getActiveClients() {
     Set clients = new HashSet();
-    for (Iterator iter = getClientProxies().iterator(); iter.hasNext();) {
-      CacheClientProxy proxy = (CacheClientProxy) iter.next();
+    for (CacheClientProxy proxy : getClientProxies()) {
       if (proxy.hasRegisteredInterested()) {
         ClientProxyMembershipID proxyID = proxy.getProxyID();
         clients.add(proxyID);
@@ -1569,8 +1680,8 @@ public class CacheClientNotifier {
    */
   public Map getAllClients() {
     Map clients = new HashMap();
-    for (Iterator iter = this._clientProxies.values().iterator(); iter.hasNext();) {
-      CacheClientProxy proxy = (CacheClientProxy) iter.next();
+    for (final Object o : this.clientProxies.values()) {
+      CacheClientProxy proxy = (CacheClientProxy) o;
       ClientProxyMembershipID proxyID = proxy.getProxyID();
       clients.put(proxyID, new CacheClientStatus(proxyID));
     }
@@ -1586,8 +1697,8 @@ public class CacheClientNotifier {
    * @since GemFire 5.6
    */
   public boolean hasDurableClient(String durableId) {
-    for (Iterator iter = this._clientProxies.values().iterator(); iter.hasNext();) {
-      CacheClientProxy proxy = (CacheClientProxy) iter.next();
+    for (Object clientProxyObject : this.clientProxies.values()) {
+      CacheClientProxy proxy = (CacheClientProxy) clientProxyObject;
       ClientProxyMembershipID proxyID = proxy.getProxyID();
       if (durableId.equals(proxyID.getDurableId())) {
         return true;
@@ -1605,15 +1716,11 @@ public class CacheClientNotifier {
    * @since GemFire 5.6
    */
   public boolean hasPrimaryForDurableClient(String durableId) {
-    for (Iterator iter = this._clientProxies.values().iterator(); iter.hasNext();) {
-      CacheClientProxy proxy = (CacheClientProxy) iter.next();
+    for (Object clientProxyObject : this.clientProxies.values()) {
+      CacheClientProxy proxy = (CacheClientProxy) clientProxyObject;
       ClientProxyMembershipID proxyID = proxy.getProxyID();
       if (durableId.equals(proxyID.getDurableId())) {
-        if (proxy.isPrimary()) {
-          return true;
-        } else {
-          return false;
-        }
+        return proxy.isPrimary();
       }
     }
     return false;
@@ -1626,9 +1733,9 @@ public class CacheClientNotifier {
    */
   public Map getClientQueueSizes() {
     Map/* <ClientProxyMembershipID,Integer> */ queueSizes = new HashMap();
-    for (Iterator iter = this._clientProxies.values().iterator(); iter.hasNext();) {
-      CacheClientProxy proxy = (CacheClientProxy) iter.next();
-      queueSizes.put(proxy.getProxyID(), Integer.valueOf(proxy.getQueueSize()));
+    for (Object clientProxyObject : this.clientProxies.values()) {
+      CacheClientProxy proxy = (CacheClientProxy) clientProxyObject;
+      queueSizes.put(proxy.getProxyID(), proxy.getQueueSize());
     }
     return queueSizes;
   }
@@ -1645,25 +1752,20 @@ public class CacheClientNotifier {
   public boolean closeClientCq(String durableClientId, String clientCQName) throws CqException {
     CacheClientProxy proxy = getClientProxy(durableClientId);
     // close and drain
-    if (proxy != null) {
-      return proxy.closeClientCq(clientCQName);
-    }
-    return false;
+    return proxy != null && proxy.closeClientCq(clientCQName);
   }
 
   /**
-   * Removes an existing <code>CacheClientProxy</code> from the list of known client proxies
+   * Removes an existing {@code CacheClientProxy} from the list of known client proxies
    *
-   * @param proxy The <code>CacheClientProxy</code> to remove
+   * @param proxy The {@code CacheClientProxy} to remove
    */
-  protected void removeClientProxy(CacheClientProxy proxy) {
-    // this._logger.info(LocalizedStrings.DEBUG, "removing client proxy " + proxy, new
-    // Exception("stack trace"));
+  void removeClientProxy(CacheClientProxy proxy) {
     ClientProxyMembershipID client = proxy.getProxyID();
-    this._clientProxies.remove(client);
-    this._connectionListener.queueRemoved();
-    this.getCache().cleanupForClient(this, client);
-    if (!(proxy.clientConflation == HandShake.CONFLATION_ON)) {
+    this.clientProxies.remove(client);
+    this.connectionListener.queueRemoved();
+    getCache().cleanupForClient(this, client);
+    if (proxy.clientConflation != HandShake.CONFLATION_ON) {
       ClientHealthMonitor chm = ClientHealthMonitor.getInstance();
       if (chm != null) {
         chm.numOfClientsPerVersion.decrementAndGet(proxy.getVersion().ordinal());
@@ -1675,18 +1777,18 @@ public class CacheClientNotifier {
     this.timedOutDurableClientProxies.add(client);
   }
 
-  public boolean isTimedOut(ClientProxyMembershipID client) {
+  private boolean isTimedOut(ClientProxyMembershipID client) {
     return this.timedOutDurableClientProxies.contains(client);
   }
 
   /**
-   * Returns an unmodifiable Collection of known <code>CacheClientProxy</code> instances. The
-   * collection is not static so its contents may change.
+   * Returns an unmodifiable Collection of known {@code CacheClientProxy} instances. The collection
+   * is not static so its contents may change.
    *
-   * @return the collection of known <code>CacheClientProxy</code> instances
+   * @return the collection of known {@code CacheClientProxy} instances
    */
   public Collection<CacheClientProxy> getClientProxies() {
-    return Collections.unmodifiableCollection(this._clientProxies.values());
+    return Collections.unmodifiableCollection(this.clientProxies.values());
   }
 
   private void closeAllClientCqs(CacheClientProxy proxy) {
@@ -1698,12 +1800,12 @@ public class CacheClientNotifier {
           logger.debug("CacheClientNotifier: Closing client CQs: {}", proxy);
         }
         cqService.closeClientCqs(proxy.getProxyID());
-      } catch (CqException e1) {
+      } catch (CqException e) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.CacheClientNotifier_UNABLE_TO_CLOSE_CQS_FOR_THE_CLIENT__0,
             proxy.getProxyID()));
         if (isDebugEnabled) {
-          e1.printStackTrace();
+          logger.debug(e);
         }
       }
     }
@@ -1732,16 +1834,17 @@ public class CacheClientNotifier {
   }
 
   /**
-   * Close dead <code>CacheClientProxy</code> instances
+   * Close dead {@code CacheClientProxy} instances
    *
-   * @param deadProxies The list of <code>CacheClientProxy</code> instances to close
+   * @param deadProxies The list of {@code CacheClientProxy} instances to close
    */
   private void closeDeadProxies(List deadProxies, boolean stoppedNormally) {
     final boolean isDebugEnabled = logger.isDebugEnabled();
-    for (Iterator i = deadProxies.iterator(); i.hasNext();) {
-      CacheClientProxy proxy = (CacheClientProxy) i.next();
-      if (isDebugEnabled)
+    for (Object deadProxy : deadProxies) {
+      CacheClientProxy proxy = (CacheClientProxy) deadProxy;
+      if (isDebugEnabled) {
         logger.debug("CacheClientNotifier: Closing dead client: {}", proxy);
+      }
 
       // Close the proxy
       boolean keepProxy = false;
@@ -1757,8 +1860,7 @@ public class CacheClientNotifier {
       if (keepProxy) {
         logger.info(LocalizedMessage.create(
             LocalizedStrings.CacheClientNotifier_CACHECLIENTNOTIFIER_KEEPING_PROXY_FOR_DURABLE_CLIENT_NAMED_0_FOR_1_SECONDS_2,
-            new Object[] {proxy.getDurableId(), Integer.valueOf(proxy.getDurableTimeout()),
-                proxy}));
+            new Object[] {proxy.getDurableId(), proxy.getDurableTimeout(), proxy}));
       } else {
         closeAllClientCqs(proxy);
         if (isDebugEnabled) {
@@ -1771,10 +1873,10 @@ public class CacheClientNotifier {
   }
 
   /**
-   * Registers a new <code>InterestRegistrationListener</code> with the set of
-   * <code>InterestRegistrationListener</code>s.
+   * Registers a new {@code InterestRegistrationListener} with the set of
+   * {@code InterestRegistrationListener}s.
    * 
-   * @param listener The <code>InterestRegistrationListener</code> to register
+   * @param listener The {@code InterestRegistrationListener} to register
    * 
    * @since GemFire 5.8Beta
    */
@@ -1783,10 +1885,10 @@ public class CacheClientNotifier {
   }
 
   /**
-   * Unregisters an existing <code>InterestRegistrationListener</code> from the set of
-   * <code>InterestRegistrationListener</code>s.
+   * Unregisters an existing {@code InterestRegistrationListener} from the set of
+   * {@code InterestRegistrationListener}s.
    * 
-   * @param listener The <code>InterestRegistrationListener</code> to unregister
+   * @param listener The {@code InterestRegistrationListener} to unregister
    * 
    * @since GemFire 5.8Beta
    */
@@ -1795,11 +1897,11 @@ public class CacheClientNotifier {
   }
 
   /**
-   * Returns a read-only collection of <code>InterestRegistrationListener</code>s registered with
-   * this notifier.
+   * Returns a read-only collection of {@code InterestRegistrationListener}s registered with this
+   * notifier.
    * 
-   * @return a read-only collection of <code>InterestRegistrationListener</code>s registered with
-   *         this notifier
+   * @return a read-only collection of {@code InterestRegistrationListener}s registered with this
+   *         notifier
    * 
    * @since GemFire 5.8Beta
    */
@@ -1811,17 +1913,17 @@ public class CacheClientNotifier {
    * 
    * @since GemFire 5.8Beta
    */
-  protected boolean containsInterestRegistrationListeners() {
+  boolean containsInterestRegistrationListeners() {
     return !this.writableInterestRegistrationListeners.isEmpty();
   }
 
   /**
-   * 
    * @since GemFire 5.8Beta
    */
-  protected void notifyInterestRegistrationListeners(InterestRegistrationEvent event) {
-    for (Iterator i = this.writableInterestRegistrationListeners.iterator(); i.hasNext();) {
-      InterestRegistrationListener listener = (InterestRegistrationListener) i.next();
+  void notifyInterestRegistrationListeners(InterestRegistrationEvent event) {
+    for (Object writableInterestRegistrationListener : this.writableInterestRegistrationListeners) {
+      InterestRegistrationListener listener =
+          (InterestRegistrationListener) writableInterestRegistrationListener;
       if (event.isRegister()) {
         listener.afterRegisterInterest(event);
       } else {
@@ -1836,207 +1938,70 @@ public class CacheClientNotifier {
    * @return the statistics for the notifier
    */
   public CacheClientNotifierStats getStats() {
-    return this._statistics;
+    return this.statistics;
   }
 
   /**
-   * Returns this <code>CacheClientNotifier</code>'s <code>InternalCache</code>.
+   * Returns this {@code CacheClientNotifier}'s {@code InternalCache}.
    * 
-   * @return this <code>CacheClientNotifier</code>'s <code>InternalCache</code>
+   * @return this {@code CacheClientNotifier}'s {@code InternalCache}
    */
   protected InternalCache getCache() { // TODO:SYNC: looks wrong
-    if (this._cache != null && this._cache.isClosed()) {
+    if (this.cache != null && this.cache.isClosed()) {
       InternalCache cache = GemFireCacheImpl.getInstance();
       if (cache != null) {
-        this._cache = cache;
+        this.cache = cache;
         this.logWriter = cache.getInternalLogWriter();
         this.securityLogWriter = cache.getSecurityInternalLogWriter();
       }
     }
-    return this._cache;
+    return this.cache;
   }
 
   /**
-   * Returns this <code>CacheClientNotifier</code>'s maximum message count.
+   * Returns this {@code CacheClientNotifier}'s maximum message count.
    * 
-   * @return this <code>CacheClientNotifier</code>'s maximum message count
+   * @return this {@code CacheClientNotifier}'s maximum message count
    */
   protected int getMaximumMessageCount() {
     return this.maximumMessageCount;
   }
 
   /**
-   * Returns this <code>CacheClientNotifier</code>'s message time-to-live.
+   * Returns this {@code CacheClientNotifier}'s message time-to-live.
    * 
-   * @return this <code>CacheClientNotifier</code>'s message time-to-live
+   * @return this {@code CacheClientNotifier}'s message time-to-live
    */
   protected int getMessageTimeToLive() {
     return this.messageTimeToLive;
   }
 
-  protected void handleInterestEvent(InterestRegistrationEvent event) {
+  void handleInterestEvent(InterestRegistrationEvent event) {
     LocalRegion region = (LocalRegion) event.getRegion();
     region.handleInterestEvent(event);
   }
 
-  /**
-   * @param cache The GemFire <code>InternalCache</code>
-   * @param listener a listener which should receive notifications abouts queues being added or
-   *        removed.
-   */
-  private CacheClientNotifier(InternalCache cache, CacheServerStats acceptorStats,
-      int maximumMessageCount, int messageTimeToLive, ConnectionListener listener,
-      List overflowAttributesList, boolean isGatewayReceiver) {
-    // Set the Cache
-    setCache(cache);
-    this.acceptorStats = acceptorStats;
-    this.socketCloser = new SocketCloser(1, 50); // we only need one thread per client and wait 50ms
-                                                 // for close
-
-    // Set the LogWriter
-    this.logWriter = (InternalLogWriter) cache.getLogger();
-
-    this._connectionListener = listener;
-
-    // Set the security LogWriter
-    this.securityLogWriter = (InternalLogWriter) cache.getSecurityLogger();
-
-    this.maximumMessageCount = maximumMessageCount;
-    this.messageTimeToLive = messageTimeToLive;
-
-    // Initialize the statistics
-    StatisticsFactory factory;
-    if (isGatewayReceiver) {
-      factory = new DummyStatisticsFactory();
-    } else {
-      factory = this.getCache().getDistributedSystem();
-    }
-    this._statistics = new CacheClientNotifierStats(factory);
-
-    try {
-      this.logFrequency = Long.valueOf(System.getProperty(MAX_QUEUE_LOG_FREQUENCY));
-      if (this.logFrequency <= 0) {
-        this.logFrequency = DEFAULT_LOG_FREQUENCY;
-      }
-    } catch (Exception e) {
-      this.logFrequency = DEFAULT_LOG_FREQUENCY;
-    }
-
-    eventEnqueueWaitTime =
-        Integer.getInteger(EVENT_ENQUEUE_WAIT_TIME_NAME, DEFAULT_EVENT_ENQUEUE_WAIT_TIME);
-    if (eventEnqueueWaitTime < 0) {
-      eventEnqueueWaitTime = DEFAULT_EVENT_ENQUEUE_WAIT_TIME;
-    }
-
-    // Schedule task to periodically ping clients.
-    scheduleClientPingTask();
-  }
-
-  /**
-   * this message is used to send interest registration to another server. Since interest
-   * registration performs a state-flush operation this message must not transmitted on an ordered
-   * socket
-   */
-  public static class ServerInterestRegistrationMessage extends HighPriorityDistributionMessage
-      implements MessageWithReply {
-    ClientProxyMembershipID clientId;
-    ClientInterestMessageImpl clientMessage;
-    int processorId;
-
-    ServerInterestRegistrationMessage(ClientProxyMembershipID clientID,
-        ClientInterestMessageImpl msg) {
-      this.clientId = clientID;
-      this.clientMessage = msg;
-    }
-
-    public ServerInterestRegistrationMessage() {}
-
-    static void sendInterestChange(DM dm, ClientProxyMembershipID clientID,
-        ClientInterestMessageImpl msg) {
-      ServerInterestRegistrationMessage smsg = new ServerInterestRegistrationMessage(clientID, msg);
-      Set recipients = dm.getOtherDistributionManagerIds();
-      smsg.setRecipients(recipients);
-      ReplyProcessor21 rp = new ReplyProcessor21(dm, recipients);
-      smsg.processorId = rp.getProcessorId();
-      dm.putOutgoing(smsg);
-      try {
-        rp.waitForReplies();
-      } catch (InterruptedException ie) {
-        Thread.currentThread().interrupt();
-      }
-    }
-
-    @Override
-    protected void process(DistributionManager dm) {
-      // Get the proxy for the proxy id
-      try {
-        CacheClientNotifier ccn = CacheClientNotifier.getInstance();
-        if (ccn != null) {
-          CacheClientProxy proxy = ccn.getClientProxy(clientId);
-          // If this VM contains a proxy for the requested proxy id, forward the
-          // message on to the proxy for processing
-          if (proxy != null) {
-            proxy.processInterestMessage(this.clientMessage);
-          }
-        }
-      } finally {
-        ReplyMessage reply = new ReplyMessage();
-        reply.setProcessorId(this.processorId);
-        reply.setRecipient(getSender());
-        try {
-          dm.putOutgoing(reply);
-        } catch (CancelException e) {
-          // can't send a reply, so ignore the exception
-        }
-      }
-    }
-
-    public int getDSFID() {
-      return SERVER_INTEREST_REGISTRATION_MESSAGE;
-    }
-
-    @Override
-    public void toData(DataOutput out) throws IOException {
-      super.toData(out);
-      out.writeInt(this.processorId);
-      InternalDataSerializer.invokeToData(this.clientId, out);
-      InternalDataSerializer.invokeToData(this.clientMessage, out);
-    }
-
-    @Override
-    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-      super.fromData(in);
-      this.processorId = in.readInt();
-      this.clientId = new ClientProxyMembershipID();
-      InternalDataSerializer.invokeFromData(this.clientId, in);
-      this.clientMessage = new ClientInterestMessageImpl();
-      InternalDataSerializer.invokeFromData(this.clientMessage, in);
-    }
-  }
-
-  protected void deliverInterestChange(ClientProxyMembershipID proxyID,
-      ClientInterestMessageImpl message) {
-    DM dm = ((InternalDistributedSystem) this.getCache().getDistributedSystem())
-        .getDistributionManager();
+  void deliverInterestChange(ClientProxyMembershipID proxyID, ClientInterestMessageImpl message) {
+    DM dm = getCache().getInternalDistributedSystem().getDistributionManager();
     ServerInterestRegistrationMessage.sendInterestChange(dm, proxyID, message);
   }
 
-  public CacheServerStats getAcceptorStats() {
+  CacheServerStats getAcceptorStats() {
     return this.acceptorStats;
   }
 
-  public SocketCloser getSocketCloser() {
+  SocketCloser getSocketCloser() {
     return this.socketCloser;
   }
 
   public void addCompiledQuery(DefaultQuery query) {
     if (this.compiledQueries.putIfAbsent(query.getQueryString(), query) == null) {
       // Added successfully.
-      this._statistics.incCompiledQueryCount(1);
+      this.statistics.incCompiledQueryCount(1);
       if (logger.isDebugEnabled()) {
         logger.debug(
             "Added compiled query into ccn.compliedQueries list. Query: {}. Total compiled queries: {}",
-            query.getQueryString(), this._statistics.getCompiledQueryCount());
+            query.getQueryString(), this.statistics.getCompiledQueryCount());
       }
       // Start the clearIdleCompiledQueries thread.
       startCompiledQueryCleanupThread();
@@ -2048,13 +2013,13 @@ public class CacheClientNotifier {
   }
 
   private void clearCompiledQueries() {
-    if (this.compiledQueries.size() > 0) {
-      this._statistics.incCompiledQueryCount(-(this.compiledQueries.size()));
+    if (!this.compiledQueries.isEmpty()) {
+      this.statistics.incCompiledQueryCount(-this.compiledQueries.size());
       this.compiledQueries.clear();
       if (logger.isDebugEnabled()) {
         logger.debug(
             "Removed all compiled queries from ccn.compliedQueries list. Total compiled queries: {}",
-            this._statistics.getCompiledQueryCount());
+            this.statistics.getCompiledQueryCount());
       }
     }
   }
@@ -2064,7 +2029,7 @@ public class CacheClientNotifier {
    * checks for the compiled queries that are not used and removes them.
    */
   private void startCompiledQueryCleanupThread() {
-    if (isCompiledQueryCleanupThreadStarted) {
+    if (this.isCompiledQueryCleanupThreadStarted) {
       return;
     }
 
@@ -2082,11 +2047,11 @@ public class CacheClientNotifier {
           } else {
             if (compiledQueries.remove(e.getKey()) != null) {
               // If successfully removed decrement the counter.
-              _statistics.incCompiledQueryCount(-1);
+              statistics.incCompiledQueryCount(-1);
               if (isDebugEnabled) {
                 logger.debug("Removed compiled query from ccn.compliedQueries list. Query: "
                     + q.getQueryString() + ". Total compiled queries are : "
-                    + _statistics.getCompiledQueryCount());
+                    + statistics.getCompiledQueryCount());
               }
             }
           }
@@ -2094,23 +2059,23 @@ public class CacheClientNotifier {
       }
     };
 
-    synchronized (lockIsCompiledQueryCleanupThreadStarted) {
-      if (!isCompiledQueryCleanupThreadStarted) {
+    synchronized (this.lockIsCompiledQueryCleanupThreadStarted) {
+      if (!this.isCompiledQueryCleanupThreadStarted) {
         long period = DefaultQuery.TEST_COMPILED_QUERY_CLEAR_TIME > 0
             ? DefaultQuery.TEST_COMPILED_QUERY_CLEAR_TIME : DefaultQuery.COMPILED_QUERY_CLEAR_TIME;
-        _cache.getCCPTimer().scheduleAtFixedRate(task, period, period);
+        this.cache.getCCPTimer().scheduleAtFixedRate(task, period, period);
       }
-      isCompiledQueryCleanupThreadStarted = true;
+      this.isCompiledQueryCleanupThreadStarted = true;
     }
   }
 
-  protected void scheduleClientPingTask() {
+  void scheduleClientPingTask() {
     this.clientPingTask = new SystemTimer.SystemTimerTask() {
 
       @Override
       public void run2() {
         // If there are no proxies, return
-        if (CacheClientNotifier.this._clientProxies.isEmpty()) {
+        if (clientProxies.isEmpty()) {
           return;
         }
 
@@ -2145,144 +2110,10 @@ public class CacheClientNotifier {
     if (logger.isDebugEnabled()) {
       logger.debug("Scheduling client ping task with period={} ms", CLIENT_PING_TASK_PERIOD);
     }
-    CacheClientNotifier.this._cache.getCCPTimer().scheduleAtFixedRate(this.clientPingTask,
+    CacheClientNotifier.this.cache.getCCPTimer().scheduleAtFixedRate(this.clientPingTask,
         CLIENT_PING_TASK_PERIOD, CLIENT_PING_TASK_PERIOD);
   }
 
-  /**
-   * A string representing all hosts used for delivery purposes.
-   */
-  protected static final String ALL_HOSTS = "ALL_HOSTS";
-
-  /**
-   * An int representing all ports used for delivery purposes.
-   */
-  protected static final int ALL_PORTS = -1;
-
-  /**
-   * The map of known <code>CacheClientProxy</code> instances. Maps ClientProxyMembershipID to
-   * CacheClientProxy. Note that the keys in this map are not updated when a durable client
-   * reconnects. To make sure you get the updated ClientProxyMembershipID use this map to lookup the
-   * CacheClientProxy and then call getProxyID on it.
-   */
-  private final ConcurrentMap/* <ClientProxyMembershipID, CacheClientProxy> */ _clientProxies =
-      new ConcurrentHashMap();
-
-  /**
-   * The map of <code>CacheClientProxy</code> instances which are getting initialized. Maps
-   * ClientProxyMembershipID to CacheClientProxy.
-   */
-  private final ConcurrentMap/* <ClientProxyMembershipID, CacheClientProxy> */ _initClientProxies =
-      new ConcurrentHashMap();
-
-  private final HashSet<ClientProxyMembershipID> timedOutDurableClientProxies =
-      new HashSet<ClientProxyMembershipID>();
-
-  /**
-   * The GemFire <code>InternalCache</code>. Note that since this is a singleton class you should
-   * not use a direct reference to _cache in CacheClientNotifier code. Instead, you should always
-   * use <code>getCache()</code>
-   */
-  private InternalCache _cache;
-
-  private InternalLogWriter logWriter;
-
-  /**
-   * The GemFire security <code>LogWriter</code>
-   */
-  private InternalLogWriter securityLogWriter;
-
-  /** the maximum number of messages that can be enqueued in a client-queue. */
-  private int maximumMessageCount;
-
-  /**
-   * the time (in seconds) after which a message in the client queue will expire.
-   */
-  private int messageTimeToLive;
-
-  /**
-   * A listener which receives notifications about queues that are added or removed
-   */
-  private ConnectionListener _connectionListener;
-
-  private CacheServerStats acceptorStats;
-
-  /**
-   * haContainer can hold either the name of the client-messages-region (in case of eviction
-   * policies "mem" or "entry") or an instance of HashMap (in case of eviction policy "none"). In
-   * both the cases, it'll store HAEventWrapper as its key and ClientUpdateMessage as its value.
-   */
-  private volatile HAContainerWrapper haContainer;
-
-  /**
-   * The size of the server-to-client communication socket buffers. This can be modified using the
-   * BridgeServer.SOCKET_BUFFER_SIZE system property.
-   */
-  static final private int socketBufferSize =
-      Integer.getInteger("BridgeServer.SOCKET_BUFFER_SIZE", 32768).intValue();
-
-  /**
-   * The statistics for this notifier
-   */
-  protected final CacheClientNotifierStats _statistics;
-
-  /**
-   * The <code>InterestRegistrationListener</code> instances registered in this VM. This is used
-   * when modifying the set of listeners.
-   */
-  private final Set writableInterestRegistrationListeners = new CopyOnWriteArraySet();
-
-  /**
-   * The <code>InterestRegistrationListener</code> instances registered in this VM. This is used to
-   * provide a read-only <code>Set</code> of listeners.
-   */
-  private final Set readableInterestRegistrationListeners =
-      Collections.unmodifiableSet(writableInterestRegistrationListeners);
-
-  /**
-   * System property name for indicating how much frequently the "Queue full" message should be
-   * logged.
-   */
-  public static final String MAX_QUEUE_LOG_FREQUENCY =
-      DistributionConfig.GEMFIRE_PREFIX + "logFrequency.clientQueueReachedMaxLimit";
-
-  public static final long DEFAULT_LOG_FREQUENCY = 1000;
-
-  public static final String EVENT_ENQUEUE_WAIT_TIME_NAME =
-      DistributionConfig.GEMFIRE_PREFIX + "subscription.EVENT_ENQUEUE_WAIT_TIME";
-
-  public static final int DEFAULT_EVENT_ENQUEUE_WAIT_TIME = 100;
-
-  /**
-   * System property value denoting the time in milliseconds. Any thread putting an event into a
-   * subscription queue, which is full, will wait this much time for the queue to make space. It'll
-   * then enque the event possibly causing the queue to grow beyond its capacity/max-size. See
-   * #51400.
-   */
-  public static int eventEnqueueWaitTime;
-
-  /**
-   * The frequency of logging the "Queue full" message.
-   */
-  private long logFrequency = DEFAULT_LOG_FREQUENCY;
-
-  private final ConcurrentHashMap<String, DefaultQuery> compiledQueries =
-      new ConcurrentHashMap<String, DefaultQuery>();
-
-  private volatile boolean isCompiledQueryCleanupThreadStarted = false;
-
-  private final Object lockIsCompiledQueryCleanupThreadStarted = new Object();
-
-  private SystemTimer.SystemTimerTask clientPingTask;
-
-  private final SocketCloser socketCloser;
-
-  private static final long CLIENT_PING_TASK_PERIOD =
-      Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingPeriod", 60000);
-
-  private static final long CLIENT_PING_TASK_COUNTER =
-      Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "serverToClientPingCounter", 3);
-
   public long getLogFrequency() {
     return this.logFrequency;
   }
@@ -2291,64 +2122,153 @@ public class CacheClientNotifier {
    * @return the haContainer
    */
   public Map getHaContainer() {
-    return haContainer;
+    return this.haContainer;
   }
 
-  public void initHaContainer(List overflowAttributesList) {
+  private void initHaContainer(List overflowAttributesList) {
     // lazily initialize haContainer in case this CCN instance was created by a gateway receiver
     if (overflowAttributesList != null
         && !HARegionQueue.HA_EVICTION_POLICY_NONE.equals(overflowAttributesList.get(0))) {
-      haContainer = new HAContainerRegion(_cache.getRegion(Region.SEPARATOR
-          + CacheServerImpl.clientMessagesRegion(_cache, (String) overflowAttributesList.get(0),
-              ((Integer) overflowAttributesList.get(1)).intValue(),
-              ((Integer) overflowAttributesList.get(2)).intValue(),
+      this.haContainer = new HAContainerRegion(this.cache.getRegion(Region.SEPARATOR
+          + CacheServerImpl.clientMessagesRegion(this.cache, (String) overflowAttributesList.get(0),
+              (Integer) overflowAttributesList.get(1), (Integer) overflowAttributesList.get(2),
               (String) overflowAttributesList.get(3), (Boolean) overflowAttributesList.get(4))));
     } else {
-      haContainer = new HAContainerMap(new ConcurrentHashMap());
+      this.haContainer = new HAContainerMap(new ConcurrentHashMap());
     }
-    assert haContainer != null;
+    assert this.haContainer != null;
 
     if (logger.isDebugEnabled()) {
-      logger.debug("ha container ({}) has been created.", haContainer.getName());
+      logger.debug("ha container ({}) has been created.", this.haContainer.getName());
     }
   }
 
-  private final Set blackListedClients = new CopyOnWriteArraySet();
-
-  public void addToBlacklistedClient(ClientProxyMembershipID proxyID) {
-    blackListedClients.add(proxyID);
+  void addToBlacklistedClient(ClientProxyMembershipID proxyID) {
+    this.blackListedClients.add(proxyID);
     // ensure that cache and distributed system state are current and open
-    this.getCache();
+    getCache();
     new ScheduledThreadPoolExecutor(1).schedule(new ExpireBlackListTask(proxyID), 120,
         TimeUnit.SECONDS);
   }
 
-  public Set getBlacklistedClient() {
-    return blackListedClients;
+  Set getBlacklistedClient() {
+    return this.blackListedClients;
   }
 
   /**
-   * @param _cache the _cache to set
+   * @param cache the cache to set
    */
-  private void setCache(InternalCache _cache) {
-    this._cache = _cache;
+  private void setCache(InternalCache cache) {
+    this.cache = cache;
   }
 
+  /**
+   * Non-static inner class ExpireBlackListTask
+   */
   private class ExpireBlackListTask extends PoolTask {
-    private ClientProxyMembershipID proxyID;
+    private final ClientProxyMembershipID proxyID;
 
-    public ExpireBlackListTask(ClientProxyMembershipID proxyID) {
+    ExpireBlackListTask(ClientProxyMembershipID proxyID) {
       this.proxyID = proxyID;
     }
 
     @Override
     public void run2() {
-      if (blackListedClients.remove(proxyID)) {
+      if (blackListedClients.remove(this.proxyID)) {
         if (logger.isDebugEnabled()) {
-          logger.debug("{} client is no longer blacklisted", proxyID);
+          logger.debug("{} client is no longer blacklisted", this.proxyID);
         }
       }
     }
   }
+
+  /**
+   * Static inner-class ServerInterestRegistrationMessage
+   * <p>
+   * this message is used to send interest registration to another server. Since interest
+   * registration performs a state-flush operation this message must not transmitted on an ordered
+   * socket
+   */
+  public static class ServerInterestRegistrationMessage extends HighPriorityDistributionMessage
+      implements MessageWithReply {
+
+    ClientProxyMembershipID clientId;
+    ClientInterestMessageImpl clientMessage;
+    int processorId;
+
+    ServerInterestRegistrationMessage(ClientProxyMembershipID clientID,
+        ClientInterestMessageImpl msg) {
+      this.clientId = clientID;
+      this.clientMessage = msg;
+    }
+
+    public ServerInterestRegistrationMessage() {
+      // nothing
+    }
+
+    static void sendInterestChange(DM dm, ClientProxyMembershipID clientID,
+        ClientInterestMessageImpl msg) {
+      ServerInterestRegistrationMessage registrationMessage =
+          new ServerInterestRegistrationMessage(clientID, msg);
+      Set recipients = dm.getOtherDistributionManagerIds();
+      registrationMessage.setRecipients(recipients);
+      ReplyProcessor21 rp = new ReplyProcessor21(dm, recipients);
+      registrationMessage.processorId = rp.getProcessorId();
+      dm.putOutgoing(registrationMessage);
+      try {
+        rp.waitForReplies();
+      } catch (InterruptedException ignore) {
+        Thread.currentThread().interrupt();
+      }
+    }
+
+    @Override
+    protected void process(DistributionManager dm) {
+      // Get the proxy for the proxy id
+      try {
+        CacheClientNotifier clientNotifier = CacheClientNotifier.getInstance();
+        if (clientNotifier != null) {
+          CacheClientProxy proxy = clientNotifier.getClientProxy(this.clientId);
+          // If this VM contains a proxy for the requested proxy id, forward the
+          // message on to the proxy f

<TRUNCATED>
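
Editor's note: for anyone tuning the notifier, the system properties named in the constants above (MAX_QUEUE_LOG_FREQUENCY, EVENT_ENQUEUE_WAIT_TIME_NAME, serverToClientPingPeriod) are ordinary JVM properties that must be set before the cache server starts. A minimal sketch, assuming DistributionConfig.GEMFIRE_PREFIX resolves to "gemfire." and using purely illustrative values:

// Illustrative values only; the property names are taken from the constants in the diff above.
public class NotifierTuningSketch {
  public static void main(String[] args) {
    // How long (ms) a putter waits for space in a full subscription queue before enqueuing anyway.
    System.setProperty("gemfire.subscription.EVENT_ENQUEUE_WAIT_TIME", "250");
    // Controls how frequently the "Queue full" message is logged (default 1000).
    System.setProperty("gemfire.logFrequency.clientQueueReachedMaxLimit", "5000");
    // Server-to-client ping period in ms (default 60000).
    System.setProperty("gemfire.serverToClientPingPeriod", "30000");
    // ... then create the cache and start the cache server as usual ...
  }
}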

[32/43] geode git commit: Cleanup HARegionQueueJUnitTest and BlockingHARegionQueueJUnitTest

Posted by kl...@apache.org.
Cleanup HARegionQueueJUnitTest and BlockingHARegionQueueJUnitTest


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/659b9d4e
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/659b9d4e
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/659b9d4e

Branch: refs/heads/feature/GEODE-2632-17
Commit: 659b9d4eb94468959bb2de0e926940ab87b8e481
Parents: 07efaa8
Author: Kirk Lund <kl...@apache.org>
Authored: Mon May 22 17:23:46 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:10 2017 -0700

----------------------------------------------------------------------
 .../ha/BlockingHARegionQueueJUnitTest.java      |  169 +-
 .../cache/ha/HARegionQueueJUnitTest.java        | 2307 +++++++++---------
 2 files changed, 1167 insertions(+), 1309 deletions(-)
----------------------------------------------------------------------
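
Editor's note: the cleanup below collapses the duplicated createHARegionQueue(...) overrides into a single queueType() hook inherited from the parent test. A minimal sketch of that template-method arrangement, using hypothetical class names (the parent's queue-construction logic is assumed and is not shown in this message):

// Hypothetical names; only the override pattern mirrors the diff that follows.
public class BlockingQueueVariantTest extends BaseQueueTest {
  @Override
  protected int queueType() {
    // The parent test is assumed to call queueType() when it builds the queue under test.
    return HARegionQueue.BLOCKING_HA_QUEUE;
  }
}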


http://git-wip-us.apache.org/repos/asf/geode/blob/659b9d4e/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionQueueJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionQueueJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionQueueJUnitTest.java
index 39aa1e6..b529f0c 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionQueueJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionQueueJUnitTest.java
@@ -14,166 +14,141 @@
  */
 package org.apache.geode.internal.cache.ha;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.hamcrest.CoreMatchers.*;
+import static org.junit.Assert.*;
 
-import java.io.IOException;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.awaitility.Awaitility;
-
-import org.apache.geode.test.junit.categories.ClientSubscriptionTest;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import org.apache.geode.cache.CacheException;
 import org.apache.geode.internal.cache.Conflatable;
 import org.apache.geode.internal.cache.EventID;
+import org.apache.geode.test.junit.categories.ClientSubscriptionTest;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 
 /**
  * Test runs all tests of HARegionQueueJUnitTest using BlockingHARegionQueue instead of
  * HARegionQueue.
- * 
- * 
  */
 @Category({IntegrationTest.class, ClientSubscriptionTest.class})
 public class BlockingHARegionQueueJUnitTest extends HARegionQueueJUnitTest {
 
-  /**
-   * Creates Blocking HA region-queue object
-   * 
-   * @return Blocking HA region-queue object
-   * @throws IOException
-   * @throws ClassNotFoundException
-   * @throws CacheException
-   * @throws InterruptedException
-   */
-  protected HARegionQueue createHARegionQueue(String name)
-      throws IOException, ClassNotFoundException, CacheException, InterruptedException {
-    HARegionQueue regionqueue =
-        HARegionQueue.getHARegionQueueInstance(name, cache, HARegionQueue.BLOCKING_HA_QUEUE, false);
-    return regionqueue;
-  }
-
-  /**
-   * Creates Blocking HA region-queue object
-   * 
-   * @return Blocking HA region-queue object
-   * @throws IOException
-   * @throws ClassNotFoundException
-   * @throws CacheException
-   * @throws InterruptedException
-   */
-  protected HARegionQueue createHARegionQueue(String name, HARegionQueueAttributes attrs)
-      throws IOException, ClassNotFoundException, CacheException, InterruptedException {
-    HARegionQueue regionqueue = HARegionQueue.getHARegionQueueInstance(name, cache, attrs,
-        HARegionQueue.BLOCKING_HA_QUEUE, false);
-    return regionqueue;
+  @Override
+  protected int queueType() {
+    return HARegionQueue.BLOCKING_HA_QUEUE;
   }
 
   /**
    * Tests the effect of a put which is blocked because of capacity constraint & subsequent passage
    * because of take operation
-   * 
    */
   @Test
-  public void testBlockingPutAndTake()
-      throws InterruptedException, IOException, ClassNotFoundException {
+  public void testBlockingPutAndTake() throws Exception {
     HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
     hrqa.setBlockingQueueCapacity(1);
-    final HARegionQueue hrq = this.createHARegionQueue("testBlockingPutAndTake", hrqa);
-    hrq.setPrimary(true);// fix for 40314 - capacity constraint is checked for primary only.
+
+    HARegionQueue hrq = createHARegionQueue(this.testName.getMethodName(), hrqa);
+    hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only.
+
     EventID id1 = new EventID(new byte[] {1}, 1, 1);
     hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
-    Thread t1 = new Thread(new Runnable() {
-      public void run() {
-        try {
-          EventID id2 = new EventID(new byte[] {1}, 1, 2);
-          hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
-        } catch (Exception e) {
-          encounteredException = true;
-        }
+
+    AtomicBoolean threadStarted = new AtomicBoolean(false);
+
+    Thread thread = new Thread(() -> {
+      try {
+        threadStarted.set(true);
+        EventID id2 = new EventID(new byte[] {1}, 1, 2);
+        hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
+      } catch (InterruptedException e) {
+        errorCollector.addError(e);
       }
     });
-    t1.start();
-    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> t1.isAlive());
+    thread.start();
+
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> threadStarted.get());
+
     Conflatable conf = (Conflatable) hrq.take();
-    assertNotNull(conf);
-    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !t1.isAlive());
+    assertThat(conf, notNullValue());
+
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !thread.isAlive());
   }
 
   /**
    * Test Scenario : BlockingQueue capacity is 1. The first put should be successful. The second put
    * should block till a peek/remove happens.
-   * 
    */
   @Test
-  public void testBlockingPutAndPeekRemove()
-      throws InterruptedException, IOException, ClassNotFoundException {
+  public void testBlockingPutAndPeekRemove() throws Exception {
     HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
     hrqa.setBlockingQueueCapacity(1);
-    final HARegionQueue hrq = this.createHARegionQueue("testBlockingPutAndPeekRemove", hrqa);
+
+    HARegionQueue hrq = createHARegionQueue(this.testName.getMethodName(), hrqa);
     hrq.setPrimary(true);// fix for 40314 - capacity constraint is checked for primary only.
+
     EventID id1 = new EventID(new byte[] {1}, 1, 1);
     hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
-    Thread t1 = new Thread(new Runnable() {
-      public void run() {
-        try {
-          EventID id2 = new EventID(new byte[] {1}, 1, 2);
-          hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
-        } catch (Exception e) {
-          encounteredException = true;
-        }
+
+    AtomicBoolean threadStarted = new AtomicBoolean(false);
+
+    Thread thread = new Thread(() -> {
+      try {
+        threadStarted.set(true);
+        EventID id2 = new EventID(new byte[] {1}, 1, 2);
+        hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
+      } catch (Exception e) {
+        errorCollector.addError(e);
       }
     });
-    t1.start();
-    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> t1.isAlive());
+    thread.start();
+
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> threadStarted.get());
+
     Conflatable conf = (Conflatable) hrq.peek();
-    assertNotNull(conf);
+    assertThat(conf, notNullValue());
+
     hrq.remove();
-    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !t1.isAlive());
-    assertFalse("Exception occurred in put-thread", encounteredException);
 
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !thread.isAlive());
   }
 
   /**
    * Test Scenario :Blocking Queue capacity is 1. The first put should be successful.The second put
    * should block till the first put expires.
-   * 
+   * <p>
+   * fix for 40314 - capacity constraint is checked for primary only and expiry is not applicable on
+   * primary so marking this test as invalid.
    */
-  // fix for 40314 - capacity constraint is checked for primary only and
-  // expiry is not applicable on primary so marking this test as invalid.
-  @Ignore
   @Test
-  public void testBlockingPutAndExpiry()
-      throws InterruptedException, IOException, ClassNotFoundException {
+  public void testBlockingPutAndExpiry() throws Exception {
     HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
     hrqa.setBlockingQueueCapacity(1);
     hrqa.setExpiryTime(1);
-    final HARegionQueue hrq = this.createHARegionQueue("testBlockingPutAndExpiry", hrqa);
+
+    HARegionQueue hrq = this.createHARegionQueue(this.testName.getMethodName(), hrqa);
 
     EventID id1 = new EventID(new byte[] {1}, 1, 1);
-    long start = System.currentTimeMillis();
+
     hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
-    Thread t1 = new Thread(new Runnable() {
-      public void run() {
-        try {
-          EventID id2 = new EventID(new byte[] {1}, 1, 2);
-          hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
-        } catch (Exception e) {
-          encounteredException = true;
-        }
+
+    AtomicBoolean threadStarted = new AtomicBoolean(false);
+
+    Thread thread = new Thread(() -> {
+      try {
+        threadStarted.set(true);
+        EventID id2 = new EventID(new byte[] {1}, 1, 2);
+        hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
+      } catch (Exception e) {
+        errorCollector.addError(e);
       }
     });
-    t1.start();
-    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> t1.isAlive());
-    waitAtLeast(1000, start, () -> {
-      assertFalse("Put-thread blocked unexpectedly", t1.isAlive());
-    });
-    assertFalse("Exception occurred in put-thread", encounteredException);
+    thread.start();
+
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> threadStarted.get());
+
+    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !thread.isAlive());
   }
 }
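
Editor's note: the rewritten tests above drop the shared encounteredException flag and the isAlive() polling of anonymous Runnables in favor of lambdas, a JUnit ErrorCollector, and an AtomicBoolean/Awaitility handshake. A standalone sketch of that waiting pattern, with illustrative names and timeouts (the capacity-constrained queue work itself is elided):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import org.awaitility.Awaitility;

public class AwaitStartedSketch {
  public static void main(String[] args) {
    AtomicBoolean threadStarted = new AtomicBoolean(false);

    Thread thread = new Thread(() -> {
      threadStarted.set(true);
      // ... a potentially blocking operation, e.g. a put into a full queue ...
    });
    thread.start();

    // First wait until the worker has definitely started ...
    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> threadStarted.get());
    // ... perform whatever unblocks it (take, or peek plus remove, in the tests above) ...
    // ... then wait for the worker to finish.
    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !thread.isAlive());
  }
}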


[08/43] geode git commit: GEODE-2977: make group/name option values consistent

Posted by kl...@apache.org.
GEODE-2977: make group/name option values consistent

* this closes #536


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/c1ab3ffe
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/c1ab3ffe
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/c1ab3ffe

Branch: refs/heads/feature/GEODE-2632-17
Commit: c1ab3ffecb0c0a435fab64e94a97c2efc28ab085
Parents: 7b34cfd
Author: YehEmily <em...@gmail.com>
Authored: Tue May 23 12:27:40 2017 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Thu May 25 15:02:14 2017 -0700

----------------------------------------------------------------------
 .../apache/geode/cache/AttributesFactory.java   |  17 +--
 .../geode/internal/cache/BucketRegion.java      |  22 ---
 .../apache/geode/internal/lang/StringUtils.java |   2 -
 .../geode/management/internal/cli/CliUtil.java  |  21 ---
 .../internal/cli/commands/ConfigCommands.java   |  12 +-
 .../CreateAlterDestroyRegionCommands.java       |   9 +-
 .../internal/cli/commands/DeployCommands.java   |  20 ++-
 .../cli/commands/DiskStoreCommands.java         |  18 ++-
 .../cli/commands/DurableClientCommands.java     |  56 ++++----
 .../internal/cli/commands/FunctionCommands.java |  13 +-
 .../internal/cli/commands/IndexCommands.java    |  56 ++++----
 .../cli/commands/MiscellaneousCommands.java     |  28 ++--
 .../internal/cli/commands/QueueCommands.java    |  18 ++-
 .../internal/cli/commands/RegionCommands.java   |  13 +-
 .../internal/cli/commands/WanCommands.java      | 143 ++++++++++++-------
 .../internal/cli/CliUtilDUnitTest.java          |  81 +++++------
 16 files changed, 240 insertions(+), 289 deletions(-)
----------------------------------------------------------------------
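
Editor's note: the per-file diffs below all apply one recurring change: the --member/--group option values become String[] parameters, and CliUtil.findMembersOrThrow is replaced by CliUtil.findMembers plus an explicit empty check. A condensed, non-authoritative sketch assembled from the hunks below (the command method and its surrounding class are hypothetical; annotations and helper calls mirror the real ones):

// Hypothetical command method illustrating the pattern applied throughout this commit.
public Result exampleCommand(
    @CliOption(key = CliStrings.EXPORT_CONFIG__MEMBER,
        optionContext = ConverterHint.ALL_MEMBER_IDNAME,
        help = CliStrings.EXPORT_CONFIG__MEMBER__HELP) String[] member,
    @CliOption(key = CliStrings.EXPORT_CONFIG__GROUP,
        optionContext = ConverterHint.MEMBERGROUP,
        help = CliStrings.EXPORT_CONFIG__GROUP__HELP) String[] group) {

  Set<DistributedMember> targetMembers = CliUtil.findMembers(group, member);
  if (targetMembers.isEmpty()) {
    // No CommandResultException any more; the empty case is reported directly.
    return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
  }
  // ... execute the command's function against targetMembers and build the result ...
  return ResultBuilder.createInfoResult("ok"); // placeholder result
}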


http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/cache/AttributesFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/AttributesFactory.java b/geode-core/src/main/java/org/apache/geode/cache/AttributesFactory.java
index 69f1087..0d59279 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/AttributesFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/AttributesFactory.java
@@ -1496,27 +1496,12 @@ public class AttributesFactory<K, V> {
             LocalizedStrings.AttributesFactory_TOTAL_SIZE_OF_PARTITION_REGION_MUST_BE_0
                 .toLocalizedString());
       }
-      // listeners are supported here as of v5.1
-      // if (attrs.getCacheListeners().length > 0) {
-      // throw new IllegalStateException(
-      // "Can not add cache listeners to RegionAttributes when PartitionAttributes are set.");
-      // }
-      // loaders are supported here as of v5.1
-      // if (attrs.getCacheLoader() != null) {
-      // throw new IllegalStateException(
-      // "Can not set CacheLoader in RegionAttributes when PartitionAttributes are set.");
-      // }
+
       if (!PartitionedRegionHelper.ALLOWED_DATA_POLICIES.contains(attrs.getDataPolicy())) {
         throw new IllegalStateException(
             LocalizedStrings.AttributesFactory_DATA_POLICIES_OTHER_THAN_0_ARE_NOT_ALLOWED_IN_PARTITIONED_REGIONS
                 .toLocalizedString(PartitionedRegionHelper.ALLOWED_DATA_POLICIES));
       }
-      // if ( attrs.getDataPolicy().isEmpty() && pa.getLocalMaxMemory() != 0) {
-      // throw new IllegalStateException(
-      // "A non-zero PartitionAttributes localMaxMemory setting is not compatible" +
-      // " with an empty DataPolicy. Please use DataPolicy.NORMAL instead.");
-      // }
-
       // fix bug #52033 by invoking getLocalMaxMemoryForValidation here
       if (((PartitionAttributesImpl) pa).getLocalMaxMemoryForValidation() < 0) {
         throw new IllegalStateException(

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
index 886d678..7bfffb7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
@@ -1848,28 +1848,6 @@ public class BucketRegion extends DistributedRegion implements Bucket {
     // if (!anyWithRouting) {
     Set failures = this.partitionedRegion.getDistributionManager().putOutgoing(prMsg);
 
-    // } else {
-    // // Send message to each member. We set a FilterRoutingInfo serialization
-    // // target so that serialization of the PutAllData objects held in the
-    // // message will only serialize the routing entry for the message recipient
-    // Iterator rIter = recipients.iterator();
-    // failures = new HashSet();
-    // while (rIter.hasNext()){
-    // InternalDistributedMember member = (InternalDistributedMember)rIter.next();
-    // FilterRoutingInfo.setSerializationTarget(member);
-    // try {
-    // prMsg.resetRecipients();
-    // prMsg.setRecipient(member);
-    // Set fs = this.partitionedRegion.getDistributionManager().putOutgoing(prMsg);
-    // if (fs != null && !fs.isEmpty()) {
-    // failures.addAll(fs);
-    // }
-    // } finally {
-    // FilterRoutingInfo.clearSerializationTarget();
-    // }
-    // }
-    // }
-
     return failures;
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/internal/lang/StringUtils.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/lang/StringUtils.java b/geode-core/src/main/java/org/apache/geode/internal/lang/StringUtils.java
index 8a44564..298f44f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/lang/StringUtils.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/lang/StringUtils.java
@@ -65,8 +65,6 @@ public class StringUtils extends org.apache.commons.lang.StringUtils {
     return buffer.toString();
   }
 
-
-
   /**
    * Gets the value of the specified Object as a String. If the Object is null then the first
    * non-null String value from the array of default String value is returned. If the array of

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/CliUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/CliUtil.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/CliUtil.java
index c63b10b..038e069 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/CliUtil.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/CliUtil.java
@@ -281,27 +281,6 @@ public class CliUtil {
     return sb.toString();
   }
 
-  public static Set<DistributedMember> findMembersOrThrow(final String groups, final String members)
-      throws CommandResultException {
-
-    String[] groupsArray = (groups == null ? new String[] {} : groups.split(","));
-    String[] membersArray = (members == null ? new String[] {} : members.split(","));
-
-    return findMembersOrThrow(groupsArray, membersArray);
-  }
-
-  public static Set<DistributedMember> findMembersOrThrow(final String[] groups,
-      final String[] members) throws CommandResultException {
-
-    Set<DistributedMember> matchingMembers = findMembers(groups, members);
-    if (matchingMembers.isEmpty()) {
-      throw new CommandResultException(
-          ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE));
-    }
-
-    return matchingMembers;
-  }
-
   /**
    * Finds all Members (including both servers and locators) which belong to the given arrays of
    * groups or members.

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
index 6d3f50f..52a0a9d 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
@@ -199,18 +199,16 @@ public class ConfigCommands extends AbstractCommandsSupport {
   public Result exportConfig(
       @CliOption(key = {CliStrings.EXPORT_CONFIG__MEMBER},
           optionContext = ConverterHint.ALL_MEMBER_IDNAME,
-          help = CliStrings.EXPORT_CONFIG__MEMBER__HELP) String member,
+          help = CliStrings.EXPORT_CONFIG__MEMBER__HELP) String[] member,
       @CliOption(key = {CliStrings.EXPORT_CONFIG__GROUP}, optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.EXPORT_CONFIG__GROUP__HELP) String group,
+          help = CliStrings.EXPORT_CONFIG__GROUP__HELP) String[] group,
       @CliOption(key = {CliStrings.EXPORT_CONFIG__DIR},
           help = CliStrings.EXPORT_CONFIG__DIR__HELP) String dir) {
     InfoResultData infoData = ResultBuilder.createInfoResultData();
 
-    Set<DistributedMember> targetMembers;
-    try {
-      targetMembers = CliUtil.findMembersOrThrow(group, member);
-    } catch (CommandResultException crex) {
-      return crex.getResult();
+    Set<DistributedMember> targetMembers = CliUtil.findMembers(group, member);
+    if (targetMembers.isEmpty()) {
+      return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
     }
 
     try {

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
index b8ebc49..6e1a74e 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
@@ -522,11 +522,10 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
             new Object[] {evictionMax}));
       }
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(groups, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> resultCollector =

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DeployCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DeployCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DeployCommands.java
index 4018beb..544a517 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DeployCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DeployCommands.java
@@ -163,11 +163,10 @@ public class DeployCommands extends AbstractCommandsSupport {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(groups, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc =
@@ -224,17 +223,16 @@ public class DeployCommands extends AbstractCommandsSupport {
   @CliMetaData(relatedTopic = {CliStrings.TOPIC_GEODE_CONFIG})
   @ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
   public Result listDeployed(@CliOption(key = {CliStrings.LIST_DEPLOYED__GROUP},
-      help = CliStrings.LIST_DEPLOYED__GROUP__HELP) String group) {
+      help = CliStrings.LIST_DEPLOYED__GROUP__HELP) String[] group) {
 
     try {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc =

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommands.java
index 4232d91..226cfaf 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommands.java
@@ -353,11 +353,10 @@ public class DiskStoreCommands extends AbstractCommandsSupport {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(groups, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc = CliUtil.executeFunction(new CreateDiskStoreFunction(),
@@ -1410,11 +1409,10 @@ public class DiskStoreCommands extends AbstractCommandsSupport {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(groups, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc = CliUtil.executeFunction(new DestroyDiskStoreFunction(),

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DurableClientCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DurableClientCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DurableClientCommands.java
index bcbfcf0..9cb87ac 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DurableClientCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DurableClientCommands.java
@@ -73,21 +73,21 @@ public class DurableClientCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.LIST_DURABLE_CQS__MEMBER,
           help = CliStrings.LIST_DURABLE_CQS__MEMBER__HELP,
-          optionContext = ConverterHint.MEMBERIDNAME) final String memberNameOrId,
+          optionContext = ConverterHint.MEMBERIDNAME) final String[] memberNameOrId,
 
       @CliOption(key = CliStrings.LIST_DURABLE_CQS__GROUP,
           help = CliStrings.LIST_DURABLE_CQS__GROUP__HELP,
-          optionContext = ConverterHint.MEMBERGROUP) final String group) {
+          optionContext = ConverterHint.MEMBERGROUP) final String[] group) {
     Result result = null;
     try {
 
       boolean noResults = true;
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
-      } catch (CommandResultException e) {
-        return e.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
+
       final ResultCollector<?, ?> rc =
           CliUtil.executeFunction(new ListDurableCqNamesFunction(), durableClientId, targetMembers);
       final List<DurableCqNamesResult> results = (List<DurableCqNamesResult>) rc.getResult();
@@ -153,19 +153,19 @@ public class DurableClientCommands extends AbstractCommandsSupport {
           help = CliStrings.COUNT_DURABLE_CQ_EVENTS__DURABLE__CQ__NAME__HELP) final String cqName,
       @CliOption(key = CliStrings.COUNT_DURABLE_CQ_EVENTS__MEMBER, mandatory = false,
           help = CliStrings.COUNT_DURABLE_CQ_EVENTS__MEMBER__HELP,
-          optionContext = ConverterHint.MEMBERIDNAME) final String memberNameOrId,
+          optionContext = ConverterHint.MEMBERIDNAME) final String[] memberNameOrId,
       @CliOption(key = CliStrings.COUNT_DURABLE_CQ_EVENTS__GROUP, mandatory = false,
           help = CliStrings.COUNT_DURABLE_CQ_EVENTS__GROUP__HELP,
-          optionContext = ConverterHint.MEMBERGROUP) final String group) {
+          optionContext = ConverterHint.MEMBERGROUP) final String[] group) {
 
     Result result = null;
     try {
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
-      } catch (CommandResultException e) {
-        return e.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
+
       String[] params = new String[2];
       params[0] = durableClientId;
       params[1] = cqName;
@@ -200,19 +200,20 @@ public class DurableClientCommands extends AbstractCommandsSupport {
           help = CliStrings.CLOSE_DURABLE_CLIENTS__CLIENT__ID__HELP) final String durableClientId,
       @CliOption(key = CliStrings.CLOSE_DURABLE_CLIENTS__MEMBER, mandatory = false,
           help = CliStrings.CLOSE_DURABLE_CLIENTS__MEMBER__HELP,
-          optionContext = ConverterHint.MEMBERIDNAME) final String memberNameOrId,
+          optionContext = ConverterHint.MEMBERIDNAME) final String[] memberNameOrId,
       @CliOption(key = CliStrings.CLOSE_DURABLE_CLIENTS__GROUP, mandatory = false,
           help = CliStrings.COUNT_DURABLE_CQ_EVENTS__GROUP__HELP,
-          optionContext = ConverterHint.MEMBERGROUP) final String group) {
+          optionContext = ConverterHint.MEMBERGROUP) final String[] group) {
 
     Result result = null;
     try {
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
-      } catch (CommandResultException e) {
-        return e.getResult();
+
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
+
       final ResultCollector<?, ?> rc =
           CliUtil.executeFunction(new CloseDurableClientFunction(), durableClientId, targetMembers);
       final List<MemberResult> results = (List<MemberResult>) rc.getResult();
@@ -240,18 +241,17 @@ public class DurableClientCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.CLOSE_DURABLE_CQS__MEMBER, mandatory = false,
           help = CliStrings.CLOSE_DURABLE_CQS__MEMBER__HELP,
-          optionContext = ConverterHint.MEMBERIDNAME) final String memberNameOrId,
+          optionContext = ConverterHint.MEMBERIDNAME) final String[] memberNameOrId,
 
       @CliOption(key = CliStrings.CLOSE_DURABLE_CQS__GROUP, mandatory = false,
           help = CliStrings.CLOSE_DURABLE_CQS__GROUP__HELP,
-          optionContext = ConverterHint.MEMBERGROUP) final String group) {
+          optionContext = ConverterHint.MEMBERGROUP) final String[] group) {
     Result result = null;
     try {
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
-      } catch (CommandResultException e) {
-        return e.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       String[] params = new String[2];

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/FunctionCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/FunctionCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/FunctionCommands.java
index 2007e4a..8ea65ff 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/FunctionCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/FunctionCommands.java
@@ -560,19 +560,18 @@ public class FunctionCommands implements CommandMarker {
       @CliOption(key = CliStrings.LIST_FUNCTION__MATCHES,
           help = CliStrings.LIST_FUNCTION__MATCHES__HELP) String matches,
       @CliOption(key = CliStrings.LIST_FUNCTION__GROUP, optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.LIST_FUNCTION__GROUP__HELP) String groups,
+          help = CliStrings.LIST_FUNCTION__GROUP__HELP) String[] groups,
       @CliOption(key = CliStrings.LIST_FUNCTION__MEMBER, optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.LIST_FUNCTION__MEMBER__HELP) String members) {
+          help = CliStrings.LIST_FUNCTION__MEMBER__HELP) String[] members) {
     TabularResultData tabularData = ResultBuilder.createTabularResultData();
     boolean accumulatedData = false;
 
     InternalCache cache = getCache();
 
-    Set<DistributedMember> targetMembers;
-    try {
-      targetMembers = CliUtil.findMembersOrThrow(groups, members);
-    } catch (CommandResultException crex) {
-      return crex.getResult();
+    Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, members);
+
+    if (targetMembers.isEmpty()) {
+      return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
     }
 
     try {

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/IndexCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/IndexCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/IndexCommands.java
index a4ba64c..407424a 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/IndexCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/IndexCommands.java
@@ -14,6 +14,7 @@
  */
 package org.apache.geode.management.internal.cli.commands;
 
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.geode.SystemFailure;
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
@@ -188,7 +189,7 @@ public class IndexCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.CREATE_INDEX__MEMBER, mandatory = false,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.CREATE_INDEX__MEMBER__HELP) final String memberNameOrID,
+          help = CliStrings.CREATE_INDEX__MEMBER__HELP) final String[] memberNameOrID,
 
       @CliOption(key = CliStrings.CREATE_INDEX__TYPE, mandatory = false,
           unspecifiedDefaultValue = "range", optionContext = ConverterHint.INDEX_TYPE,
@@ -196,7 +197,7 @@ public class IndexCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.CREATE_INDEX__GROUP, mandatory = false,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.CREATE_INDEX__GROUP__HELP) final String group) {
+          help = CliStrings.CREATE_INDEX__GROUP__HELP) final String[] group) {
 
     Result result = null;
     AtomicReference<XmlEntity> xmlEntity = new AtomicReference<>();
@@ -237,8 +238,12 @@ public class IndexCommands extends AbstractCommandsSupport {
 
       IndexInfo indexInfo = new IndexInfo(indexName, indexedExpression, regionPath, idxType);
 
-      final Set<DistributedMember> targetMembers =
-          CliUtil.findMembersOrThrow(group, memberNameOrID);
+      final Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrID);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
+
       final ResultCollector<?, ?> rc =
           CliUtil.executeFunction(createIndexFunction, indexInfo, targetMembers);
 
@@ -309,16 +314,14 @@ public class IndexCommands extends AbstractCommandsSupport {
         }
         result = ResultBuilder.buildResult(erd);
       }
-    } catch (CommandResultException crex) {
-      result = crex.getResult();
     } catch (Exception e) {
       result = ResultBuilder.createGemFireErrorResult(e.getMessage());
     }
 
 
     if (xmlEntity.get() != null) {
-      persistClusterConfiguration(result, () -> getSharedConfiguration()
-          .addXmlEntity(xmlEntity.get(), group != null ? group.split(",") : null));
+      persistClusterConfiguration(result,
+          () -> getSharedConfiguration().addXmlEntity(xmlEntity.get(), group));
     }
 
     return result;
@@ -337,16 +340,16 @@ public class IndexCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.DESTROY_INDEX__MEMBER, mandatory = false,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.DESTROY_INDEX__MEMBER__HELP) final String memberNameOrID,
+          help = CliStrings.DESTROY_INDEX__MEMBER__HELP) final String[] memberNameOrID,
 
       @CliOption(key = CliStrings.DESTROY_INDEX__GROUP, mandatory = false,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.DESTROY_INDEX__GROUP__HELP) final String group) {
+          help = CliStrings.DESTROY_INDEX__GROUP__HELP) final String[] group) {
 
     Result result = null;
 
     if (StringUtils.isBlank(indexName) && StringUtils.isBlank(regionPath)
-        && StringUtils.isBlank(memberNameOrID) && StringUtils.isBlank(group)) {
+        && ArrayUtils.isEmpty(group) && ArrayUtils.isEmpty(memberNameOrID)) {
       return ResultBuilder.createUserErrorResult(
           CliStrings.format(CliStrings.PROVIDE_ATLEAST_ONE_OPTION, CliStrings.DESTROY_INDEX));
     }
@@ -364,12 +367,10 @@ public class IndexCommands extends AbstractCommandsSupport {
     }
 
     IndexInfo indexInfo = new IndexInfo(indexName, regionName);
-    Set<DistributedMember> targetMembers = null;
+    Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrID);
 
-    try {
-      targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrID);
-    } catch (CommandResultException e) {
-      return e.getResult();
+    if (targetMembers.isEmpty()) {
+      return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
     }
 
     ResultCollector rc = CliUtil.executeFunction(destroyIndexFunction, indexInfo, targetMembers);
@@ -456,8 +457,8 @@ public class IndexCommands extends AbstractCommandsSupport {
       result = ResultBuilder.buildResult(erd);
     }
     if (xmlEntity.get() != null) {
-      persistClusterConfiguration(result, () -> getSharedConfiguration()
-          .deleteXmlEntity(xmlEntity.get(), group != null ? group.split(",") : null));
+      persistClusterConfiguration(result,
+          () -> getSharedConfiguration().deleteXmlEntity(xmlEntity.get(), group));
     }
 
     return result;
@@ -539,11 +540,11 @@ public class IndexCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.CREATE_DEFINED_INDEXES__MEMBER, mandatory = false,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.CREATE_DEFINED_INDEXES__MEMBER__HELP) final String memberNameOrID,
+          help = CliStrings.CREATE_DEFINED_INDEXES__MEMBER__HELP) final String[] memberNameOrID,
 
       @CliOption(key = CliStrings.CREATE_DEFINED_INDEXES__GROUP, mandatory = false,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.CREATE_DEFINED_INDEXES__GROUP__HELP) final String group) {
+          help = CliStrings.CREATE_DEFINED_INDEXES__GROUP__HELP) final String[] group) {
 
     Result result = null;
     AtomicReference<XmlEntity> xmlEntity = new AtomicReference<>();
@@ -555,10 +556,13 @@ public class IndexCommands extends AbstractCommandsSupport {
     }
 
     try {
-      final Cache cache = CacheFactory.getAnyInstance();
+      final Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrID);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
-      final Set<DistributedMember> targetMembers =
-          CliUtil.findMembersOrThrow(group, memberNameOrID);
+      final Cache cache = CacheFactory.getAnyInstance();
       final ResultCollector<?, ?> rc =
           CliUtil.executeFunction(createDefinedIndexesFunction, indexDefinitions, targetMembers);
 
@@ -622,15 +626,13 @@ public class IndexCommands extends AbstractCommandsSupport {
         }
         result = ResultBuilder.buildResult(erd);
       }
-    } catch (CommandResultException crex) {
-      result = crex.getResult();
     } catch (Exception e) {
       result = ResultBuilder.createGemFireErrorResult(e.getMessage());
     }
 
     if (xmlEntity.get() != null) {
-      persistClusterConfiguration(result, () -> getSharedConfiguration()
-          .addXmlEntity(xmlEntity.get(), group != null ? group.split(",") : null));
+      persistClusterConfiguration(result,
+          () -> getSharedConfiguration().addXmlEntity(xmlEntity.get(), group));
     }
     return result;
   }
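
    The recurring change in this commit, starting with IndexCommands above, is that
    CliUtil.findMembersOrThrow(), which signalled "no matching members" by throwing
    CommandResultException, is replaced by CliUtil.findMembers(), which returns a
    possibly-empty set that each command checks itself. A minimal sketch of the new
    pattern follows; it only uses calls that appear in the diff, but the package
    names in the imports are assumed from the Geode source layout shown here and
    the class and method names of the sketch itself are illustrative, not part of
    the commit.

    // Sketch only: import package names are assumed from the source paths in
    // this diff; the sketch class/method are illustrative.
    import java.util.Set;

    import org.apache.geode.distributed.DistributedMember;
    import org.apache.geode.management.cli.Result;
    import org.apache.geode.management.internal.cli.CliUtil;
    import org.apache.geode.management.internal.cli.i18n.CliStrings;
    import org.apache.geode.management.internal.cli.result.ResultBuilder;

    public class MemberTargetingSketch {

      /**
       * Old pattern: CliUtil.findMembersOrThrow(group, member) threw
       * CommandResultException when nothing matched, so every command carried its
       * own catch block. New pattern: CliUtil.findMembers(...) returns an empty
       * set and the command reports the uniform "no members found" user error.
       */
      Result resolveAndRun(String[] groups, String[] members) {
        Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, members);

        if (targetMembers.isEmpty()) {
          return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
        }

        // The real commands continue with
        // CliUtil.executeFunction(someFunction, args, targetMembers)
        // and build the final Result from the collected function results.
        return ResultBuilder.createInfoResult(targetMembers.size() + " member(s) targeted");
      }
    }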

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/MiscellaneousCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/MiscellaneousCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/MiscellaneousCommands.java
index 0d714f4..a23afd0 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/MiscellaneousCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/MiscellaneousCommands.java
@@ -672,11 +672,11 @@ public class MiscellaneousCommands implements CommandMarker {
   @ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
   public Result exportStackTrace(@CliOption(key = CliStrings.EXPORT_STACKTRACE__MEMBER,
       optionContext = ConverterHint.ALL_MEMBER_IDNAME,
-      help = CliStrings.EXPORT_STACKTRACE__HELP) String memberNameOrId,
+      help = CliStrings.EXPORT_STACKTRACE__HELP) String[] memberNameOrId,
 
       @CliOption(key = CliStrings.EXPORT_STACKTRACE__GROUP,
           optionContext = ConverterHint.ALL_MEMBER_IDNAME,
-          help = CliStrings.EXPORT_STACKTRACE__GROUP) String group,
+          help = CliStrings.EXPORT_STACKTRACE__GROUP) String[] group,
 
       @CliOption(key = CliStrings.EXPORT_STACKTRACE__FILE,
           help = CliStrings.EXPORT_STACKTRACE__FILE__HELP) String fileName,
@@ -687,29 +687,27 @@ public class MiscellaneousCommands implements CommandMarker {
 
     Result result = null;
     StringBuffer filePrefix = new StringBuffer("stacktrace");
+
+    if (fileName == null) {
+      fileName = filePrefix.append("_").append(System.currentTimeMillis()).toString();
+    }
+    final File outFile = new File(fileName);
     try {
-      if (fileName == null) {
-        fileName = filePrefix.append("_").append(System.currentTimeMillis()).toString();
-      }
-      final File outFile = new File(fileName);
       if (outFile.exists() && failIfFilePresent) {
         return ResultBuilder.createShellClientErrorResult(CliStrings.format(
             CliStrings.EXPORT_STACKTRACE__ERROR__FILE__PRESENT, outFile.getCanonicalPath()));
       }
 
+
       InternalCache cache = getCache();
       InternalDistributedSystem ads = cache.getInternalDistributedSystem();
 
       InfoResultData resultData = ResultBuilder.createInfoResultData();
 
       Map<String, byte[]> dumps = new HashMap<String, byte[]>();
-      Set<DistributedMember> targetMembers = null;
-
-      if ((group == null || group.isEmpty())
-          && (memberNameOrId == null || memberNameOrId.isEmpty())) {
-        targetMembers = CliUtil.getAllMembers(cache);
-      } else {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc =
@@ -728,9 +726,7 @@ public class MiscellaneousCommands implements CommandMarker {
       resultData.addLine(CliStrings.EXPORT_STACKTRACE__HOST + ads.getDistributedMember().getHost());
 
       result = ResultBuilder.buildResult(resultData);
-    } catch (CommandResultException crex) {
-      return crex.getResult();
-    } catch (Exception ex) {
+    } catch (IOException ex) {
       result = ResultBuilder
           .createGemFireErrorResult(CliStrings.EXPORT_STACKTRACE__ERROR + ex.getMessage());
     }
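
    Besides switching to findMembers(), the exportStackTrace change above moves the
    default file-name generation ahead of the try block and narrows the catch from
    Exception to IOException. A stripped-down sketch of that control flow follows;
    the dump gathering and result building are elided, and the class and method
    names are illustrative only.

    // Sketch only: shows the file-name defaulting and the narrowed IOException
    // handling; the member dump collection of the real command is elided.
    import java.io.File;
    import java.io.IOException;

    public class ExportStackTraceSketch {

      String export(String fileName, boolean failIfFilePresent) {
        if (fileName == null) {
          // Same default shape as the command: "stacktrace_<currentTimeMillis>".
          fileName = "stacktrace_" + System.currentTimeMillis();
        }
        final File outFile = new File(fileName);
        try {
          if (outFile.exists() && failIfFilePresent) {
            return "error: file already present: " + outFile.getCanonicalPath();
          }
          // ... collect stack dumps from the target members and write them out ...
          return "stack traces written to " + outFile.getCanonicalPath();
        } catch (IOException ex) {
          // Only I/O failures are mapped to an error result now; the old
          // CommandResultException path is gone.
          return "error: " + ex.getMessage();
        }
      }
    }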

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/QueueCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/QueueCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/QueueCommands.java
index 6208adb..f4dee75 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/QueueCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/QueueCommands.java
@@ -123,11 +123,10 @@ public class QueueCommands extends AbstractCommandsSupport {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(groups, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       AsyncEventQueueFunctionArgs aeqArgs = new AsyncEventQueueFunctionArgs(id, parallel,
@@ -188,11 +187,10 @@ public class QueueCommands extends AbstractCommandsSupport {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow((String) null, (String) null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(null, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc = CliUtil.executeFunction(new ListAsyncEventQueuesFunction(),

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/RegionCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/RegionCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/RegionCommands.java
index 561d4b8..6f5b047 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/RegionCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/RegionCommands.java
@@ -80,19 +80,18 @@ public class RegionCommands implements CommandMarker {
   @ResourceOperation(resource = Resource.DATA, operation = Operation.READ)
   public Result listRegion(
       @CliOption(key = {CliStrings.LIST_REGION__GROUP}, optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.LIST_REGION__GROUP__HELP) String group,
+          help = CliStrings.LIST_REGION__GROUP__HELP) String[] group,
       @CliOption(key = {CliStrings.LIST_REGION__MEMBER}, optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.LIST_REGION__MEMBER__HELP) String memberNameOrId) {
+          help = CliStrings.LIST_REGION__MEMBER__HELP) String[] memberNameOrId) {
     Result result = null;
     try {
       Set<RegionInformation> regionInfoSet = new LinkedHashSet<RegionInformation>();
       ResultCollector<?, ?> rc = null;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/WanCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/WanCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/WanCommands.java
index feeb353..57080ba 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/WanCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/WanCommands.java
@@ -78,7 +78,7 @@ public class WanCommands extends AbstractCommandsSupport {
       @CliOption(key = CliStrings.CREATE_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
           unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.CREATE_GATEWAYSENDER__MEMBER__HELP) String onMember,
+          help = CliStrings.CREATE_GATEWAYSENDER__MEMBER__HELP) String[] onMember,
 
       @CliOption(key = CliStrings.CREATE_GATEWAYSENDER__ID, mandatory = true,
           help = CliStrings.CREATE_GATEWAYSENDER__ID__HELP) String id,
@@ -145,7 +145,11 @@ public class WanCommands extends AbstractCommandsSupport {
           gatewayEventFilters, gatewayTransportFilter);
 
       Set<DistributedMember> membersToCreateGatewaySenderOn =
-          CliUtil.findMembersOrThrow(onGroups, onMember == null ? null : onMember.split(","));
+          CliUtil.findMembers(onGroups, onMember);
+
+      if (membersToCreateGatewaySenderOn.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       ResultCollector<?, ?> resultCollector =
           CliUtil.executeFunction(GatewaySenderCreateFunction.INSTANCE, gatewaySenderFunctionArgs,
@@ -170,8 +174,6 @@ public class WanCommands extends AbstractCommandsSupport {
     } catch (IllegalArgumentException e) {
       LogWrapper.getInstance().info(e.getMessage());
       result = ResultBuilder.createUserErrorResult(e.getMessage());
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     }
 
     if (xmlEntity.get() != null) {
@@ -190,11 +192,11 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.START_GATEWAYSENDER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.START_GATEWAYSENDER__GROUP__HELP) String onGroup,
+          help = CliStrings.START_GATEWAYSENDER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.START_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.START_GATEWAYSENDER__MEMBER__HELP) String onMember) {
+          help = CliStrings.START_GATEWAYSENDER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
     final String id = senderId.trim();
@@ -205,7 +207,12 @@ public class WanCommands extends AbstractCommandsSupport {
           (SystemManagementService) ManagementService.getExistingManagementService(cache);
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       ExecutorService execService = Executors.newCachedThreadPool(new ThreadFactory() {
         AtomicInteger threadNum = new AtomicInteger();
@@ -293,8 +300,6 @@ public class WanCommands extends AbstractCommandsSupport {
       }
       execService.shutdown();
       result = ResultBuilder.buildResult(resultData);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -312,11 +317,11 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.PAUSE_GATEWAYSENDER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.PAUSE_GATEWAYSENDER__GROUP__HELP) String onGroup,
+          help = CliStrings.PAUSE_GATEWAYSENDER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.PAUSE_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.PAUSE_GATEWAYSENDER__MEMBER__HELP) String onMember) {
+          help = CliStrings.PAUSE_GATEWAYSENDER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
     if (senderId != null) {
@@ -331,9 +336,13 @@ public class WanCommands extends AbstractCommandsSupport {
       GatewaySenderMXBean bean = null;
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
-      Set<DistributedMember> dsMembers = null;
 
-      dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
+
       for (DistributedMember member : dsMembers) {
         if (cache.getDistributedSystem().getDistributedMember().getId().equals(member.getId())) {
           bean = service.getLocalGatewaySenderMXBean(senderId);
@@ -365,8 +374,6 @@ public class WanCommands extends AbstractCommandsSupport {
         }
       }
       result = ResultBuilder.buildResult(resultData);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -383,10 +390,10 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.RESUME_GATEWAYSENDER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.RESUME_GATEWAYSENDER__GROUP__HELP) String onGroup,
+          help = CliStrings.RESUME_GATEWAYSENDER__GROUP__HELP) String[] onGroup,
       @CliOption(key = CliStrings.RESUME_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.RESUME_GATEWAYSENDER__MEMBER__HELP) String onMember) {
+          help = CliStrings.RESUME_GATEWAYSENDER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
     if (senderId != null) {
@@ -402,7 +409,12 @@ public class WanCommands extends AbstractCommandsSupport {
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
 
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
+
       for (DistributedMember member : dsMembers) {
         if (cache.getDistributedSystem().getDistributedMember().getId().equals(member.getId())) {
           bean = service.getLocalGatewaySenderMXBean(senderId);
@@ -434,8 +446,6 @@ public class WanCommands extends AbstractCommandsSupport {
         }
       }
       result = ResultBuilder.buildResult(resultData);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -452,11 +462,11 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.STOP_GATEWAYSENDER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.STOP_GATEWAYSENDER__GROUP__HELP) String onGroup,
+          help = CliStrings.STOP_GATEWAYSENDER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.STOP_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.STOP_GATEWAYSENDER__MEMBER__HELP) String onMember) {
+          help = CliStrings.STOP_GATEWAYSENDER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
     if (senderId != null)
@@ -470,7 +480,12 @@ public class WanCommands extends AbstractCommandsSupport {
       GatewaySenderMXBean bean = null;
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       for (DistributedMember member : dsMembers) {
         if (cache.getDistributedSystem().getDistributedMember().getId().equals(member.getId())) {
@@ -498,8 +513,6 @@ public class WanCommands extends AbstractCommandsSupport {
         }
       }
       result = ResultBuilder.buildResult(resultData);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -518,7 +531,7 @@ public class WanCommands extends AbstractCommandsSupport {
       @CliOption(key = CliStrings.CREATE_GATEWAYRECEIVER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
           unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.CREATE_GATEWAYRECEIVER__MEMBER__HELP) String onMember,
+          help = CliStrings.CREATE_GATEWAYRECEIVER__MEMBER__HELP) String[] onMember,
 
       @CliOption(key = CliStrings.CREATE_GATEWAYRECEIVER__MANUALSTART,
           help = CliStrings.CREATE_GATEWAYRECEIVER__MANUALSTART__HELP) Boolean manualStart,
@@ -550,7 +563,11 @@ public class WanCommands extends AbstractCommandsSupport {
               socketBufferSize, maximumTimeBetweenPings, gatewayTransportFilters);
 
       Set<DistributedMember> membersToCreateGatewayReceiverOn =
-          CliUtil.findMembersOrThrow(onGroups, onMember == null ? null : onMember.split(","));
+          CliUtil.findMembers(onGroups, onMember);
+
+      if (membersToCreateGatewayReceiverOn.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       ResultCollector<?, ?> resultCollector =
           CliUtil.executeFunction(GatewayReceiverCreateFunction.INSTANCE,
@@ -576,8 +593,6 @@ public class WanCommands extends AbstractCommandsSupport {
     } catch (IllegalArgumentException e) {
       LogWrapper.getInstance().info(e.getMessage());
       result = ResultBuilder.createUserErrorResult(e.getMessage());
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     }
 
     if (xmlEntity.get() != null) {
@@ -653,11 +668,11 @@ public class WanCommands extends AbstractCommandsSupport {
   @ResourceOperation(resource = Resource.DATA, operation = Operation.MANAGE)
   public Result startGatewayReceiver(@CliOption(key = CliStrings.START_GATEWAYRECEIVER__GROUP,
       optionContext = ConverterHint.MEMBERGROUP,
-      help = CliStrings.START_GATEWAYRECEIVER__GROUP__HELP) String onGroup,
+      help = CliStrings.START_GATEWAYRECEIVER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.START_GATEWAYRECEIVER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.START_GATEWAYRECEIVER__MEMBER__HELP) String onMember) {
+          help = CliStrings.START_GATEWAYRECEIVER__MEMBER__HELP) String[] onMember) {
     Result result = null;
 
     try {
@@ -668,7 +683,12 @@ public class WanCommands extends AbstractCommandsSupport {
       GatewayReceiverMXBean receieverBean = null;
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       for (DistributedMember member : dsMembers) {
         ObjectName gatewayReceiverObjectName = MBeanJMXAdapter.getGatewayReceiverMBeanName(member);
@@ -715,11 +735,11 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.STOP_GATEWAYRECEIVER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.STOP_GATEWAYRECEIVER__GROUP__HELP) String onGroup,
+          help = CliStrings.STOP_GATEWAYRECEIVER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.STOP_GATEWAYRECEIVER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.STOP_GATEWAYRECEIVER__MEMBER__HELP) String onMember) {
+          help = CliStrings.STOP_GATEWAYRECEIVER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
 
@@ -731,7 +751,12 @@ public class WanCommands extends AbstractCommandsSupport {
       GatewayReceiverMXBean receieverBean = null;
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       for (DistributedMember member : dsMembers) {
         ObjectName gatewayReceiverObjectName = MBeanJMXAdapter.getGatewayReceiverMBeanName(member);
@@ -776,9 +801,9 @@ public class WanCommands extends AbstractCommandsSupport {
   @ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
   public Result listGateway(
       @CliOption(key = CliStrings.LIST_GATEWAY__MEMBER, optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.LIST_GATEWAY__MEMBER__HELP) String onMember,
+          help = CliStrings.LIST_GATEWAY__MEMBER__HELP) String[] onMember,
       @CliOption(key = CliStrings.LIST_GATEWAY__GROUP, optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.LIST_GATEWAY__GROUP__HELP) String onGroup) {
+          help = CliStrings.LIST_GATEWAY__GROUP__HELP) String[] onGroup) {
 
     Result result = null;
     InternalCache cache = getCache();
@@ -786,7 +811,11 @@ public class WanCommands extends AbstractCommandsSupport {
       SystemManagementService service =
           (SystemManagementService) ManagementService.getExistingManagementService(cache);
 
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       Map<String, Map<String, GatewaySenderMXBean>> gatewaySenderBeans =
           new TreeMap<String, Map<String, GatewaySenderMXBean>>();
@@ -838,8 +867,6 @@ public class WanCommands extends AbstractCommandsSupport {
       crd.setHeader(CliStrings.HEADER_GATEWAYS);
       accumulateListGatewayResult(crd, gatewaySenderBeans, gatewayReceiverBeans);
       result = ResultBuilder.buildResult(crd);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -856,11 +883,11 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.STATUS_GATEWAYSENDER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.STATUS_GATEWAYSENDER__GROUP__HELP) String onGroup,
+          help = CliStrings.STATUS_GATEWAYSENDER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.STATUS_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.STATUS_GATEWAYSENDER__MEMBER__HELP) String onMember) {
+          help = CliStrings.STATUS_GATEWAYSENDER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
     if (senderId != null)
@@ -881,8 +908,12 @@ public class WanCommands extends AbstractCommandsSupport {
           crd.addSection(CliStrings.SECTION_GATEWAY_SENDER_NOT_AVAILABLE)
               .addTable(CliStrings.TABLE_GATEWAY_SENDER);
 
-      Set<DistributedMember> dsMembers = null;
-      dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
+
       for (DistributedMember member : dsMembers) {
         if (cache.getDistributedSystem().getDistributedMember().getId().equals(member.getId())) {
           bean = service.getLocalGatewaySenderMXBean(senderId);
@@ -897,8 +928,6 @@ public class WanCommands extends AbstractCommandsSupport {
         }
       }
       result = ResultBuilder.buildResult(crd);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -912,11 +941,11 @@ public class WanCommands extends AbstractCommandsSupport {
   @ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
   public Result statusGatewayReceiver(@CliOption(key = CliStrings.STATUS_GATEWAYRECEIVER__GROUP,
       optionContext = ConverterHint.MEMBERGROUP,
-      help = CliStrings.STATUS_GATEWAYRECEIVER__GROUP__HELP) String onGroup,
+      help = CliStrings.STATUS_GATEWAYRECEIVER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.STATUS_GATEWAYRECEIVER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.STATUS_GATEWAYRECEIVER__MEMBER__HELP) String onMember) {
+          help = CliStrings.STATUS_GATEWAYRECEIVER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
 
@@ -934,7 +963,11 @@ public class WanCommands extends AbstractCommandsSupport {
           crd.addSection(CliStrings.SECTION_GATEWAY_RECEIVER_NOT_AVAILABLE)
               .addTable(CliStrings.TABLE_GATEWAY_RECEIVER);
 
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       for (DistributedMember member : dsMembers) {
         ObjectName gatewayReceiverObjectName = MBeanJMXAdapter.getGatewayReceiverMBeanName(member);
@@ -949,8 +982,6 @@ public class WanCommands extends AbstractCommandsSupport {
         buildReceiverStatus(member.getId(), null, notAvailableReceiverData);
       }
       result = ResultBuilder.buildResult(crd);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -969,7 +1000,7 @@ public class WanCommands extends AbstractCommandsSupport {
       @CliOption(key = CliStrings.DESTROY_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
           unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.DESTROY_GATEWAYSENDER__MEMBER__HELP) String onMember,
+          help = CliStrings.DESTROY_GATEWAYSENDER__MEMBER__HELP) String[] onMember,
       @CliOption(key = CliStrings.DESTROY_GATEWAYSENDER__ID, mandatory = true,
           optionContext = ConverterHint.GATEWAY_SENDER_ID,
           help = CliStrings.DESTROY_GATEWAYSENDER__ID__HELP) String id) {
@@ -979,7 +1010,11 @@ public class WanCommands extends AbstractCommandsSupport {
           new GatewaySenderDestroyFunctionArgs(id);
 
       Set<DistributedMember> membersToDestroyGatewaySenderOn =
-          CliUtil.findMembersOrThrow(onGroups, onMember == null ? null : onMember.split(","));
+          CliUtil.findMembers(onGroups, onMember);
+
+      if (membersToDestroyGatewaySenderOn.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       ResultCollector<?, ?> resultCollector =
           CliUtil.executeFunction(GatewaySenderDestroyFunction.INSTANCE,
@@ -1000,8 +1035,6 @@ public class WanCommands extends AbstractCommandsSupport {
     } catch (IllegalArgumentException e) {
       LogWrapper.getInstance().info(e.getMessage());
       result = ResultBuilder.createUserErrorResult(e.getMessage());
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     }
     return result;
   }
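
    The other half of the change, visible in every WanCommands signature above, is
    that the --member and --group options are now bound to String[] parameters, so
    a comma-separated option value arrives as an array and the ad-hoc
    onMember.split(",") calls disappear. A minimal Spring Shell style sketch of
    that binding follows; the command name and option keys are placeholders, while
    the real commands use CliStrings constants and ConverterHint option contexts.

    // Sketch only: command name and option keys are placeholders; the real
    // commands use CliStrings.* constants and ConverterHint option contexts.
    import org.springframework.shell.core.CommandMarker;
    import org.springframework.shell.core.annotation.CliCommand;
    import org.springframework.shell.core.annotation.CliOption;

    public class MultiValuedOptionSketch implements CommandMarker {

      @CliCommand(value = "sketch multi-member", help = "illustrates String[] option binding")
      public String run(
          // "--member=server1,server2" arrives as {"server1", "server2"};
          // the command no longer splits the raw string itself.
          @CliOption(key = "member", help = "member name(s) or id(s)") String[] members,
          @CliOption(key = "group", help = "member group(s)") String[] groups) {
        int memberCount = members == null ? 0 : members.length;
        int groupCount = groups == null ? 0 : groups.length;
        return "members=" + memberCount + ", groups=" + groupCount;
      }
    }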

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/test/java/org/apache/geode/management/internal/cli/CliUtilDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/CliUtilDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/CliUtilDUnitTest.java
index d610ca0..489be28 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/CliUtilDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/CliUtilDUnitTest.java
@@ -261,35 +261,30 @@ public class CliUtilDUnitTest extends JUnit4CacheTestCase {
   }
 
   public void verifyFindAllMatchingMembers() {
-    try {
-      Set<DistributedMember> set = CliUtil.findMembersOrThrow(GROUP1, null);
-      assertNotNull(set);
-      assertEquals(2, set.size());
-      assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
-      assertEquals(true, containsMember(set, MEMBER_2_GROUP1));
-
-      set = CliUtil.findMembersOrThrow("group1,group2", null);
-      assertNotNull(set);
-      assertEquals(4, set.size());
-      assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
-      assertEquals(true, containsMember(set, MEMBER_2_GROUP1));
-      assertEquals(true, containsMember(set, MEMBER_1_GROUP2));
-      assertEquals(true, containsMember(set, MEMBER_2_GROUP2));
-
-      set = CliUtil.findMembersOrThrow(null, MEMBER_1_GROUP1);
-      assertNotNull(set);
-      assertEquals(1, set.size());
-      assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
-
-      set = CliUtil.findMembersOrThrow(null, "member1_group1,member2_group2");
-      assertNotNull(set);
-      assertEquals(2, set.size());
-      assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
-      assertEquals(true, containsMember(set, MEMBER_2_GROUP2));
-
-    } catch (CommandResultException e) {
-      Assert.fail("CliUtil failed with exception", e);
-    }
+    Set<DistributedMember> set = CliUtil.findMembers(GROUP1.split(","), null);
+    assertNotNull(set);
+    assertEquals(2, set.size());
+    assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
+    assertEquals(true, containsMember(set, MEMBER_2_GROUP1));
+
+    set = CliUtil.findMembers(new String[] {"group1", "group2"}, null);
+    assertNotNull(set);
+    assertEquals(4, set.size());
+    assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
+    assertEquals(true, containsMember(set, MEMBER_2_GROUP1));
+    assertEquals(true, containsMember(set, MEMBER_1_GROUP2));
+    assertEquals(true, containsMember(set, MEMBER_2_GROUP2));
+
+    set = CliUtil.findMembers(null, MEMBER_1_GROUP1.split(","));
+    assertNotNull(set);
+    assertEquals(1, set.size());
+    assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
+
+    set = CliUtil.findMembers(null, new String[] {"member1_group1", "member2_group2"});
+    assertNotNull(set);
+    assertEquals(2, set.size());
+    assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
+    assertEquals(true, containsMember(set, MEMBER_2_GROUP2));
   }
 
   private Object containsMember(Set<DistributedMember> set, String string) {
@@ -311,22 +306,18 @@ public class CliUtilDUnitTest extends JUnit4CacheTestCase {
   public void verifyExecuteFunction() {
     DunitFunction function = new DunitFunction("myfunction");
     Set<DistributedMember> set;
-    try {
-      @SuppressWarnings("rawtypes")
-      Region region1 = getCache().getRegion(COMMON_REGION);
-      region1.clear();
-      set = CliUtil.findMembersOrThrow(GROUP1, null);
-      assertEquals(2, set.size());
-      ResultCollector collector = CliUtil.executeFunction(function, "executeOnGroup", set);
-      collector.getResult();
-      assertEquals(2, region1.size());
-      assertTrue(region1.containsKey(MEMBER_1_GROUP1));
-      assertTrue(region1.containsKey(MEMBER_2_GROUP1));
-      assertEquals("executeOnGroup", region1.get(MEMBER_1_GROUP1));
-      assertEquals("executeOnGroup", region1.get(MEMBER_2_GROUP1));
-    } catch (CommandResultException e) {
-      Assert.fail("Error during querying members", e);
-    }
+    @SuppressWarnings("rawtypes")
+    Region region1 = getCache().getRegion(COMMON_REGION);
+    region1.clear();
+    set = CliUtil.findMembers(GROUP1.split(","), null);
+    assertEquals(2, set.size());
+    ResultCollector collector = CliUtil.executeFunction(function, "executeOnGroup", set);
+    collector.getResult();
+    assertEquals(2, region1.size());
+    assertTrue(region1.containsKey(MEMBER_1_GROUP1));
+    assertTrue(region1.containsKey(MEMBER_2_GROUP1));
+    assertEquals("executeOnGroup", region1.get(MEMBER_1_GROUP1));
+    assertEquals("executeOnGroup", region1.get(MEMBER_2_GROUP1));
   }
 
   public void getRegionAssociatedMembers() {


[43/43] geode git commit: Resolve conflict

Posted by kl...@apache.org.
Resolve conflict


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/283215f9
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/283215f9
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/283215f9

Branch: refs/heads/feature/GEODE-2632-17
Commit: 283215f9f348918cec2b85e97eb2aec977f10492
Parents: 91c13da
Author: Kirk Lund <kl...@apache.org>
Authored: Tue May 30 10:41:29 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:41:29 2017 -0700

----------------------------------------------------------------------
 .../sanctionedDataSerializables.txt             | 1074 +++++++++---------
 1 file changed, 535 insertions(+), 539 deletions(-)
----------------------------------------------------------------------



[13/43] geode git commit: GEODE-2941 Pulse doc update - add logging config

Posted by kl...@apache.org.
GEODE-2941 Pulse doc update - add logging config


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/6e56a732
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/6e56a732
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/6e56a732

Branch: refs/heads/feature/GEODE-2632-17
Commit: 6e56a7325336f5c9579a2ac90b7a74e760f52357
Parents: 56f976c
Author: Dave Barnes <db...@pivotal.io>
Authored: Fri May 26 10:46:58 2017 -0700
Committer: Dave Barnes <db...@pivotal.io>
Committed: Fri May 26 14:51:36 2017 -0700

----------------------------------------------------------------------
 .../tools_modules/pulse/pulse-auth.html.md.erb  | 11 -----
 .../pulse/pulse-hosted.html.md.erb              | 44 +++++++++++++++-----
 2 files changed, 33 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/6e56a732/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb b/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
index d834592..1d791e0 100644
--- a/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
+++ b/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
@@ -50,14 +50,3 @@ When the `http` SSL component is enabled, all HTTP services become
 SSL-enabled and you must configure your client applications
 accordingly. For SSL-enabled Pulse, you will need to configure your
 browsers with proper certificates.
-
-If a JMX manager or locator is configured to use SSL, you can configure Pulse to connect to these
-processes. Create a file named `pulsesecurity.properties` and save it somewhere in the classpath of
-your Web application server. Include standard Java SSL properties, such as:
-
-```
-javax.net.ssl.keyStore={KeyStorePath}
-javax.net.ssl.keyStorePassword={KeyStorePassword}
-javax.net.ssl.trustStore={TrustStorePath}
-javax.net.ssl.trustStorePassword={TrustStorePassword}
-```

http://git-wip-us.apache.org/repos/asf/geode/blob/6e56a732/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb b/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
index ceed530..af9b1f5 100644
--- a/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
+++ b/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
@@ -24,9 +24,15 @@ Host Pulse on a dedicated Web application server to make the Pulse application a
 To host Pulse on a Web application server:
 
 1.  Set the `http-service-port` property to zero (`-Dgemfire.http-service-port=0`) when you start your Geode JMX Manager nodes. Setting this property to zero disables the embedded Web server for hosting the Pulse application.
-2.  Create a `pulse.properties` file somewhere in the classpath of your Web application server. For example, if you are hosting Pulse on Tomcat, create the `pulse.properties` file in the `$TOMCAT_SERVER/lib` directory.
 
-3.  Define the following configuration properties in the `pulse.properties` file:
+2.  Deploy the Pulse Web application to your application server. Geode installs the
+`geode-pulse-n.n.n.war` file (where `n.n.n` is a version number) in the `tools/Pulse` subdirectory
+of your Geode installation directory. Depending on your application server, you may need to copy the
+`pulse.war` file to a deployment directory or use a configuration tool to deploy the file.
+
+3.  Stop the Web application server and locate the Pulse configuration in the `WEB-INF/classes` subdirectory.
+
+4.  Edit `pulse.properties`, defining or redefining any of the following configuration properties as needed for your application:
 
     <table>
     <colgroup>
@@ -58,32 +64,48 @@ To host Pulse on a Web application server:
     </tbody>
     </table>
 
-    For example, with this configuration Pulse connects to the locator at mylocator\[10334\] and accesses any available JMX Manager:
+    &nbsp;
+
+    For example, with the default `pulse.properties` configuration, Pulse connects to the locator at port 10334 and accesses any available JMX Manager:
 
     ``` pre
     pulse.useLocator=true
-    pulse.host=locsrv.gemstone.com
+    pulse.host=localhost
     pulse.port=10334
     ```
 
-    With this configuration Pulse accesses only the JMX Manager instance at manager1\[8080\]:
+    With this modified configuration, Pulse accesses only the JMX Manager instance at port 8080:
 
     ``` pre
     pulse.useLocator=false
-    pulse.host=jmxsrv.gemstone.com
+    pulse.host=jmxsrv.mycluster.com
     pulse.port=8080
     ```
 
-4.  (Optional.) Configure authentication for the Pulse Web application using the instructions in [Configuring Pulse Authentication](pulse-auth.html).
+5.  If a JMX manager or locator is configured to use SSL, you can configure Pulse to connect to these
+    processes. Edit `pulsesecurity.properties` to un-comment the standard Java SSL properties:
+
+    ```
+    javax.net.ssl.keyStore={KeyStorePath}
+    javax.net.ssl.keyStorePassword={KeyStorePassword}
+    javax.net.ssl.trustStore={TrustStorePath}
+    javax.net.ssl.trustStorePassword={TrustStorePassword}
+    ```
+
+    Substitute the appropriate paths and passwords for the bracketed placeholders.
+
+7.  Restart the Web application server.
 
-5.  Deploy the Pulse Web application to your application server. Geode installs the `pulse.war` file in the `tools/Pulse` subdirectory of your Geode installation directory. Depending on your application server, you may need to copy the `pulse.war` file to a deployment directory or use a configuration tool to deploy the file.
-6.  Access the Pulse application using the address, port, and application URL that you configure in your Web application server. For example, with Tomcat the default URL is http://*address*:8080/pulse. Your application server provides options for configuring the address, port, and application name; substitute the correct items to access the deployed Pulse application.
+8.  Access the Pulse application using the address, port, and application URL that you configured in
+your Web application server. For example, with Tomcat the default URL is
+`http://*address*:8080/pulse`. Your application server provides options for configuring the address,
+port, and application name; substitute the correct items to access the deployed Pulse application.
 
     Pulse connects to the locator or JMX Manager that you configured in the `pulse.properties` file, authenticating using the credentials that you configured in the file.
 
-7.  If you have configured authentication for the Pulse application, enter the username and password of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both fields. Click **Sign In** to continue.
+9.  If you have configured authentication for the Pulse application, enter the username and password of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both fields. Click **Sign In** to continue.
 
     See [Configuring Pulse Authentication](pulse-auth.html).
 
-8.  After you log in, Pulse displays the main cluster view for the distributed system to which it has connected. See [Using Pulse Views](pulse-views.html).
+10.  After you log in, Pulse displays the main cluster view for the distributed system to which it has connected. See [Using Pulse Views](pulse-views.html).
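
    As a concrete illustration of step 5 in the updated instructions above, a
    filled-in pulsesecurity.properties might look like the following. The standard
    javax.net.ssl property names come from the doc; the store paths and passwords
    are placeholders for this example, not values taken from the commit.

    # Example values only - substitute your own keystore/truststore and passwords.
    javax.net.ssl.keyStore=/opt/geode/ssl/pulse-keystore.jks
    javax.net.ssl.keyStorePassword=keystore-password
    javax.net.ssl.trustStore=/opt/geode/ssl/pulse-truststore.jks
    javax.net.ssl.trustStorePassword=truststore-password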
 


[28/43] geode git commit: Run spotlessApply

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterDataSerializers.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterDataSerializers.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterDataSerializers.java
index d1c101f..053b2a8 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterDataSerializers.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterDataSerializers.java
@@ -41,7 +41,8 @@ public class RegisterDataSerializers extends BaseCommand {
       throws IOException, ClassNotFoundException {
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received register dataserializer request ({} parts) from {}",
-          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(),
+          serverConnection.getSocketString());
     }
     int noOfParts = clientMessage.getNumberOfParts();
 
@@ -49,10 +50,12 @@ public class RegisterDataSerializers extends BaseCommand {
     int noOfDataSerializers = (noOfParts - 1) / 2;
 
     // retrieve eventID from the last Part
-    ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(clientMessage.getPart(noOfParts - 1).getSerializedForm());
+    ByteBuffer eventIdPartsBuffer =
+        ByteBuffer.wrap(clientMessage.getPart(noOfParts - 1).getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId =
+        new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     byte[][] serializedDataSerializers = new byte[noOfDataSerializers * 2][];
     boolean caughtCNFE = false;
@@ -101,7 +104,8 @@ public class RegisterDataSerializers extends BaseCommand {
     }
 
     if (logger.isDebugEnabled()) {
-      logger.debug("Registered dataserializer for MembershipId = {}", serverConnection.getMembershipID());
+      logger.debug("Registered dataserializer for MembershipId = {}",
+          serverConnection.getMembershipID());
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInstantiators.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInstantiators.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInstantiators.java
index 2b63337..df5a46c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInstantiators.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInstantiators.java
@@ -53,7 +53,8 @@ public class RegisterInstantiators extends BaseCommand {
       throws IOException, ClassNotFoundException {
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received register instantiator request ({} parts) from {}",
-          serverConnection.getName(), clientMessage.getNumberOfParts(), serverConnection.getSocketString());
+          serverConnection.getName(), clientMessage.getNumberOfParts(),
+          serverConnection.getSocketString());
     }
     int noOfParts = clientMessage.getNumberOfParts();
     // Assert parts
@@ -62,10 +63,12 @@ public class RegisterInstantiators extends BaseCommand {
     int noOfInstantiators = (noOfParts - 1) / 3;
 
     // retrieve eventID from the last Part
-    ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(clientMessage.getPart(noOfParts - 1).getSerializedForm());
+    ByteBuffer eventIdPartsBuffer =
+        ByteBuffer.wrap(clientMessage.getPart(noOfParts - 1).getSerializedForm());
     long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
     long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+    EventID eventId =
+        new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
     byte[][] serializedInstantiators = new byte[noOfInstantiators * 3][];
     boolean caughtCNFE = false;
@@ -102,7 +105,7 @@ public class RegisterInstantiators extends BaseCommand {
       }
     } catch (Exception e) {
       logger.warn(LocalizedMessage.create(LocalizedStrings.RegisterInstantiators_BAD_CLIENT,
-          new Object[] { serverConnection.getMembershipID(), e.getLocalizedMessage()}));
+          new Object[] {serverConnection.getMembershipID(), e.getLocalizedMessage()}));
       writeException(clientMessage, e, false, serverConnection);
       serverConnection.setAsTrue(RESPONDED);
     }
@@ -134,7 +137,8 @@ public class RegisterInstantiators extends BaseCommand {
     }
 
     if (logger.isDebugEnabled()) {
-      logger.debug("Registered instantiators for MembershipId = {}", serverConnection.getMembershipID());
+      logger.debug("Registered instantiators for MembershipId = {}",
+          serverConnection.getMembershipID());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest.java
index afb0f2c..edd917a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest.java
@@ -112,7 +112,8 @@ public class RegisterInterest extends BaseCommand {
 
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received register interest request ({} bytes) from {} for region {} key {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName, key);
     }
 
     // Process the register interest request
@@ -138,7 +139,7 @@ public class RegisterInterest extends BaseCommand {
     if (region == null) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.RegisterInterest_0_REGION_NAMED_1_WAS_NOT_FOUND_DURING_REGISTER_INTEREST_REQUEST,
-          new Object[] { serverConnection.getName(), regionName}));
+          new Object[] {serverConnection.getName(), regionName}));
       // writeChunkedErrorResponse(msg,
       // MessageType.REGISTER_INTEREST_DATA_ERROR, message);
       // responded = true;
@@ -159,8 +160,9 @@ public class RegisterInterest extends BaseCommand {
           key = registerContext.getKey();
         }
       }
-      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, key,
-          serverConnection.getProxyID(), interestType, isDurable, sendUpdatesAsInvalidates, false, 0, true);
+      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName,
+          key, serverConnection.getProxyID(), interestType, isDurable, sendUpdatesAsInvalidates,
+          false, 0, true);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
       checkForInterrupt(serverConnection, e);
@@ -177,8 +179,8 @@ public class RegisterInterest extends BaseCommand {
     // DistributionStats.getStatTime() - start);
     // start = DistributionStats.getStatTime();
 
-    CacheClientProxy ccp =
-        serverConnection.getAcceptor().getCacheClientNotifier().getClientProxy(serverConnection.getProxyID());
+    CacheClientProxy ccp = serverConnection.getAcceptor().getCacheClientNotifier()
+        .getClientProxy(serverConnection.getProxyID());
     if (ccp == null) {
       // fix for 37593
       IOException ioex = new IOException(
@@ -212,7 +214,8 @@ public class RegisterInterest extends BaseCommand {
 
       // Send chunk response
       try {
-        fillAndSendRegisterInterestResponseChunks(region, key, interestType, policy, serverConnection);
+        fillAndSendRegisterInterestResponseChunks(region, key, interestType, policy,
+            serverConnection);
         serverConnection.setAsTrue(RESPONDED);
       } catch (Exception e) {
         writeChunkedException(clientMessage, e, serverConnection, chunkedResponseMsg);
@@ -224,8 +227,8 @@ public class RegisterInterest extends BaseCommand {
         // logger.debug(getName() + ": Sent chunk (1 of 1) of register interest
         // response (" + chunkedResponseMsg.getBufferLength() + " bytes) for
         // region " + regionName + " key " + key);
-        logger.debug("{}: Sent register interest response for region {} key {}", serverConnection.getName(),
-            regionName, key);
+        logger.debug("{}: Sent register interest response for region {} key {}",
+            serverConnection.getName(), regionName, key);
       }
       // bserverStats.incLong(writeDestroyResponseTimeId,
       // DistributionStats.getStatTime() - start);
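
As background for the interest-registration commands being reformatted here: RegisterInterest is the server-side handler for a client's single-key interest registration. A minimal client-side sketch, assuming a hypothetical locator on localhost port 10334 and a hypothetical region named "exampleRegion", might look roughly like:

    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.client.ClientCache;
    import org.apache.geode.cache.client.ClientCacheFactory;
    import org.apache.geode.cache.client.ClientRegionShortcut;

    public class RegisterInterestSketch {
      public static void main(String[] args) {
        // Subscriptions must be enabled on the pool for interest registration to work.
        ClientCache cache = new ClientCacheFactory()
            .addPoolLocator("localhost", 10334)   // hypothetical locator host and port
            .setPoolSubscriptionEnabled(true)
            .create();
        Region<String, String> region = cache
            .<String, String>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
            .create("exampleRegion");              // hypothetical region name
        // Sends the register-interest request that the RegisterInterest command processes.
        region.registerInterest("key1");           // hypothetical key
        cache.close();
      }
    }

The special token "ALL_KEYS" can be passed in place of a single key; durable and invalidate variants map onto the isDurable and sendUpdatesAsInvalidates arguments visible in the diff above.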

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest61.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest61.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest61.java
index af423ca..bad3bed 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest61.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterest61.java
@@ -143,7 +143,8 @@ public class RegisterInterest61 extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received register interest 61 request ({} bytes) from {} for region {} key {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName, key);
     }
 
     // test hook to trigger vMotion during register Interest
@@ -176,7 +177,7 @@ public class RegisterInterest61 extends BaseCommand {
     if (region == null) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.RegisterInterest_0_REGION_NAMED_1_WAS_NOT_FOUND_DURING_REGISTER_INTEREST_REQUEST,
-          new Object[] { serverConnection.getName(), regionName}));
+          new Object[] {serverConnection.getName(), regionName}));
       // writeChunkedErrorResponse(msg,
       // MessageType.REGISTER_INTEREST_DATA_ERROR, message);
       // responded = true;
@@ -198,9 +199,9 @@ public class RegisterInterest61 extends BaseCommand {
           key = registerContext.getKey();
         }
       }
-      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, key,
-          serverConnection.getProxyID(), interestType, isDurable, sendUpdatesAsInvalidates, true,
-          regionDataPolicyPartBytes[0], true);
+      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName,
+          key, serverConnection.getProxyID(), interestType, isDurable, sendUpdatesAsInvalidates,
+          true, regionDataPolicyPartBytes[0], true);
     } catch (Exception e) {
       // If an interrupted exception is thrown , rethrow it
       checkForInterrupt(serverConnection, e);
@@ -217,8 +218,8 @@ public class RegisterInterest61 extends BaseCommand {
     // DistributionStats.getStatTime() - start);
     // start = DistributionStats.getStatTime();
 
-    CacheClientProxy ccp =
-        serverConnection.getAcceptor().getCacheClientNotifier().getClientProxy(serverConnection.getProxyID());
+    CacheClientProxy ccp = serverConnection.getAcceptor().getCacheClientNotifier()
+        .getClientProxy(serverConnection.getProxyID());
     if (ccp == null) {
       // fix for 37593
       IOException ioex = new IOException(
@@ -265,8 +266,8 @@ public class RegisterInterest61 extends BaseCommand {
         // logger.debug(getName() + ": Sent chunk (1 of 1) of register interest
         // response (" + chunkedResponseMsg.getBufferLength() + " bytes) for
         // region " + regionName + " key " + key);
-        logger.debug("{}: Sent register interest response for region {} key {}", serverConnection.getName(),
-            regionName, key);
+        logger.debug("{}: Sent register interest response for region {} key {}",
+            serverConnection.getName(), regionName, key);
       }
       // bserverStats.incLong(writeDestroyResponseTimeId,
       // DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList.java
index 4206e19..5f5fafa 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList.java
@@ -124,8 +124,8 @@ public class RegisterInterestList extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received register interest request ({} bytes) from {} for the following {} keys in region {}: {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), numberOfKeys,
-          regionName, keys);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), numberOfKeys, regionName, keys);
     }
 
     /*
@@ -155,7 +155,8 @@ public class RegisterInterestList extends BaseCommand {
       }
       String s = errMessage.toLocalizedString();
       logger.warn("{}: {}", serverConnection.getName(), s);
-      writeChunkedErrorResponse(clientMessage, MessageType.REGISTER_INTEREST_DATA_ERROR, s, serverConnection);
+      writeChunkedErrorResponse(clientMessage, MessageType.REGISTER_INTEREST_DATA_ERROR, s,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -165,7 +166,7 @@ public class RegisterInterestList extends BaseCommand {
     if (region == null) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.RegisterInterestList_0_REGION_NAMED_1_WAS_NOT_FOUND_DURING_REGISTER_INTEREST_LIST_REQUEST,
-          new Object[] { serverConnection.getName(), regionName}));
+          new Object[] {serverConnection.getName(), regionName}));
       // writeChunkedErrorResponse(msg,
       // MessageType.REGISTER_INTEREST_DATA_ERROR, message);
       // responded = true;
@@ -181,8 +182,8 @@ public class RegisterInterestList extends BaseCommand {
         }
       }
       // Register interest
-      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, keys,
-          serverConnection.getProxyID(), isDurable, sendUpdatesAsInvalidates, false, 0, true);
+      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName,
+          keys, serverConnection.getProxyID(), isDurable, sendUpdatesAsInvalidates, false, 0, true);
     } catch (Exception ex) {
       // If an interrupted exception is thrown , rethrow it
       checkForInterrupt(serverConnection, ex);
@@ -198,7 +199,7 @@ public class RegisterInterestList extends BaseCommand {
     // start = DistributionStats.getStatTime();
 
     boolean isPrimary = serverConnection.getAcceptor().getCacheClientNotifier()
-                                        .getClientProxy(serverConnection.getProxyID()).isPrimary();
+        .getClientProxy(serverConnection.getProxyID()).isPrimary();
     if (!isPrimary) {
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_SECONDARY);
       chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
@@ -218,7 +219,8 @@ public class RegisterInterestList extends BaseCommand {
 
       // Send chunk response
       try {
-        fillAndSendRegisterInterestResponseChunks(region, keys, InterestType.KEY, policy, serverConnection);
+        fillAndSendRegisterInterestResponseChunks(region, keys, InterestType.KEY, policy,
+            serverConnection);
         serverConnection.setAsTrue(RESPONDED);
       } catch (Exception e) {
         // If an interrupted exception is thrown , rethrow it

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList61.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList61.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList61.java
index 8eb6c4a..40a3c25 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList61.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList61.java
@@ -134,8 +134,8 @@ public class RegisterInterestList61 extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received register interest 61 request ({} bytes) from {} for the following {} keys in region {}: {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), numberOfKeys,
-          regionName, keys);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), numberOfKeys, regionName, keys);
     }
 
     /*
@@ -165,7 +165,8 @@ public class RegisterInterestList61 extends BaseCommand {
       }
       String s = errMessage.toLocalizedString();
       logger.warn("{}: {}", serverConnection.getName(), s);
-      writeChunkedErrorResponse(clientMessage, MessageType.REGISTER_INTEREST_DATA_ERROR, s, serverConnection);
+      writeChunkedErrorResponse(clientMessage, MessageType.REGISTER_INTEREST_DATA_ERROR, s,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -175,7 +176,7 @@ public class RegisterInterestList61 extends BaseCommand {
     if (region == null) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.RegisterInterestList_0_REGION_NAMED_1_WAS_NOT_FOUND_DURING_REGISTER_INTEREST_LIST_REQUEST,
-          new Object[] { serverConnection.getName(), regionName}));
+          new Object[] {serverConnection.getName(), regionName}));
       // writeChunkedErrorResponse(msg,
       // MessageType.REGISTER_INTEREST_DATA_ERROR, message);
       // responded = true;
@@ -191,8 +192,8 @@ public class RegisterInterestList61 extends BaseCommand {
         }
       }
       // Register interest
-      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, keys,
-          serverConnection.getProxyID(), isDurable, sendUpdatesAsInvalidates, true,
+      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName,
+          keys, serverConnection.getProxyID(), isDurable, sendUpdatesAsInvalidates, true,
           regionDataPolicyPartBytes[0], true);
     } catch (Exception ex) {
       // If an interrupted exception is thrown , rethrow it
@@ -209,7 +210,7 @@ public class RegisterInterestList61 extends BaseCommand {
     // start = DistributionStats.getStatTime();
 
     boolean isPrimary = serverConnection.getAcceptor().getCacheClientNotifier()
-                                        .getClientProxy(serverConnection.getProxyID()).isPrimary();
+        .getClientProxy(serverConnection.getProxyID()).isPrimary();
     if (!isPrimary) {
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_SECONDARY);
       chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
@@ -229,7 +230,8 @@ public class RegisterInterestList61 extends BaseCommand {
 
       // Send chunk response
       try {
-        fillAndSendRegisterInterestResponseChunks(region, keys, InterestType.KEY, policy, serverConnection);
+        fillAndSendRegisterInterestResponseChunks(region, keys, InterestType.KEY, policy,
+            serverConnection);
         serverConnection.setAsTrue(RESPONDED);
       } catch (Exception e) {
         // If an interrupted exception is thrown , rethrow it

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList66.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList66.java
index 14198cc..6a2ad95 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList66.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RegisterInterestList66.java
@@ -135,8 +135,8 @@ public class RegisterInterestList66 extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received register interest 66 request ({} bytes) from {} for the following {} keys in region {}: {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), numberOfKeys,
-          regionName, keys);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), numberOfKeys, regionName, keys);
     }
 
     /*
@@ -166,7 +166,8 @@ public class RegisterInterestList66 extends BaseCommand {
       }
       String s = errMessage.toLocalizedString();
       logger.warn("{}: {}", serverConnection.getName(), s);
-      writeChunkedErrorResponse(clientMessage, MessageType.REGISTER_INTEREST_DATA_ERROR, s, serverConnection);
+      writeChunkedErrorResponse(clientMessage, MessageType.REGISTER_INTEREST_DATA_ERROR, s,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
     }
 
@@ -175,7 +176,7 @@ public class RegisterInterestList66 extends BaseCommand {
     if (region == null) {
       logger.info(LocalizedMessage.create(
           LocalizedStrings.RegisterInterestList_0_REGION_NAMED_1_WAS_NOT_FOUND_DURING_REGISTER_INTEREST_LIST_REQUEST,
-          new Object[] { serverConnection.getName(), regionName}));
+          new Object[] {serverConnection.getName(), regionName}));
       // writeChunkedErrorResponse(msg,
       // MessageType.REGISTER_INTEREST_DATA_ERROR, message);
       // responded = true;
@@ -191,8 +192,8 @@ public class RegisterInterestList66 extends BaseCommand {
         }
       }
       // Register interest
-      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName, keys,
-          serverConnection.getProxyID(), isDurable, sendUpdatesAsInvalidates, true,
+      serverConnection.getAcceptor().getCacheClientNotifier().registerClientInterest(regionName,
+          keys, serverConnection.getProxyID(), isDurable, sendUpdatesAsInvalidates, true,
           regionDataPolicyPartBytes[0], true);
     } catch (Exception ex) {
       // If an interrupted exception is thrown , rethrow it
@@ -209,7 +210,7 @@ public class RegisterInterestList66 extends BaseCommand {
     // start = DistributionStats.getStatTime();
 
     boolean isPrimary = serverConnection.getAcceptor().getCacheClientNotifier()
-                                        .getClientProxy(serverConnection.getProxyID()).isPrimary();
+        .getClientProxy(serverConnection.getProxyID()).isPrimary();
     if (!isPrimary) {
       chunkedResponseMsg.setMessageType(MessageType.RESPONSE_FROM_SECONDARY);
       chunkedResponseMsg.setTransactionId(clientMessage.getTransactionId());
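
The RegisterInterestList commands handle the variant in which the client supplies a list of keys rather than a single key. A rough sketch, assuming a region created against a subscription-enabled pool as in the earlier snippet and hypothetical key names:

    import java.util.Arrays;
    import org.apache.geode.cache.Region;

    // Passing a java.util.List as the key registers interest in each element of the
    // list; the server side services it with the RegisterInterestList* commands.
    static void registerOrderInterest(Region<Object, Object> region) {
      region.registerInterest(Arrays.asList("order-1", "order-2", "order-3"));
    }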

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveAll.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveAll.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveAll.java
index 52a1df3..a295c54 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveAll.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveAll.java
@@ -95,9 +95,10 @@ public class RemoveAll extends BaseCommand {
             LocalizedStrings.RemoveAll_THE_INPUT_REGION_NAME_FOR_THE_REMOVEALL_REQUEST_IS_NULL
                 .toLocalizedString();
         logger.warn(LocalizedMessage.create(LocalizedStrings.TWO_ARG_COLON,
-            new Object[] { serverConnection.getName(), txt}));
+            new Object[] {serverConnection.getName(), txt}));
         errMessage.append(txt);
-        writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+        writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
+            serverConnection);
         serverConnection.setAsTrue(RESPONDED);
         return;
       }
@@ -114,7 +115,8 @@ public class RemoveAll extends BaseCommand {
       ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
       long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
       long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
-      EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
+      EventID eventId =
+          new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
 
       Breadcrumbs.setEventId(eventId);
 
@@ -133,9 +135,9 @@ public class RemoveAll extends BaseCommand {
       if (logger.isDebugEnabled()) {
         StringBuilder buffer = new StringBuilder();
         buffer.append(serverConnection.getName()).append(": Received removeAll request from ")
-              .append(serverConnection.getSocketString()).append(" for region ").append(regionName)
-              .append(callbackArg != null ? (" callbackArg " + callbackArg) : "").append(" with ")
-              .append(numberOfKeys).append(" keys.");
+            .append(serverConnection.getSocketString()).append(" for region ").append(regionName)
+            .append(callbackArg != null ? (" callbackArg " + callbackArg) : "").append(" with ")
+            .append(numberOfKeys).append(" keys.");
         logger.debug(buffer);
       }
       ArrayList<Object> keys = new ArrayList<Object>(numberOfKeys);
@@ -148,9 +150,10 @@ public class RemoveAll extends BaseCommand {
               LocalizedStrings.RemoveAll_ONE_OF_THE_INPUT_KEYS_FOR_THE_REMOVEALL_REQUEST_IS_NULL
                   .toLocalizedString();
           logger.warn(LocalizedMessage.create(LocalizedStrings.TWO_ARG_COLON,
-              new Object[] { serverConnection.getName(), txt}));
+              new Object[] {serverConnection.getName(), txt}));
           errMessage.append(txt);
-          writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
+          writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR,
+              errMessage.toString(), serverConnection);
           serverConnection.setAsTrue(RESPONDED);
           return;
         }
@@ -180,8 +183,9 @@ public class RemoveAll extends BaseCommand {
         keys.add(key);
       } // for
 
-      if (clientMessage.getNumberOfParts() == (5 + numberOfKeys + 1)) {// it means optional timeout has been
-                                                             // added
+      if (clientMessage.getNumberOfParts() == (5 + numberOfKeys + 1)) {// it means optional timeout
+                                                                       // has been
+        // added
         int timeout = clientMessage.getPart(5 + numberOfKeys).getInt();
         serverConnection.setRequestSpecificTimeout(timeout);
       }
@@ -199,8 +203,8 @@ public class RemoveAll extends BaseCommand {
         }
       }
 
-      response = region.basicBridgeRemoveAll(keys, retryVersions, serverConnection.getProxyID(), eventId,
-          callbackArg);
+      response = region.basicBridgeRemoveAll(keys, retryVersions, serverConnection.getProxyID(),
+          eventId, callbackArg);
       if (!region.getConcurrencyChecksEnabled() || clientIsEmpty || !clientHasCCEnabled) {
         // the client only needs this if versioning is being used and the client
         // has storage
@@ -215,7 +219,8 @@ public class RemoveAll extends BaseCommand {
       if (region instanceof PartitionedRegion) {
         PartitionedRegion pr = (PartitionedRegion) region;
         if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-          writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr, pr.getNetworkHopType());
+          writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr,
+              pr.getNetworkHopType());
           pr.clearNetworkHopData();
           replyWithMetaData = true;
         }
@@ -250,8 +255,9 @@ public class RemoveAll extends BaseCommand {
       stats.incProcessRemoveAllTime(start - oldStart);
     }
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sending removeAll response back to {} for region {}{}", serverConnection.getName(),
-          serverConnection.getSocketString(), regionName, (logger.isTraceEnabled() ? ": " + response : ""));
+      logger.debug("{}: Sending removeAll response back to {} for region {}{}",
+          serverConnection.getName(), serverConnection.getSocketString(), regionName,
+          (logger.isTraceEnabled() ? ": " + response : ""));
     }
 
     // Increment statistics and write the reply
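
RemoveAll is the server half of the client's bulk-removal API; one Region.removeAll call on the client becomes the single removeAll request processed above. A small sketch with hypothetical keys:

    import java.util.Arrays;
    import org.apache.geode.cache.Region;

    // The collection of keys travels in one message and is applied on the server
    // via basicBridgeRemoveAll, as shown in the command above.
    static void removeOrders(Region<String, String> region) {
      region.removeAll(Arrays.asList("order-1", "order-2"));
    }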

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveUserAuth.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveUserAuth.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveUserAuth.java
index 16333ac..cc42e0d 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveUserAuth.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RemoveUserAuth.java
@@ -53,7 +53,7 @@ public class RemoveUserAuth extends BaseCommand {
     } catch (GemFireSecurityException gfse) {
       if (serverConnection.getSecurityLogWriter().warningEnabled()) {
         serverConnection.getSecurityLogWriter().warning(LocalizedStrings.ONE_ARG,
-          serverConnection.getName() + ": Security exception: " + gfse.getMessage());
+            serverConnection.getName() + ": Security exception: " + gfse.getMessage());
       }
       writeException(clientMessage, gfse, false, serverConnection);
     } catch (Exception ex) {
@@ -61,7 +61,7 @@ public class RemoveUserAuth extends BaseCommand {
       if (serverConnection.getLogWriter().warningEnabled()) {
         serverConnection.getLogWriter().warning(
             LocalizedStrings.CacheClientNotifier_AN_EXCEPTION_WAS_THROWN_FOR_CLIENT_0_1,
-            new Object[] { serverConnection.getProxyID(), ""}, ex);
+            new Object[] {serverConnection.getProxyID(), ""}, ex);
       }
       writeException(clientMessage, ex, false, serverConnection);
     } finally {

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Request.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Request.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Request.java
index 964b7a4..6f97d31 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Request.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Request.java
@@ -52,7 +52,8 @@ public class Request extends BaseCommand {
   Request() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     Part regionNamePart = null, keyPart = null, valuePart = null;
     String regionName = null;
     Object callbackArg = null, key = null;
@@ -94,8 +95,8 @@ public class Request extends BaseCommand {
     }
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received get request ({} bytes) from {} for region {} key {} txId {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key,
-          clientMessage.getTransactionId());
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName, key, clientMessage.getTransactionId());
     }
 
     // Process the get request
@@ -179,8 +180,8 @@ public class Request extends BaseCommand {
         if (region instanceof PartitionedRegion) {
           PartitionedRegion pr = (PartitionedRegion) region;
           if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
-            writeResponseWithRefreshMetadata(data, callbackArg, clientMessage, isObject, serverConnection, pr,
-                pr.getNetworkHopType());
+            writeResponseWithRefreshMetadata(data, callbackArg, clientMessage, isObject,
+                serverConnection, pr, pr.getNetworkHopType());
             pr.clearNetworkHopData();
           } else {
             writeResponse(data, callbackArg, clientMessage, isObject, serverConnection);
@@ -192,7 +193,8 @@ public class Request extends BaseCommand {
         serverConnection.setAsTrue(RESPONDED);
         if (logger.isDebugEnabled()) {
           logger.debug("{}: Wrote get response back to {} for region {} key {} value: {}",
-              serverConnection.getName(), serverConnection.getSocketString(), regionName, key, data);
+              serverConnection.getName(), serverConnection.getSocketString(), regionName, key,
+              data);
         }
         stats.incWriteGetResponseTime(DistributionStats.getStatTime() - start);
       }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RequestEventValue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RequestEventValue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RequestEventValue.java
index 3753ed6..a6d6578 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RequestEventValue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/RequestEventValue.java
@@ -49,7 +49,8 @@ public class RequestEventValue extends BaseCommand {
 
   private RequestEventValue() {}
 
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     Part eventIDPart = null, valuePart = null;
     EventID event = null;
     Object callbackArg = null;
@@ -67,7 +68,8 @@ public class RequestEventValue extends BaseCommand {
           LocalizedStrings.RequestEventValue_0_THE_EVENT_ID_FOR_THE_GET_EVENT_VALUE_REQUEST_IS_NULL,
           serverConnection.getName()));
       errMessage.append(" The event id for the get event value request is null.");
-      writeErrorResponse(clientMessage, MessageType.REQUESTDATAERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.REQUESTDATAERROR, errMessage.toString(),
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
     } else {
       try {
@@ -90,8 +92,9 @@ public class RequestEventValue extends BaseCommand {
         }
       }
       if (logger.isTraceEnabled()) {
-        logger.trace("{}: Received get event value request ({} bytes) from {}", serverConnection.getName(),
-            clientMessage.getPayloadLength(), serverConnection.getSocketString());
+        logger.trace("{}: Received get event value request ({} bytes) from {}",
+            serverConnection.getName(), clientMessage.getPayloadLength(),
+            serverConnection.getSocketString());
       }
       CacheClientNotifier ccn = serverConnection.getAcceptor().getCacheClientNotifier();
       // Get the ha container.
@@ -110,7 +113,8 @@ public class RequestEventValue extends BaseCommand {
                 LocalizedStrings.RequestEventValue_UNABLE_TO_FIND_A_CLIENT_UPDATE_MESSAGE_FOR_0,
                 event));
             String msgStr = "No value found for " + event + " in " + haContainer.getName();
-            writeErrorResponse(clientMessage, MessageType.REQUEST_EVENT_VALUE_ERROR, msgStr, serverConnection);
+            writeErrorResponse(clientMessage, MessageType.REQUEST_EVENT_VALUE_ERROR, msgStr,
+                serverConnection);
             serverConnection.setAsTrue(RESPONDED);
             return;
           } else {
@@ -140,10 +144,12 @@ public class RequestEventValue extends BaseCommand {
 
         writeResponse(data, callbackArg, clientMessage, isObject, serverConnection);
         serverConnection.setAsTrue(RESPONDED);
-        ccn.getClientProxy(serverConnection.getProxyID()).getStatistics().incDeltaFullMessagesSent();
+        ccn.getClientProxy(serverConnection.getProxyID()).getStatistics()
+            .incDeltaFullMessagesSent();
         if (logger.isDebugEnabled()) {
           logger.debug("{}: Wrote get event value response back to {} for ha container {}",
-              serverConnection.getName(), serverConnection.getSocketString(), haContainer.getName());
+              serverConnection.getName(), serverConnection.getSocketString(),
+              haContainer.getName());
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Size.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Size.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Size.java
index 42e14a3..c4515ab 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Size.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/Size.java
@@ -76,7 +76,8 @@ public class Size extends BaseCommand {
       errMessage
           .append(LocalizedStrings.BaseCommand__THE_INPUT_REGION_NAME_FOR_THE_0_REQUEST_IS_NULL
               .toLocalizedString("size"));
-      writeErrorResponse(clientMessage, MessageType.SIZE_ERROR, errMessage.toString(), serverConnection);
+      writeErrorResponse(clientMessage, MessageType.SIZE_ERROR, errMessage.toString(),
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -114,7 +115,8 @@ public class Size extends BaseCommand {
       }
     } finally {
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: Sent size response for region {}", serverConnection.getName(), regionName);
+        logger.debug("{}: Sent size response for region {}", serverConnection.getName(),
+            regionName);
       }
       serverConnection.setAsTrue(RESPONDED);
       stats.incWriteSizeResponseTime(DistributionStats.getStatTime() - start);

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXSynchronizationCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXSynchronizationCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXSynchronizationCommand.java
index c5b9fc5..03270d6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXSynchronizationCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/TXSynchronizationCommand.java
@@ -53,7 +53,8 @@ public class TXSynchronizationCommand extends BaseCommand {
    * org.apache.geode.internal.cache.tier.sockets.ServerConnection)
    */
   @Override
-  protected boolean shouldMasqueradeForTx(Message clientMessage, ServerConnection serverConnection) {
+  protected boolean shouldMasqueradeForTx(Message clientMessage,
+      ServerConnection serverConnection) {
     // masquerading is done in the waiting thread pool
     return false;
   }
@@ -67,13 +68,14 @@ public class TXSynchronizationCommand extends BaseCommand {
    * long)
    */
   @Override
-  public void cmdExecute(final Message clientMessage, final ServerConnection serverConnection, long start)
-      throws IOException, ClassNotFoundException, InterruptedException {
+  public void cmdExecute(final Message clientMessage, final ServerConnection serverConnection,
+      long start) throws IOException, ClassNotFoundException, InterruptedException {
 
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
 
     CompletionType type = CompletionType.values()[clientMessage.getPart(0).getInt()];
-    /* int txIdInt = */ clientMessage.getPart(1).getInt(); // [bruce] not sure if we need to transmit this
+    /* int txIdInt = */ clientMessage.getPart(1).getInt(); // [bruce] not sure if we need to
+                                                           // transmit this
     final Part statusPart;
     if (type == CompletionType.AFTER_COMPLETION) {
       statusPart = clientMessage.getPart(2);
@@ -81,7 +83,8 @@ public class TXSynchronizationCommand extends BaseCommand {
       statusPart = null;
     }
 
-    final TXManagerImpl txMgr = (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
+    final TXManagerImpl txMgr =
+        (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
     final InternalDistributedMember member =
         (InternalDistributedMember) serverConnection.getProxyID().getDistributedMember();
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterest.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterest.java
index 597f92b..199ac18 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterest.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterest.java
@@ -81,7 +81,8 @@ public class UnregisterInterest extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received unregister interest request ({} bytes) from {} for region {} key {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName, key);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), regionName, key);
     }
 
     // Process the unregister interest request
@@ -96,7 +97,8 @@ public class UnregisterInterest extends BaseCommand {
           LocalizedStrings.UnRegisterInterest_THE_INPUT_REGION_NAME_FOR_THE_UNREGISTER_INTEREST_REQUEST_IS_NULL;
       String s = errMessage.toLocalizedString();
       logger.warn("{}: {}", serverConnection.getName(), s);
-      writeErrorResponse(clientMessage, MessageType.UNREGISTER_INTEREST_DATA_ERROR, s, serverConnection);
+      writeErrorResponse(clientMessage, MessageType.UNREGISTER_INTEREST_DATA_ERROR, s,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -141,8 +143,8 @@ public class UnregisterInterest extends BaseCommand {
      */
     // Unregister interest irrelevent of whether the region is present it or
     // not
-    serverConnection.getAcceptor().getCacheClientNotifier().unregisterClientInterest(regionName, key,
-        interestType, isClosing, serverConnection.getProxyID(), keepalive);
+    serverConnection.getAcceptor().getCacheClientNotifier().unregisterClientInterest(regionName,
+        key, interestType, isClosing, serverConnection.getProxyID(), keepalive);
 
     // Update the statistics and write the reply
     // bserverStats.incLong(processDestroyTimeId,
@@ -151,8 +153,8 @@ public class UnregisterInterest extends BaseCommand {
     writeReply(clientMessage, serverConnection);
     serverConnection.setAsTrue(RESPONDED);
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Sent unregister interest response for region {} key {}", serverConnection.getName(),
-          regionName, key);
+      logger.debug("{}: Sent unregister interest response for region {} key {}",
+          serverConnection.getName(), regionName, key);
     }
     // bserverStats.incLong(writeDestroyResponseTimeId,
     // DistributionStats.getStatTime() - start);
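
UnregisterInterest mirrors RegisterInterest and tears down a previously registered subscription. A sketch, reusing a hypothetical region and key:

    import org.apache.geode.cache.Region;

    static void stopWatching(Region<String, String> region) {
      // Symmetric with registerInterest; routed to the UnregisterInterest command.
      region.unregisterInterest("key1");   // hypothetical key
    }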

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterestList.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterestList.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterestList.java
index 76cbba2..1968bff 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterestList.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UnregisterInterestList.java
@@ -95,8 +95,8 @@ public class UnregisterInterestList extends BaseCommand {
     if (logger.isDebugEnabled()) {
       logger.debug(
           "{}: Received unregister interest request ({} bytes) from {} for the following {} keys in region {}: {}",
-          serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), numberOfKeys,
-          regionName, keys);
+          serverConnection.getName(), clientMessage.getPayloadLength(),
+          serverConnection.getSocketString(), numberOfKeys, regionName, keys);
     }
 
     // Process the unregister interest request
@@ -114,7 +114,8 @@ public class UnregisterInterestList extends BaseCommand {
       }
       String s = errMessage.toLocalizedString();
       logger.warn("{}: {}", serverConnection.getName(), s);
-      writeErrorResponse(clientMessage, MessageType.UNREGISTER_INTEREST_DATA_ERROR, s, serverConnection);
+      writeErrorResponse(clientMessage, MessageType.UNREGISTER_INTEREST_DATA_ERROR, s,
+          serverConnection);
       serverConnection.setAsTrue(RESPONDED);
       return;
     }
@@ -155,8 +156,8 @@ public class UnregisterInterestList extends BaseCommand {
      * responded = true; } else {
      */
     // Register interest
-    serverConnection.getAcceptor().getCacheClientNotifier().unregisterClientInterest(regionName, keys,
-        isClosingList, serverConnection.getProxyID(), keepalive);
+    serverConnection.getAcceptor().getCacheClientNotifier().unregisterClientInterest(regionName,
+        keys, isClosingList, serverConnection.getProxyID(), keepalive);
 
     // Update the statistics and write the reply
     // bserverStats.incLong(processDestroyTimeId,

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UpdateClientNotification.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UpdateClientNotification.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UpdateClientNotification.java
index b870a96..2f434fb 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UpdateClientNotification.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/UpdateClientNotification.java
@@ -35,7 +35,8 @@ public class UpdateClientNotification extends BaseCommand {
   private UpdateClientNotification() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     CacheServerStats stats = serverConnection.getCacheServerStats();
     {
       long oldStart = start;

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseCQ.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseCQ.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseCQ.java
index 72719b2..18929eb 100644
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseCQ.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/CloseCQ.java
@@ -44,7 +44,8 @@ public class CloseCQ extends BaseCQCommand {
   private CloseCQ() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
     ClientProxyMembershipID id = serverConnection.getProxyID();
     CacheServerStats stats = serverConnection.getCacheServerStats();
@@ -67,7 +68,8 @@ public class CloseCQ extends BaseCQCommand {
     if (cqName == null) {
       String err =
           LocalizedStrings.CloseCQ_THE_CQNAME_FOR_THE_CQ_CLOSE_REQUEST_IS_NULL.toLocalizedString();
-      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
+      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null,
+          serverConnection);
       return;
     }
 
@@ -104,19 +106,21 @@ public class CloseCQ extends BaseCQCommand {
       if (cqQuery != null)
         serverConnection.removeCq(cqName, cqQuery.isDurable());
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe,
+          serverConnection);
       return;
     } catch (Exception e) {
       String err =
           LocalizedStrings.CloseCQ_EXCEPTION_WHILE_CLOSING_CQ_CQNAME_0.toLocalizedString(cqName);
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e, serverConnection);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e,
+          serverConnection);
       return;
     }
 
     // Send OK to client
     sendCqResponse(MessageType.REPLY,
-        LocalizedStrings.CloseCQ_CQ_CLOSED_SUCCESSFULLY.toLocalizedString(), clientMessage.getTransactionId(),
-        null, serverConnection);
+        LocalizedStrings.CloseCQ_CQ_CLOSED_SUCCESSFULLY.toLocalizedString(),
+        clientMessage.getTransactionId(), null, serverConnection);
     serverConnection.setAsTrue(RESPONDED);
 
     {

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ.java
index d2a4453..86d53f5 100644
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ.java
@@ -71,9 +71,9 @@ public class ExecuteCQ extends BaseCQCommand {
     byte[] isDurableByte = isDurablePart.getSerializedForm();
     boolean isDurable = (isDurableByte == null || isDurableByte[0] == 0) ? false : true;
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received {} request from {} CqName: {} queryString: {}", serverConnection.getName(),
-          MessageType.getString(clientMessage.getMessageType()), serverConnection.getSocketString(), cqName,
-          cqQueryString);
+      logger.debug("{}: Received {} request from {} CqName: {} queryString: {}",
+          serverConnection.getName(), MessageType.getString(clientMessage.getMessageType()),
+          serverConnection.getSocketString(), cqName, cqQueryString);
     }
 
     DefaultQueryService qService = null;
@@ -108,7 +108,8 @@ public class ExecuteCQ extends BaseCQCommand {
       cqQuery = cqServiceForExec.executeCq(cqName, cqQueryString, cqState, id,
           acceptor.getCacheClientNotifier(), isDurable, false, 0, null);
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe,
+          serverConnection);
       return;
     } catch (Exception e) {
       writeChunkedException(clientMessage, e, serverConnection);
@@ -130,8 +131,8 @@ public class ExecuteCQ extends BaseCQCommand {
         cqRegionNames = ((DefaultQuery) query).getRegionsInQuery(null);
       }
       ((DefaultQuery) query).setIsCqQuery(true);
-      successQuery = processQuery(clientMessage, query, cqQueryString, cqRegionNames, start, cqQuery,
-          executeCQContext, serverConnection, sendResults);
+      successQuery = processQuery(clientMessage, query, cqQueryString, cqRegionNames, start,
+          cqQuery, executeCQContext, serverConnection, sendResults);
 
       // Update the CQ statistics.
       cqQuery.getVsdStats().setCqInitialResultsTime((DistributionStats.getStatTime()) - oldstart);

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ61.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ61.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ61.java
index 805ee48..fcc45de 100755
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ61.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteCQ61.java
@@ -82,9 +82,9 @@ public class ExecuteCQ61 extends BaseCQCommand {
     Part regionDataPolicyPart = clientMessage.getPart(clientMessage.getNumberOfParts() - 1);
     byte[] regionDataPolicyPartBytes = regionDataPolicyPart.getSerializedForm();
     if (logger.isDebugEnabled()) {
-      logger.debug("{}: Received {} request from {} CqName: {} queryString: {}", serverConnection.getName(),
-          MessageType.getString(clientMessage.getMessageType()), serverConnection.getSocketString(), cqName,
-          cqQueryString);
+      logger.debug("{}: Received {} request from {} CqName: {} queryString: {}",
+          serverConnection.getName(), MessageType.getString(clientMessage.getMessageType()),
+          serverConnection.getSocketString(), cqName, cqQueryString);
     }
 
     // Check if the Server is running in NotifyBySubscription=true mode.
@@ -96,7 +96,8 @@ public class ExecuteCQ61 extends BaseCQCommand {
         String err =
             LocalizedStrings.ExecuteCQ_SERVER_NOTIFYBYSUBSCRIPTION_MODE_IS_SET_TO_FALSE_CQ_EXECUTION_IS_NOT_SUPPORTED_IN_THIS_MODE
                 .toLocalizedString();
-        sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
+        sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(),
+            null, serverConnection);
         return;
       }
     }
@@ -144,7 +145,8 @@ public class ExecuteCQ61 extends BaseCQCommand {
       cqQuery = (ServerCQImpl) cqServiceForExec.executeCq(cqName, cqQueryString, cqState, id, ccn,
           isDurable, true, regionDataPolicyPartBytes[0], null);
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe,
+          serverConnection);
       serverConnection.removeCq(cqName, isDurable);
       return;
     } catch (Exception e) {
@@ -172,8 +174,8 @@ public class ExecuteCQ61 extends BaseCQCommand {
           cqRegionNames = ((DefaultQuery) query).getRegionsInQuery(null);
         }
         ((DefaultQuery) query).setIsCqQuery(true);
-        successQuery = processQuery(clientMessage, query, cqQueryString, cqRegionNames, start, cqQuery,
-            executeCQContext, serverConnection, sendResults);
+        successQuery = processQuery(clientMessage, query, cqQueryString, cqRegionNames, start,
+            cqQuery, executeCQContext, serverConnection, sendResults);
 
 
         // Update the CQ statistics.
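
ExecuteCQ and ExecuteCQ61 back the client-side continuous-query API; the other CQ commands in this diff (StopCQ, CloseCQ, GetCQStats, GetDurableCQs, MonitorCQ) act on the same server-side CqQuery objects. A sketch of the client calls that reach them, with a hypothetical CQ name, query string, and listener class:

    import org.apache.geode.cache.client.ClientCache;
    import org.apache.geode.cache.query.CqAttributes;
    import org.apache.geode.cache.query.CqAttributesFactory;
    import org.apache.geode.cache.query.CqQuery;
    import org.apache.geode.cache.query.QueryService;

    // Sketch only; MyCqListener stands in for a hypothetical CqListener implementation.
    static void startPriceCq(ClientCache cache) throws Exception {
      QueryService queryService = cache.getQueryService();
      CqAttributesFactory cqf = new CqAttributesFactory();
      cqf.addCqListener(new MyCqListener());
      CqAttributes cqAttributes = cqf.create();
      CqQuery cq = queryService.newCq("priceTracker",
          "SELECT * FROM /exampleRegion e WHERE e.price > 100", cqAttributes);
      cq.execute();   // arrives at the server as an execute-CQ request
      // cq.stop() and cq.close() correspond to the StopCQ and CloseCQ commands.
    }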

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetCQStats.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetCQStats.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetCQStats.java
index b1faeee..0fab303 100644
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetCQStats.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetCQStats.java
@@ -36,7 +36,8 @@ public class GetCQStats extends BaseCQCommand {
   private GetCQStats() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
 
     CacheServerStats stats = serverConnection.getCacheServerStats();
@@ -61,7 +62,8 @@ public class GetCQStats extends BaseCQCommand {
     // Process the query request
     if (cqName == null) {
       String err = "The cqName for the cq stats request is null";
-      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
+      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null,
+          serverConnection);
       return;
     }
 
@@ -74,11 +76,13 @@ public class GetCQStats extends BaseCQCommand {
       cqService.start();
     } catch (Exception e) {
       String err = "Exception while Getting the CQ Statistics. ";
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e, serverConnection);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e,
+          serverConnection);
       return;
     }
     // Send OK to client
-    sendCqResponse(MessageType.REPLY, "cq stats sent successfully.", clientMessage.getTransactionId(), null, serverConnection);
+    sendCqResponse(MessageType.REPLY, "cq stats sent successfully.",
+        clientMessage.getTransactionId(), null, serverConnection);
     serverConnection.setAsTrue(RESPONDED);
 
     {

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetDurableCQs.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetDurableCQs.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetDurableCQs.java
index e39c8e1..c14bd72 100755
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetDurableCQs.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetDurableCQs.java
@@ -56,7 +56,8 @@ public class GetDurableCQs extends BaseCQCommand {
 
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received {} request from {}", serverConnection.getName(),
-          MessageType.getString(clientMessage.getMessageType()), serverConnection.getSocketString());
+          MessageType.getString(clientMessage.getMessageType()),
+          serverConnection.getSocketString());
     }
 
     DefaultQueryService qService = null;
@@ -87,8 +88,8 @@ public class GetDurableCQs extends BaseCQCommand {
         Object durableCqName = it.next();
         durableCqList.add(durableCqName);
         if (isTraceEnabled) {
-          logger.trace("{}: getDurableCqsResponse <{}>; list size was {}", serverConnection.getName(),
-              durableCqName, durableCqList.size());
+          logger.trace("{}: getDurableCqsResponse <{}>; list size was {}",
+              serverConnection.getName(), durableCqName, durableCqList.size());
         }
         if (durableCqList.size() == MAXIMUM_CHUNK_SIZE) {
           // Send the chunk and clear the list
@@ -100,7 +101,8 @@ public class GetDurableCQs extends BaseCQCommand {
       sendDurableCqsResponseChunk(durableCqList, true, serverConnection);
 
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe,
+          serverConnection);
       return;
     } catch (Exception e) {
       writeChunkedException(clientMessage, e, serverConnection);

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MonitorCQ.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MonitorCQ.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MonitorCQ.java
index 5393e81..5f0118b 100644
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MonitorCQ.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/MonitorCQ.java
@@ -36,7 +36,8 @@ public class MonitorCQ extends BaseCQCommand {
   private MonitorCQ() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
     serverConnection.setAsTrue(REQUIRES_RESPONSE);
     serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
@@ -47,7 +48,8 @@ public class MonitorCQ extends BaseCQCommand {
       // This should have been taken care at the client - remove?
       String err = LocalizedStrings.MonitorCQ__0_THE_MONITORCQ_OPERATION_IS_INVALID
           .toLocalizedString(serverConnection.getName());
-      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
+      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null,
+          serverConnection);
       return;
     }
 
@@ -60,14 +62,16 @@ public class MonitorCQ extends BaseCQCommand {
         String err =
             LocalizedStrings.MonitorCQ__0_A_NULL_REGION_NAME_WAS_PASSED_FOR_MONITORCQ_OPERATION
                 .toLocalizedString(serverConnection.getName());
-        sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
+        sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(),
+            null, serverConnection);
         return;
       }
     }
 
     if (logger.isDebugEnabled()) {
       logger.debug("{}: Received MonitorCq request from {} op: {}{}", serverConnection.getName(),
-          serverConnection.getSocketString(), op, (regionName != null) ? " RegionName: " + regionName : "");
+          serverConnection.getSocketString(), op,
+          (regionName != null) ? " RegionName: " + regionName : "");
     }
 
     this.securityService.authorizeClusterRead();
@@ -84,12 +88,14 @@ public class MonitorCQ extends BaseCQCommand {
       throw new CqException(
           LocalizedStrings.CqService_INVALID_CQ_MONITOR_REQUEST_RECEIVED.toLocalizedString());
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe,
+          serverConnection);
       return;
     } catch (Exception e) {
       String err = LocalizedStrings.MonitorCQ_EXCEPTION_WHILE_HANDLING_THE_MONITOR_REQUEST_OP_IS_0
           .toLocalizedString(Integer.valueOf(op));
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e, serverConnection);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e,
+          serverConnection);
       return;
     }
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/7ca7c2cc/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/StopCQ.java
----------------------------------------------------------------------
diff --git a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/StopCQ.java b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/StopCQ.java
index 070cb04..99fbef1 100644
--- a/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/StopCQ.java
+++ b/geode-cq/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/StopCQ.java
@@ -44,7 +44,8 @@ public class StopCQ extends BaseCQCommand {
   private StopCQ() {}
 
   @Override
-  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
+  public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start)
+      throws IOException {
     CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
     ClientProxyMembershipID id = serverConnection.getProxyID();
     CacheServerStats stats = serverConnection.getCacheServerStats();
@@ -67,7 +68,8 @@ public class StopCQ extends BaseCQCommand {
     if (cqName == null) {
       String err =
           LocalizedStrings.StopCQ_THE_CQNAME_FOR_THE_CQ_STOP_REQUEST_IS_NULL.toLocalizedString();
-      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null, serverConnection);
+      sendCqResponse(MessageType.CQDATAERROR_MSG_TYPE, err, clientMessage.getTransactionId(), null,
+          serverConnection);
       return;
     }
 
@@ -102,19 +104,21 @@ public class StopCQ extends BaseCQCommand {
       if (cqQuery != null)
         serverConnection.removeCq(cqName, cqQuery.isDurable());
     } catch (CqException cqe) {
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe, serverConnection);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, "", clientMessage.getTransactionId(), cqe,
+          serverConnection);
       return;
     } catch (Exception e) {
       String err =
           LocalizedStrings.StopCQ_EXCEPTION_WHILE_STOPPING_CQ_NAMED_0.toLocalizedString(cqName);
-      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e, serverConnection);
+      sendCqResponse(MessageType.CQ_EXCEPTION_TYPE, err, clientMessage.getTransactionId(), e,
+          serverConnection);
       return;
     }
 
     // Send OK to client
     sendCqResponse(MessageType.REPLY,
-        LocalizedStrings.StopCQ_CQ_STOPPED_SUCCESSFULLY.toLocalizedString(), clientMessage.getTransactionId(),
-        null, serverConnection);
+        LocalizedStrings.StopCQ_CQ_STOPPED_SUCCESSFULLY.toLocalizedString(),
+        clientMessage.getTransactionId(), null, serverConnection);
 
     serverConnection.setAsTrue(RESPONDED);
 


[41/43] geode git commit: Cleanup test that recursed infinitely due to failure in precheckin

Posted by kl...@apache.org.
Cleanup test that recursed infinitely due to failure in precheckin


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/9a97112d
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/9a97112d
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/9a97112d

Branch: refs/heads/feature/GEODE-2632-17
Commit: 9a97112da8931a90e51a060054678a130eb4ec58
Parents: 9ea9f4e
Author: Kirk Lund <kl...@apache.org>
Authored: Wed May 24 16:40:29 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Tue May 30 10:21:11 2017 -0700

----------------------------------------------------------------------
 .../cache/ha/BlockingHARegionJUnitTest.java     | 488 +++++++++----------
 .../SerializableErrorCollector.java             |  24 +
 2 files changed, 242 insertions(+), 270 deletions(-)
----------------------------------------------------------------------
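
The rewritten test below drops the hand-rolled WaitCriterion/Wait.waitForCriterion polling and the static exceptionOccurred/exceptionString flags in favor of Awaitility conditions with a two-minute cap plus a JUnit ErrorCollector rule. As a minimal sketch of the Awaitility idiom, assuming Awaitility 2.x on the classpath and using invented class and variable names:

    import static java.util.concurrent.TimeUnit.MINUTES;

    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;

    import org.awaitility.Awaitility;

    public class AwaitilitySketch {

      public static void main(String[] args) {
        Queue<String> events = new ConcurrentLinkedQueue<>();
        new Thread(() -> events.add("event")).start();

        // Poll until the condition holds, failing if it does not hold within
        // two minutes; this replaces the WaitCriterion boilerplate removed below.
        Awaitility.await().atMost(2, MINUTES).until(() -> events.size() == 1);
      }
    }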


http://git-wip-us.apache.org/repos/asf/geode/blob/9a97112d/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
index d0f5793..3c1adc3 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/ha/BlockingHARegionJUnitTest.java
@@ -14,76 +14,114 @@
  */
 package org.apache.geode.internal.cache.ha;
 
+import static java.util.concurrent.TimeUnit.*;
 import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.apache.geode.internal.cache.ha.HARegionQueue.*;
 import static org.junit.Assert.*;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Properties;
 
-import org.apache.geode.internal.cache.InternalCache;
-import org.apache.geode.test.junit.categories.ClientSubscriptionTest;
+import org.awaitility.Awaitility;
+import org.awaitility.core.ConditionFactory;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Ignore;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.internal.cache.EventID;
+import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.test.dunit.ThreadUtils;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
+import org.apache.geode.test.junit.categories.ClientSubscriptionTest;
 import org.apache.geode.test.junit.categories.IntegrationTest;
+import org.apache.geode.test.junit.rules.serializable.SerializableErrorCollector;
 
+/**
+ * Integration tests for Blocking HARegionQueue.
+ *
+ * <p>
+ * #40314: Filled up queue causes all publishers to block
+ *
+ * <p>
+ * #37627: In case of out-of-order messages (sequence ID violation), the capacity (putPermits) of the HARQ was exhausted even though the HARQ was not full.
+ */
 @Category({IntegrationTest.class, ClientSubscriptionTest.class})
 public class BlockingHARegionJUnitTest {
 
-  private static InternalCache cache = null;
+  public static final String REGION = "BlockingHARegionJUnitTest_Region";
+  private static final long THREAD_TIMEOUT = 2 * 60 * 1000;
+
+  private final Object numberForThreadsLock = new Object();
+  private int numberForDoPuts;
+  private int numberForDoTakes;
+
+  volatile boolean stopThreads;
 
-  /** boolean to record an exception occurence in another thread **/
-  private static volatile boolean exceptionOccurred = false;
-  /** StringBuffer to store the exception **/
-  private static StringBuffer exceptionString = new StringBuffer();
-  /** boolen to quit the for loop **/
-  private static volatile boolean quitForLoop = false;
+  private InternalCache cache;
+  private HARegionQueueAttributes queueAttributes;
+  private List<Thread> threads;
+  private ThreadGroup threadGroup;
+
+  @Rule
+  public SerializableErrorCollector errorCollector = new SerializableErrorCollector();
 
   @Before
   public void setUp() throws Exception {
-    Properties props = new Properties();
-    props.setProperty(MCAST_PORT, "0");
-    if (cache != null) {
-      cache.close(); // fault tolerance
+    synchronized (this.numberForThreadsLock) {
+      this.numberForDoPuts = 0;
+      this.numberForDoTakes = 0;
+    }
+
+    this.stopThreads = false;
+    this.threads = new ArrayList<>();
+    this.threadGroup = new ThreadGroup(getClass().getSimpleName()) {
+      @Override
+      public void uncaughtException(Thread t, Throwable e) {
+        errorCollector.addError(e);
+      }
+    };
+
+    this.queueAttributes = new HARegionQueueAttributes();
+
+    Properties config = new Properties();
+    config.setProperty(MCAST_PORT, "0");
+
+    this.cache = (InternalCache) CacheFactory.create(DistributedSystem.connect(config));
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    try {
+      this.stopThreads = true;
+      for (Thread thread : this.threads) {
+        thread.interrupt();
+        ThreadUtils.join(thread, THREAD_TIMEOUT);
+      }
+    } finally {
+      if (this.cache != null) {
+        this.cache.close();
+      }
     }
-    cache = (InternalCache) CacheFactory.create(DistributedSystem.connect(props));
   }
 
   /**
-   * This test has a scenario where the HAReqionQueue capacity is just 1. There will be two thread.
+   * This test has a scenario where the HARegionQueue capacity is just 1. There will be two threads.
    * One doing a 1000 puts and the other doing a 1000 takes. The validation for this test is that it
    * should not encounter any exceptions
    */
   @Test
   public void testBoundedPuts() throws Exception {
-    exceptionOccurred = false;
-    HARegionQueueAttributes harqa = new HARegionQueueAttributes();
-    harqa.setBlockingQueueCapacity(1);
-    HARegionQueue hrq = HARegionQueue.getHARegionQueueInstance("BlockingHARegionJUnitTest_Region",
-        cache, harqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
-    hrq.setPrimary(true);// fix for 40314 - capacity constraint is checked for primary only.
-    Thread thread1 = new DoPuts(hrq, 1000);
-    Thread thread2 = new DoTake(hrq, 1000);
-
-    thread1.start();
-    thread2.start();
-
-    ThreadUtils.join(thread1, 30 * 1000);
-    ThreadUtils.join(thread2, 30 * 1000);
-
-    if (exceptionOccurred) {
-      fail(" Test failed due to " + exceptionString);
-    }
+    this.queueAttributes.setBlockingQueueCapacity(1);
+    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes, BLOCKING_HA_QUEUE, false);
+    hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
 
-    cache.close();
+    startDoPuts(hrq, 1000);
+    startDoTakes(hrq, 1000);
   }
 
   /**
@@ -96,62 +134,22 @@ public class BlockingHARegionJUnitTest {
    */
   @Test
   public void testPutBeingBlocked() throws Exception {
-    exceptionOccurred = false;
-    quitForLoop = false;
-    HARegionQueueAttributes harqa = new HARegionQueueAttributes();
-    harqa.setBlockingQueueCapacity(1);
-    final HARegionQueue hrq = HARegionQueue.getHARegionQueueInstance(
-        "BlockingHARegionJUnitTest_Region", cache, harqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
-    hrq.setPrimary(true);// fix for 40314 - capacity constraint is checked for primary only.
-    final Thread thread1 = new DoPuts(hrq, 2);
-    thread1.start();
-    WaitCriterion ev = new WaitCriterion() {
-      public boolean done() {
-        return hrq.region.size() == 2;
-      }
+    this.queueAttributes.setBlockingQueueCapacity(1);
+    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes, BLOCKING_HA_QUEUE, false);
+    hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
 
-      public String description() {
-        return null;
-      }
-    };
-    Wait.waitForCriterion(ev, 1000, 200, true);
-    assertTrue(thread1.isAlive()); // thread should still be alive (in wait state)
-
-    Thread thread2 = new DoTake(hrq, 1);
-    thread2.start(); // start take thread
-    ev = new WaitCriterion() {
-      public boolean done() {
-        return hrq.region.size() == 3;
-      }
+    Thread doPuts = startDoPuts(hrq, 2);
 
-      public String description() {
-        return null;
-      }
-    };
-    // sleep. take will proceed and so will sleeping put
-    Wait.waitForCriterion(ev, 3 * 1000, 200, true);
+    await().until(() -> assertTrue(hrq.region.size() == 2));
 
-    // thread should have died since put should have proceeded
-    ev = new WaitCriterion() {
-      public boolean done() {
-        return !thread1.isAlive();
-      }
+    // thread should still be alive (in wait state)
+    assertTrue(doPuts.isAlive());
 
-      public String description() {
-        return "thread1 still alive";
-      }
-    };
-    Wait.waitForCriterion(ev, 30 * 1000, 1000, true);
+    startDoTakes(hrq, 1);
 
-    ThreadUtils.join(thread1, 30 * 1000); // for completeness
-    ThreadUtils.join(thread2, 30 * 1000);
-    if (exceptionOccurred) {
-      fail(" Test failed due to " + exceptionString);
-    }
-    cache.close();
+    await().until(() -> assertTrue(hrq.region.size() == 3));
   }
 
-
   /**
    * This test tests that the region capacity is never exceeded even in highly concurrent
    * environments. The region capacity is set to 10000. Then 5 threads start doing put
@@ -161,62 +159,25 @@ public class BlockingHARegionJUnitTest {
    */
   @Test
   public void testConcurrentPutsNotExceedingLimit() throws Exception {
-    exceptionOccurred = false;
-    quitForLoop = false;
-    HARegionQueueAttributes harqa = new HARegionQueueAttributes();
-    harqa.setBlockingQueueCapacity(10000);
-    final HARegionQueue hrq = HARegionQueue.getHARegionQueueInstance(
-        "BlockingHARegionJUnitTest_Region", cache, harqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
-    hrq.setPrimary(true);// fix for 40314 - capacity constraint is checked for primary only.
-    Thread thread1 = new DoPuts(hrq, 20000, 1);
-    Thread thread2 = new DoPuts(hrq, 20000, 2);
-    Thread thread3 = new DoPuts(hrq, 20000, 3);
-    Thread thread4 = new DoPuts(hrq, 20000, 4);
-    Thread thread5 = new DoPuts(hrq, 20000, 5);
-
-    thread1.start();
-    thread2.start();
-    thread3.start();
-    thread4.start();
-    thread5.start();
-
-    WaitCriterion ev = new WaitCriterion() {
-      public boolean done() {
-        return hrq.region.size() == 20000;
-      }
-
-      public String description() {
-        return null;
-      }
-    };
-    Wait.waitForCriterion(ev, 30 * 1000, 200, true);
-
-    assertTrue(thread1.isAlive());
-    assertTrue(thread2.isAlive());
-    assertTrue(thread3.isAlive());
-    assertTrue(thread4.isAlive());
-    assertTrue(thread5.isAlive());
-
-    assertTrue(hrq.region.size() == 20000);
+    this.queueAttributes.setBlockingQueueCapacity(10000);
+    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes, BLOCKING_HA_QUEUE, false);
+    hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
 
-    quitForLoop = true;
-    Thread.sleep(20000);
+    Thread doPuts1 = startDoPuts(hrq, 20000, 1);
+    Thread doPuts2 = startDoPuts(hrq, 20000, 2);
+    Thread doPuts3 = startDoPuts(hrq, 20000, 3);
+    Thread doPuts4 = startDoPuts(hrq, 20000, 4);
+    Thread doPuts5 = startDoPuts(hrq, 20000, 5);
 
-    thread1.interrupt();
-    thread2.interrupt();
-    thread3.interrupt();
-    thread4.interrupt();
-    thread5.interrupt();
+    await().until(() -> assertTrue(hrq.region.size() == 20000));
 
-    Thread.sleep(2000);
+    assertTrue(doPuts1.isAlive());
+    assertTrue(doPuts2.isAlive());
+    assertTrue(doPuts3.isAlive());
+    assertTrue(doPuts4.isAlive());
+    assertTrue(doPuts5.isAlive());
 
-    ThreadUtils.join(thread1, 5 * 60 * 1000);
-    ThreadUtils.join(thread2, 5 * 60 * 1000);
-    ThreadUtils.join(thread3, 5 * 60 * 1000);
-    ThreadUtils.join(thread4, 5 * 60 * 1000);
-    ThreadUtils.join(thread5, 5 * 60 * 1000);
-
-    cache.close();
+    assertTrue(hrq.region.size() == 20000);
   }
 
   /**
@@ -226,84 +187,40 @@ public class BlockingHARegionJUnitTest {
    * state. the region size would be verified to be 20000 (10000 puts and 10000 DACE objects). then
    * the threads are interrupted and made to quit the loop
    */
-  @Ignore("TODO: test is disabled")
+  @Ignore("Test is disabled until/if blocking queue capacity becomes a hard limit")
   @Test
   public void testConcurrentPutsTakesNotExceedingLimit() throws Exception {
-    exceptionOccurred = false;
-    quitForLoop = false;
-    HARegionQueueAttributes harqa = new HARegionQueueAttributes();
-    harqa.setBlockingQueueCapacity(10000);
-    final HARegionQueue hrq = HARegionQueue.getHARegionQueueInstance(
-        "BlockingHARegionJUnitTest_Region", cache, harqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
-    Thread thread1 = new DoPuts(hrq, 40000, 1);
-    Thread thread2 = new DoPuts(hrq, 40000, 2);
-    Thread thread3 = new DoPuts(hrq, 40000, 3);
-    Thread thread4 = new DoPuts(hrq, 40000, 4);
-    Thread thread5 = new DoPuts(hrq, 40000, 5);
-
-    Thread thread6 = new DoTake(hrq, 5000);
-    Thread thread7 = new DoTake(hrq, 5000);
-    Thread thread8 = new DoTake(hrq, 5000);
-    Thread thread9 = new DoTake(hrq, 5000);
-    Thread thread10 = new DoTake(hrq, 5000);
-
-    thread1.start();
-    thread2.start();
-    thread3.start();
-    thread4.start();
-    thread5.start();
-
-    thread6.start();
-    thread7.start();
-    thread8.start();
-    thread9.start();
-    thread10.start();
-
-    ThreadUtils.join(thread6, 30 * 1000);
-    ThreadUtils.join(thread7, 30 * 1000);
-    ThreadUtils.join(thread8, 30 * 1000);
-    ThreadUtils.join(thread9, 30 * 1000);
-    ThreadUtils.join(thread10, 30 * 1000);
-
-    WaitCriterion ev = new WaitCriterion() {
-      public boolean done() {
-        return hrq.region.size() == 20000;
-      }
-
-      public String description() {
-        return null;
-      }
-    };
-    Wait.waitForCriterion(ev, 30 * 1000, 200, true);
-
-    assertTrue(thread1.isAlive());
-    assertTrue(thread2.isAlive());
-    assertTrue(thread3.isAlive());
-    assertTrue(thread4.isAlive());
-    assertTrue(thread5.isAlive());
+    this.queueAttributes.setBlockingQueueCapacity(10000);
+    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes, BLOCKING_HA_QUEUE, false);
+    hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
+
+    Thread doPuts1 = startDoPuts(hrq, 40000, 1);
+    Thread doPuts2 = startDoPuts(hrq, 40000, 2);
+    Thread doPuts3 = startDoPuts(hrq, 40000, 3);
+    Thread doPuts4 = startDoPuts(hrq, 40000, 4);
+    Thread doPuts5 = startDoPuts(hrq, 40000, 5);
+
+    Thread doTakes1 = startDoTakes(hrq, 5000);
+    Thread doTakes2 = startDoTakes(hrq, 5000);
+    Thread doTakes3 = startDoTakes(hrq, 5000);
+    Thread doTakes4 = startDoTakes(hrq, 5000);
+    Thread doTakes5 = startDoTakes(hrq, 5000);
+
+    ThreadUtils.join(doTakes1, 30 * 1000);
+    ThreadUtils.join(doTakes2, 30 * 1000);
+    ThreadUtils.join(doTakes3, 30 * 1000);
+    ThreadUtils.join(doTakes4, 30 * 1000);
+    ThreadUtils.join(doTakes5, 30 * 1000);
+
+    await().until(() -> assertTrue(hrq.region.size() == 20000));
+
+    assertTrue(doPuts1.isAlive());
+    assertTrue(doPuts2.isAlive());
+    assertTrue(doPuts3.isAlive());
+    assertTrue(doPuts4.isAlive());
+    assertTrue(doPuts5.isAlive());
 
     assertTrue(hrq.region.size() == 20000);
-
-    quitForLoop = true;
-
-    Thread.sleep(2000);
-
-    thread1.interrupt();
-    thread2.interrupt();
-    thread3.interrupt();
-    thread4.interrupt();
-    thread5.interrupt();
-
-    Thread.sleep(2000);
-
-
-    ThreadUtils.join(thread1, 30 * 1000);
-    ThreadUtils.join(thread2, 30 * 1000);
-    ThreadUtils.join(thread3, 30 * 1000);
-    ThreadUtils.join(thread4, 30 * 1000);
-    ThreadUtils.join(thread5, 30 * 1000);
-
-    cache.close();
   }
 
   /**
@@ -315,62 +232,91 @@ public class BlockingHARegionJUnitTest {
    */
   @Test
   public void testHARQMaxCapacity_Bug37627() throws Exception {
-    try {
-      exceptionOccurred = false;
-      quitForLoop = false;
-      HARegionQueueAttributes harqa = new HARegionQueueAttributes();
-      harqa.setBlockingQueueCapacity(1);
-      harqa.setExpiryTime(180);
-      final HARegionQueue hrq = HARegionQueue.getHARegionQueueInstance(
-          "BlockingHARegionJUnitTest_Region", cache, harqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
-      hrq.setPrimary(true);// fix for 40314 - capacity constraint is checked for primary only.
-      final EventID id1 = new EventID(new byte[] {1}, 1, 2); // violation
-      final EventID ignore = new EventID(new byte[] {1}, 1, 1); //
-      final EventID id2 = new EventID(new byte[] {1}, 1, 3); //
-      Thread t1 = new Thread() {
-        public void run() {
-          try {
-            hrq.put(new ConflatableObject("key1", "value1", id1, false, "region1"));
-            hrq.take();
-            hrq.put(new ConflatableObject("key2", "value1", ignore, false, "region1"));
-            hrq.put(new ConflatableObject("key3", "value1", id2, false, "region1"));
-          } catch (Exception e) {
-            exceptionString.append("First Put in region queue failed");
-            exceptionOccurred = true;
-          }
+    this.queueAttributes.setBlockingQueueCapacity(1);
+    this.queueAttributes.setExpiryTime(180);
+    HARegionQueue hrq = getHARegionQueueInstance(REGION, this.cache, this.queueAttributes, BLOCKING_HA_QUEUE, false);
+    hrq.setPrimary(true); // fix for 40314 - capacity constraint is checked for primary only
+
+    EventID event1 = new EventID(new byte[] {1}, 1, 2); // violation
+    EventID event2 = new EventID(new byte[] {1}, 1, 1); // ignored
+    EventID event3 = new EventID(new byte[] {1}, 1, 3);
+
+    newThread(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          hrq.put(new ConflatableObject("key1", "value1", event1, false, "region1"));
+          hrq.take();
+          hrq.put(new ConflatableObject("key2", "value1", event2, false, "region1"));
+          hrq.put(new ConflatableObject("key3", "value1", event3, false, "region1"));
+        } catch (Exception e) {
+          errorCollector.addError(e);
         }
-      };
-      t1.start();
-      ThreadUtils.join(t1, 20 * 1000);
-      if (exceptionOccurred) {
-        fail(" Test failed due to " + exceptionString);
-      }
-    } finally {
-      if (cache != null) {
-        cache.close();
       }
+    });
+  }
+
+  private Thread newThread(Runnable runnable) {
+    Thread thread = new Thread(this.threadGroup, runnable);
+    this.threads.add(thread);
+    thread.start();
+    return thread;
+  }
+
+  private Thread startDoPuts(HARegionQueue haRegionQueue, int count) {
+    return startDoPuts(haRegionQueue, count, 0);
+  }
+
+  private Thread startDoPuts(HARegionQueue haRegionQueue, int count, int regionId) {
+    Thread thread = new DoPuts(this.threadGroup, haRegionQueue, count, regionId);
+    this.threads.add(thread);
+    thread.start();
+    return thread;
+  }
+
+  private Thread startDoTakes(HARegionQueue haRegionQueue, int count) {
+    Thread thread = new DoTakes(this.threadGroup, haRegionQueue, count);
+    this.threads.add(thread);
+    thread.start();
+    return thread;
+  }
+
+  private ConditionFactory await() {
+    return Awaitility.await().atMost(2, MINUTES);
+  }
+
+  int nextDoPutsThreadNum() {
+    synchronized (this.numberForThreadsLock) {
+      return numberForDoPuts++;
+    }
+  }
+
+  int nextDoTakesThreadNum() {
+    synchronized (this.numberForThreadsLock) {
+      return numberForDoTakes++;
     }
   }
 
   /**
    * class which does specified number of puts on the queue
    */
-  private static class DoPuts extends Thread {
+  private class DoPuts extends Thread {
 
-    HARegionQueue regionQueue = null;
-    final int numberOfPuts;
+    private final HARegionQueue regionQueue;
 
-    DoPuts(HARegionQueue haRegionQueue, int numberOfPuts) {
-      this.regionQueue = haRegionQueue;
-      this.numberOfPuts = numberOfPuts;
-    }
+    private final int numberOfPuts;
 
     /**
      * region id can be specified to generate Thread unique events
      */
-    int regionId = 0;
+    private final int regionId;
 
-    DoPuts(HARegionQueue haRegionQueue, int numberOfPuts, int regionId) {
+    DoPuts(ThreadGroup threadGroup, HARegionQueue haRegionQueue, int numberOfPuts) {
+      this(threadGroup, haRegionQueue, numberOfPuts, 0);
+    }
+
+    DoPuts(ThreadGroup threadGroup, HARegionQueue haRegionQueue, int numberOfPuts, int regionId) {
+      super(threadGroup, "DoPuts-" + nextDoPutsThreadNum());
       this.regionQueue = haRegionQueue;
       this.numberOfPuts = numberOfPuts;
       this.regionId = regionId;
@@ -378,19 +324,16 @@ public class BlockingHARegionJUnitTest {
 
     @Override
     public void run() {
-      for (int i = 0; i < numberOfPuts; i++) {
+      for (int i = 0; i < this.numberOfPuts; i++) {
+        if (stopThreads || Thread.currentThread().isInterrupted()) {
+          break;
+        }
         try {
           this.regionQueue.put(new ConflatableObject("" + i, "" + i,
-              new EventID(new byte[regionId], i, i), false, "BlockingHARegionJUnitTest_Region"));
-          if (quitForLoop) {
-            break;
-          }
-          if (Thread.currentThread().isInterrupted()) {
-            break;
-          }
+              new EventID(new byte[this.regionId], i, i), false, REGION));
         } catch (Exception e) {
-          exceptionOccurred = true;
-          exceptionString.append(" Exception occurred due to " + e);
+          errorCollector.addError(e);
+          break;
         }
       }
     }
@@ -399,24 +342,29 @@ public class BlockingHARegionJUnitTest {
   /**
    * class which does a specified number of takes
    */
-  private static class DoTake extends Thread {
+  private class DoTakes extends Thread {
 
-    final HARegionQueue regionQueue;
-    final int numberOfTakes;
+    private final HARegionQueue regionQueue;
 
-    DoTake(HARegionQueue haRegionQueue, int numberOfTakes) {
+    private final int numberOfTakes;
+
+    DoTakes(ThreadGroup threadGroup, HARegionQueue haRegionQueue, int numberOfTakes) {
+      super(threadGroup, "DoTakes-" + nextDoTakesThreadNum());
       this.regionQueue = haRegionQueue;
       this.numberOfTakes = numberOfTakes;
     }
 
     @Override
     public void run() {
-      for (int i = 0; i < numberOfTakes; i++) {
+      for (int i = 0; i < this.numberOfTakes; i++) {
+        if (stopThreads || Thread.currentThread().isInterrupted()) {
+          break;
+        }
         try {
           assertNotNull(this.regionQueue.take());
         } catch (Exception e) {
-          exceptionOccurred = true;
-          exceptionString.append(" Exception occurred due to " + e);
+          errorCollector.addError(e);
+          break;
         }
       }
     }
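
The tearDown() added above stops the worker threads cooperatively: it sets the volatile stopThreads flag, interrupts each tracked thread, and joins with a bounded timeout, so a misbehaving worker can no longer keep spinning after the test method returns. A generic sketch of that shutdown idiom, with invented names:

    import java.util.ArrayList;
    import java.util.List;

    public class CooperativeShutdownSketch {

      private final List<Thread> threads = new ArrayList<>();
      private volatile boolean stopThreads;

      Thread startWorker(Runnable task) {
        Thread thread = new Thread(() -> {
          // Workers re-check the flag and their interrupt status on every pass.
          while (!stopThreads && !Thread.currentThread().isInterrupted()) {
            task.run();
          }
        });
        threads.add(thread);
        thread.start();
        return thread;
      }

      void shutdown(long timeoutMillis) throws InterruptedException {
        stopThreads = true;            // observed by workers on their next loop pass
        for (Thread thread : threads) {
          thread.interrupt();          // wakes workers blocked in sleep/wait/take
          thread.join(timeoutMillis);  // bounded join so teardown itself cannot hang
        }
      }
    }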

http://git-wip-us.apache.org/repos/asf/geode/blob/9a97112d/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java
----------------------------------------------------------------------
diff --git a/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java
new file mode 100644
index 0000000..0abfdaf
--- /dev/null
+++ b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.test.junit.rules.serializable;
+
+import org.junit.rules.ErrorCollector;
+
+import java.io.Serializable;
+
+public class SerializableErrorCollector extends ErrorCollector implements Serializable {
+}
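
SerializableErrorCollector adds Serializable to JUnit's stock ErrorCollector, presumably so the rule can be serialized for use in distributed (DUnit) tests; errors added during the test are reported together when the rule verifies after the test method returns. A minimal, hypothetical usage sketch mirroring how the rewritten test routes worker-thread failures into the rule (test and thread names are invented):

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.ErrorCollector;

    public class WorkerThreadErrorExample {

      @Rule
      public ErrorCollector errorCollector = new ErrorCollector();

      @Test
      public void failuresInWorkerThreadsFailTheTest() throws Exception {
        // Route uncaught exceptions from worker threads into the collector,
        // as the rewritten test does with its ThreadGroup override.
        ThreadGroup group = new ThreadGroup("workers") {
          @Override
          public void uncaughtException(Thread t, Throwable e) {
            errorCollector.addError(e);
          }
        };

        Thread worker = new Thread(group, () -> {
          // Any exception thrown here is recorded by the collector and
          // reported when the rule verifies at the end of the test.
        });
        worker.start();
        worker.join();
      }
    }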


[34/43] geode git commit: Cleanup CacheClientUpdater

Posted by kl...@apache.org.
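
Among other cleanups, the diff below extracts the hard-coded 32768 socket buffer size into a DEFAULT_SOCKET_BUFFER_SIZE constant and drops the redundant intValue()/longValue() unboxing around the system-property lookups. The underlying JDK pattern is a property read with a default; a small illustrative sketch follows (the surrounding class is invented, and the gemfire. prefix is assumed to be what DistributionConfig.GEMFIRE_PREFIX expands to):

    public class SystemPropertyDefaultsSketch {

      private static final int DEFAULT_SOCKET_BUFFER_SIZE = 32768;

      public static void main(String[] args) {
        // Integer.getInteger reads a system property and falls back to the
        // supplied default when the property is unset or unparsable.
        int socketBufferSize =
            Integer.getInteger("BridgeServer.SOCKET_BUFFER_SIZE", DEFAULT_SOCKET_BUFFER_SIZE);

        // Same pattern for the cache-wait timeout, in seconds.
        long maxCacheWaitSeconds =
            Long.getLong("gemfire.CacheClientUpdater.MAX_WAIT", 120);

        System.out.println("buffer=" + socketBufferSize + ", maxWait=" + maxCacheWaitSeconds + "s");
      }
    }
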
http://git-wip-us.apache.org/repos/asf/geode/blob/07efaa8e/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java
index 7698550..8915c55 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CacheClientUpdater.java
@@ -94,9 +94,9 @@ import org.apache.geode.security.AuthenticationRequiredException;
 import org.apache.geode.security.GemFireSecurityException;
 
 /**
- * <code>CacheClientUpdater</code> is a thread that processes update messages from a cache server
- * and {@linkplain org.apache.geode.cache.Region#localInvalidate(Object) invalidates} the local
- * cache based on the contents of those messages.
+ * {@code CacheClientUpdater} is a thread that processes update messages from a cache server and
+ * {@linkplain org.apache.geode.cache.Region#localInvalidate(Object) invalidates} the local cache
+ * based on the contents of those messages.
  * 
  * @since GemFire 3.5
  */
@@ -104,6 +104,8 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
 
   private static final Logger logger = LogService.getLogger();
 
+  private static final int DEFAULT_SOCKET_BUFFER_SIZE = 32768;
+
   /**
    * true if the constructor successfully created a connection. If false, the run method for this
    * thread immediately exits.
@@ -129,6 +131,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    * The input stream of the socket
    */
   private final InputStream in;
+
   /**
    * Failed updater from the endpoint previously known as the primary
    */
@@ -139,12 +142,12 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    */
   private final ByteBuffer commBuffer;
 
-  private boolean commBufferReleased;
+  private boolean commBufferReleased; // TODO: fix synchronization
 
   private final CCUStats stats;
 
   /**
-   * Cache for which we provide service
+   * Cache for which we provide service TODO: lifecycle and synchronization need work
    */
   private /* final */ InternalCache cache;
 
@@ -175,18 +178,18 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    */
   private boolean isOpCompleted;
 
-  public final static String CLIENT_UPDATER_THREAD_NAME = "Cache Client Updater Thread ";
+  public static final String CLIENT_UPDATER_THREAD_NAME = "Cache Client Updater Thread ";
 
   /**
-   * to enable test flag
+   * to enable test flag TODO: eliminate isUsedByTest
    */
   public static boolean isUsedByTest;
 
   /**
    * Indicates if full value was requested from server as a result of failure in applying delta
-   * bytes.
+   * bytes. TODO: only used for test assertion
    */
-  public static boolean fullValueRequested = false;
+  static boolean fullValueRequested = false;
 
   private final ServerLocation location;
 
@@ -195,8 +198,8 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   private EndpointManager eManager = null;
   private Endpoint endpoint = null;
 
-  static private final long MAX_CACHE_WAIT = Long
-      .getLong(DistributionConfig.GEMFIRE_PREFIX + "CacheClientUpdater.MAX_WAIT", 120).longValue(); // seconds
+  private static final long MAX_CACHE_WAIT =
+      Long.getLong(DistributionConfig.GEMFIRE_PREFIX + "CacheClientUpdater.MAX_WAIT", 120); // seconds
 
   /**
    * Return true if cache appears
@@ -231,7 +234,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
       boolean interrupted = Thread.interrupted();
       try {
         Thread.sleep(1000);
-      } catch (InterruptedException e) {
+      } catch (InterruptedException ignore) {
         interrupted = true;
       } finally {
         if (interrupted) {
@@ -245,12 +248,12 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   }
 
   /**
-   * Creates a new <code>CacheClientUpdater</code> with a given name that waits for a server to
-   * connect on a given port.
+   * Creates a new {@code CacheClientUpdater} with a given name that waits for a server to connect
+   * on a given port.
    *
    * @param name descriptive name, used for our ThreadGroup
    * @param location the endpoint we represent
-   * @param primary true if our endpoint is primary TODO ask the ep for this?
+   * @param primary true if our endpoint is primary
    * @param ids the system we are distributing messages through
    * 
    * @throws AuthenticationRequiredException when client is not configured to send credentials using
@@ -265,6 +268,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
       Endpoint endpoint, int handshakeTimeout, SocketCreator socketCreator)
       throws AuthenticationRequiredException, AuthenticationFailedException,
       ServerRefusedConnectionException {
+
     super(LoggingThreadGroup.createThreadGroup("Client update thread"), name);
     this.setDaemon(true);
     this.system = (InternalDistributedSystem) ids;
@@ -276,6 +280,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
     this.eManager = eManager;
     this.endpoint = endpoint;
     this.stats = new CCUStats(this.system, this.location);
+
     // Create the connection...
     final boolean isDebugEnabled = logger.isDebugEnabled();
     if (isDebugEnabled) {
@@ -291,7 +296,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
     try {
       // Size of the server-to-client communication socket buffers
       int socketBufferSize =
-          Integer.getInteger("BridgeServer.SOCKET_BUFFER_SIZE", 32768).intValue();
+          Integer.getInteger("BridgeServer.SOCKET_BUFFER_SIZE", DEFAULT_SOCKET_BUFFER_SIZE);
 
       mySock = socketCreator.connectForClient(location.getHostName(), location.getPort(),
           handshakeTimeout, socketBufferSize);
@@ -327,31 +332,27 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
         }
       }
 
-      {
-        int bufSize = 1024;
-        try {
-          bufSize = mySock.getSendBufferSize();
-          if (bufSize < 1024) {
-            bufSize = 1024;
-          }
-        } catch (SocketException ignore) {
+      int bufSize = 1024;
+      try {
+        bufSize = mySock.getSendBufferSize();
+        if (bufSize < 1024) {
+          bufSize = 1024;
         }
-        cb = ServerConnection.allocateCommBuffer(bufSize, mySock);
-      }
-      {
-        // create a "server" memberId we currently don't know much about the
-        // server.
-        // Would be nice for it to send us its member id
-        // TODO: change the serverId to use the endpoint's getMemberId() which
-        // returns a
-        // DistributedMember (once gfecq branch is merged to trunk).
-        MemberAttributes ma =
-            new MemberAttributes(0, -1, DistributionManager.NORMAL_DM_TYPE, -1, null, null, null);
-        sid = new InternalDistributedMember(mySock.getInetAddress(), mySock.getPort(), false, true,
-            ma);
+      } catch (SocketException ignore) {
       }
+      cb = ServerConnection.allocateCommBuffer(bufSize, mySock);
+
+      // create a "server" memberId we currently don't know much about the server.
+      // Would be nice for it to send us its member id
+      // TODO: change the serverId to use the endpoint's getMemberId() which returns a
+      // DistributedMember (once gfecq branch is merged to trunk).
+      MemberAttributes ma =
+          new MemberAttributes(0, -1, DistributionManager.NORMAL_DM_TYPE, -1, null, null, null);
+      sid =
+          new InternalDistributedMember(mySock.getInetAddress(), mySock.getPort(), false, true, ma);
+
       success = true;
-    } catch (ConnectException e) {
+    } catch (ConnectException ignore) {
       if (!quitting()) {
         logger.warn(LocalizedMessage
             .create(LocalizedStrings.CacheClientUpdater_0_CONNECTION_WAS_REFUSED, this));
@@ -385,20 +386,22 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
             e.getMessage()));
       }
     } finally {
-      connected = success;
+      this.connected = success;
       if (mySock != null) {
         try {
           mySock.setSoTimeout(0);
-        } catch (SocketException e) {
+        } catch (SocketException ignore) {
           // ignore: nothing we can do about this
         }
       }
-      if (connected) {
-        socket = mySock;
-        out = tmpOut;
-        in = tmpIn;
-        serverId = sid;
-        commBuffer = cb;
+
+      if (this.connected) {
+        this.socket = mySock;
+        this.out = tmpOut;
+        this.in = tmpIn;
+        this.serverId = sid;
+        this.commBuffer = cb;
+
         // Don't want the timeout after handshake
         if (mySock != null) {
           try {
@@ -406,12 +409,13 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
           } catch (SocketException ignore) {
           }
         }
+
       } else {
-        socket = null;
-        serverId = null;
-        commBuffer = null;
-        out = null;
-        in = null;
+        this.socket = null;
+        this.serverId = null;
+        this.commBuffer = null;
+        this.out = null;
+        this.in = null;
 
         if (mySock != null) {
           try {
@@ -439,29 +443,31 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   }
 
   public boolean isConnected() {
-    return connected;
+    return this.connected;
   }
 
+  @Override
   public boolean isPrimary() {
-    return isPrimary;
+    return this.isPrimary;
   }
 
   public InternalLogWriter getSecurityLogger() {
     return this.qManager.getSecurityLogger();
   }
 
+  @Override
   public void setFailedUpdater(ClientUpdater failedUpdater) {
     this.failedUpdater = failedUpdater;
   }
 
   /**
-   * Performs the work of the client update thread. Creates a <code>ServerSocket</code> and waits
-   * for the server to connect to it.
+   * Performs the work of the client update thread. Creates a {@code ServerSocket} and waits for the
+   * server to connect to it.
    */
   @Override
   public void run() {
+    EntryLogger.setSource(this.serverId, "RI");
     boolean addedListener = false;
-    EntryLogger.setSource(serverId, "RI");
     try {
       this.system.addDisconnectListener(this);
       addedListener = true;
@@ -472,8 +478,10 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
         return;
       }
       processMessages();
-    } catch (CancelException e) {
-      return; // just bail
+
+    } catch (CancelException ignore) {
+      // just bail
+
     } finally {
       if (addedListener) {
         this.system.removeDisconnectListener(this);
@@ -486,8 +494,8 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   /**
    * Notifies this thread to stop processing
    */
-  protected void stopProcessing() {
-    continueProcessing.set(false);// = false;
+  private void stopProcessing() {
+    this.continueProcessing.set(false);
   }
 
   /**
@@ -495,39 +503,27 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    * duplicates. Note: this method is not named stop because this is a Thread which has a deprecated
    * stop method.
    */
-  public void stopUpdater() {
+  private void stopUpdater() {
     boolean isSelfDestroying = Thread.currentThread() == this;
-
     stopProcessing();
+
     // need to also close the socket for this interrupt to wakeup
     // the thread. This fixes bug 35691.
-    // this.close(); // this should not be done here.
 
     if (this.isAlive()) {
       if (logger.isDebugEnabled()) {
         logger.debug("{}: Stopping {}", this.location, this);
       }
+
       if (!isSelfDestroying) {
         interrupt();
         try {
-          if (socket != null) {
-            socket.close();
+          if (this.socket != null) {
+            this.socket.close();
           }
-        } catch (VirtualMachineError err) {
-          SystemFailure.initiateFailure(err);
-          // If this ever returns, rethrow the error. We're poisoned
-          // now, so don't let this thread continue.
-          throw err;
-        } catch (Throwable t) {
-          // Whenever you catch Error or Throwable, you must also
-          // catch VirtualMachineError (see above). However, there is
-          // _still_ a possibility that you are dealing with a cascading
-          // error condition, so you also need to check to see if the JVM
-          // is still usable:
-          SystemFailure.checkFailure();
-          // dont care...
+        } catch (IOException e) {
           if (logger.isDebugEnabled()) {
-            logger.debug(t.getMessage(), t);
+            logger.debug(e.getMessage(), e);
           }
         }
       } // !isSelfDestroying
@@ -537,32 +533,24 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   /**
    * Signals the run thread to stop, closes underlying resources.
    */
+  @Override
   public void close() {
-    this.continueProcessing.set(false);// = false; // signals we are done.
+    this.continueProcessing.set(false); // signals we are done.
 
-    // Close the socket
-    // This will also cause the underlying streams to fail.
+    // Close the socket. This will also cause the underlying streams to fail.
     try {
-      if (socket != null) {
-        socket.close();
+      if (this.socket != null) {
+        this.socket.close();
       }
-    } catch (Exception e) {
+    } catch (IOException ignore) {
       // ignore
     }
 
-    try {
-      this.stats.close();
-    } catch (Exception e) {
-      // ignore
-    }
+    this.stats.close();
 
     // close the helper
-    try {
-      if (cacheHelper != null) {
-        cacheHelper.close();
-      }
-    } catch (Exception e) {
-      // ignore
+    if (this.cacheHelper != null) {
+      this.cacheHelper.close();
     }
     releaseCommBuffer();
   }
@@ -580,22 +568,24 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   /* refinement of method inherited from Thread */
   @Override
   public String toString() {
-    return this.getName() + " (" + this.location.getHostName() + ":" + this.location.getPort()
-        + ")";
+    return getName() + " (" + this.location.getHostName() + ':' + this.location.getPort() + ')';
   }
 
   /**
    * Handle a marker message
    * 
-   * @param m message containing the data
+   * @param clientMessage message containing the data
    */
-  private void handleMarker(Message m) {
+  private void handleMarker(Message clientMessage) {
     try {
       final boolean isDebugEnabled = logger.isDebugEnabled();
       if (isDebugEnabled) {
-        logger.debug("Received marker message of length ({} bytes)", m.getPayloadLength());
+        logger.debug("Received marker message of length ({} bytes)",
+            clientMessage.getPayloadLength());
       }
+
       this.qManager.getState().processMarker();
+
       if (isDebugEnabled) {
         logger.debug("Processed marker message");
       }
@@ -610,41 +600,40 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   /**
    * Create or update an entry
    * 
-   * @param m message containing the data
+   * @param clientMessage message containing the data
    */
-  private void handleUpdate(Message m) {
+  private void handleUpdate(Message clientMessage) {
     String regionName = null;
     Object key = null;
     Part valuePart = null;
-    Object newValue = null;
-    byte[] deltaBytes = null;
-    Object fullValue = null;
-    boolean isValueObject = false;
-    int partCnt = 0;
     final boolean isDebugEnabled = logger.isDebugEnabled();
+
     try {
       this.isOpCompleted = false;
+
       // Retrieve the data from the put message parts
       if (isDebugEnabled) {
-        logger.debug("Received put message of length ({} bytes)", m.getPayloadLength());
+        logger.debug("Received put message of length ({} bytes)", clientMessage.getPayloadLength());
       }
 
-      Part regionNamePart = m.getPart(partCnt++);
-      Part keyPart = m.getPart(partCnt++);
-      boolean isDeltaSent = ((Boolean) m.getPart(partCnt++).getObject()).booleanValue();
-      valuePart = m.getPart(partCnt++);
-      Part callbackArgumentPart = m.getPart(partCnt++);
-      VersionTag versionTag = (VersionTag) m.getPart(partCnt++).getObject();
+      int partCnt = 0;
+      Part regionNamePart = clientMessage.getPart(partCnt++);
+      Part keyPart = clientMessage.getPart(partCnt++);
+      boolean isDeltaSent = (Boolean) clientMessage.getPart(partCnt++).getObject();
+      valuePart = clientMessage.getPart(partCnt++);
+      Part callbackArgumentPart = clientMessage.getPart(partCnt++);
+      VersionTag versionTag = (VersionTag) clientMessage.getPart(partCnt++).getObject();
       if (versionTag != null) {
         versionTag.replaceNullIDs((InternalDistributedMember) this.endpoint.getMemberId());
       }
-      Part isInterestListPassedPart = m.getPart(partCnt++);
-      Part hasCqsPart = m.getPart(partCnt++);
+      Part isInterestListPassedPart = clientMessage.getPart(partCnt++);
+      Part hasCqsPart = clientMessage.getPart(partCnt++);
 
-      EventID eventId = (EventID) m.getPart(m.getNumberOfParts() - 1).getObject();
+      EventID eventId =
+          (EventID) clientMessage.getPart(clientMessage.getNumberOfParts() - 1).getObject();
 
-      boolean withInterest = ((Boolean) isInterestListPassedPart.getObject()).booleanValue();
-      boolean withCQs = ((Boolean) hasCqsPart.getObject()).booleanValue();
+      boolean withInterest = (Boolean) isInterestListPassedPart.getObject();
+      boolean withCQs = (Boolean) hasCqsPart.getObject();
 
       regionName = regionNamePart.getString();
       key = keyPart.getStringOrObject();
@@ -655,30 +644,39 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
       // object, it will be stored as a CachedDeserializable and
       // deserialized only when requested.
 
-      boolean isCreate = (m.getMessageType() == MessageType.LOCAL_CREATE);
+      boolean isCreate = clientMessage.getMessageType() == MessageType.LOCAL_CREATE;
+
       if (isDebugEnabled) {
-        logger
-            .debug(
-                "Putting entry for region: {} key: {} create: {}{} callbackArgument: {} withInterest={} withCQs={} eventID={} version={}",
-                regionName, key, isCreate,
-                (valuePart.isObject() ? new StringBuilder(" value: ")
-                    .append(deserialize(valuePart.getSerializedForm())) : ""),
-                callbackArgument, withInterest, withCQs, eventId, versionTag);
+        logger.debug(
+            "Putting entry for region: {} key: {} create: {}{} callbackArgument: {} withInterest={} withCQs={} eventID={} version={}",
+            regionName, key, isCreate,
+            valuePart.isObject()
+                ? new StringBuilder(" value: ").append(deserialize(valuePart.getSerializedForm()))
+                : "",
+            callbackArgument, withInterest, withCQs, eventId, versionTag);
       }
 
-      LocalRegion region = (LocalRegion) cacheHelper.getRegion(regionName);
+      LocalRegion region = (LocalRegion) this.cacheHelper.getRegion(regionName);
+
+      Object newValue = null;
+      byte[] deltaBytes = null;
+      Object fullValue = null;
+      boolean isValueObject;
 
       if (!isDeltaSent) {
         // bug #42162 - must check for a serialized null here
         byte[] serializedForm = valuePart.getSerializedForm();
+
         if (isCreate && InternalDataSerializer.isSerializedNull(serializedForm)) {
           // newValue = null; newValue is already null
         } else {
           newValue = valuePart.getSerializedForm();
         }
+
         if (withCQs) {
           fullValue = valuePart.getObject();
         }
+
         isValueObject = valuePart.isObject();
       } else {
         deltaBytes = valuePart.getSerializedForm();
@@ -689,40 +687,49 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
         if (isDebugEnabled && !quitting()) {
           logger.debug("{}: Region named {} does not exist", this, regionName);
         }
+
       } else if (region.hasServerProxy() && ServerResponseMatrix
-          .checkForValidStateAfterNotification(region, key, m.getMessageType())
+          .checkForValidStateAfterNotification(region, key, clientMessage.getMessageType())
           && (withInterest || !withCQs)) {
         @Released
         EntryEventImpl newEvent = null;
+
         try {
           // Create an event and put the entry
           newEvent = EntryEventImpl.create(region,
-              ((m.getMessageType() == MessageType.LOCAL_CREATE) ? Operation.CREATE
-                  : Operation.UPDATE),
+              clientMessage.getMessageType() == MessageType.LOCAL_CREATE ? Operation.CREATE
+                  : Operation.UPDATE,
               key, null /* newValue */, callbackArgument /* callbackArg */, true /* originRemote */,
               eventId.getDistributedMember());
+
           newEvent.setVersionTag(versionTag);
           newEvent.setFromServer(true);
+
           region.basicBridgeClientUpdate(eventId.getDistributedMember(), key, newValue, deltaBytes,
-              isValueObject, callbackArgument, m.getMessageType() == MessageType.LOCAL_CREATE,
-              qManager.getState().getProcessedMarker() || !this.isDurableClient, newEvent, eventId);
+              isValueObject, callbackArgument,
+              clientMessage.getMessageType() == MessageType.LOCAL_CREATE,
+              this.qManager.getState().getProcessedMarker() || !this.isDurableClient, newEvent,
+              eventId);
+
           this.isOpCompleted = true;
+
           // bug 45520 - ConcurrentCacheModificationException is not thrown and we must check this
           // flag
-          // if (newEvent.isConcurrencyConflict()) {
-          // return; // this is logged elsewhere at fine level
-          // }
           if (withCQs && isDeltaSent) {
             fullValue = newEvent.getNewValue();
           }
-        } catch (InvalidDeltaException ide) {
+        } catch (InvalidDeltaException ignore) {
           Part fullValuePart = requestFullValue(eventId, "Caught InvalidDeltaException.");
           region.getCachePerfStats().incDeltaFullValuesRequested();
-          fullValue = newValue = fullValuePart.getObject();
-          isValueObject = Boolean.valueOf(fullValuePart.isObject());
+          fullValue = newValue = fullValuePart.getObject(); // TODO: fix this line
+          isValueObject = fullValuePart.isObject();
+
           region.basicBridgeClientUpdate(eventId.getDistributedMember(), key, newValue, null,
-              isValueObject, callbackArgument, m.getMessageType() == MessageType.LOCAL_CREATE,
-              qManager.getState().getProcessedMarker() || !this.isDurableClient, newEvent, eventId);
+              isValueObject, callbackArgument,
+              clientMessage.getMessageType() == MessageType.LOCAL_CREATE,
+              this.qManager.getState().getProcessedMarker() || !this.isDurableClient, newEvent,
+              eventId);
+
           this.isOpCompleted = true;
         } finally {
           if (newEvent != null)
@@ -737,20 +744,19 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
 
       // Update CQs. CQs can exist without client region.
       if (withCQs) {
-        Part numCqsPart = m.getPart(partCnt++);
+        Part numCqsPart = clientMessage.getPart(partCnt++);
         if (isDebugEnabled) {
           logger.debug("Received message has CQ Event. Number of cqs interested in the event : {}",
               numCqsPart.getInt() / 2);
         }
-        partCnt = processCqs(m, partCnt, numCqsPart.getInt(), m.getMessageType(), key, fullValue,
-            deltaBytes, eventId);
+        partCnt = processCqs(clientMessage, partCnt, numCqsPart.getInt(),
+            clientMessage.getMessageType(), key, fullValue, deltaBytes, eventId);
         this.isOpCompleted = true;
       }
     } catch (Exception e) {
       String message =
           LocalizedStrings.CacheClientUpdater_THE_FOLLOWING_EXCEPTION_OCCURRED_WHILE_ATTEMPTING_TO_PUT_ENTRY_REGION_0_KEY_1_VALUE_2
-              .toLocalizedString(
-                  new Object[] {regionName, key, deserialize(valuePart.getSerializedForm())});
+              .toLocalizedString(regionName, key, deserialize(valuePart.getSerializedForm()));
       handleException(message, e);
     }
   }
@@ -763,12 +769,14 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
     if (isDebugEnabled) {
       logger.debug("{} Requesting full value...", reason);
     }
-    Part result = (Part) GetEventValueOp.executeOnPrimary(qManager.getPool(), eventId, null);
+    Part result = (Part) GetEventValueOp.executeOnPrimary(this.qManager.getPool(), eventId, null);
 
     if (result == null) {
       // Just log a warning. Do not stop CCU thread.
+      // TODO: throw a subclass of Exception
       throw new Exception("Could not retrieve full value for " + eventId);
     }
+
     if (isDebugEnabled) {
       logger.debug("Full value received.");
     }
@@ -778,39 +786,41 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   /**
    * Invalidate an entry
    * 
-   * @param m message describing the entry
+   * @param clientMessage message describing the entry
    */
-  private void handleInvalidate(Message m) {
+  private void handleInvalidate(Message clientMessage) {
     String regionName = null;
     Object key = null;
-    int partCnt = 0;
-
     final boolean isDebugEnabled = logger.isDebugEnabled();
+
     try {
       this.isOpCompleted = false;
+
       // Retrieve the data from the local-invalidate message parts
       if (isDebugEnabled) {
-        logger.debug("Received invalidate message of length ({} bytes)", m.getPayloadLength());
+        logger.debug("Received invalidate message of length ({} bytes)",
+            clientMessage.getPayloadLength());
       }
 
-      Part regionNamePart = m.getPart(partCnt++);
-      Part keyPart = m.getPart(partCnt++);
-      Part callbackArgumentPart = m.getPart(partCnt++);
+      int partCnt = 0;
+      Part regionNamePart = clientMessage.getPart(partCnt++);
+      Part keyPart = clientMessage.getPart(partCnt++);
+      Part callbackArgumentPart = clientMessage.getPart(partCnt++);
 
-      VersionTag versionTag = (VersionTag) m.getPart(partCnt++).getObject();
+      VersionTag versionTag = (VersionTag) clientMessage.getPart(partCnt++).getObject();
       if (versionTag != null) {
         versionTag.replaceNullIDs((InternalDistributedMember) this.endpoint.getMemberId());
       }
 
-      Part isInterestListPassedPart = m.getPart(partCnt++);
-      Part hasCqsPart = m.getPart(partCnt++);
+      Part isInterestListPassedPart = clientMessage.getPart(partCnt++);
+      Part hasCqsPart = clientMessage.getPart(partCnt++);
 
       regionName = regionNamePart.getString();
       key = keyPart.getStringOrObject();
 
       Object callbackArgument = callbackArgumentPart.getObject();
-      boolean withInterest = ((Boolean) isInterestListPassedPart.getObject()).booleanValue();
-      boolean withCQs = ((Boolean) hasCqsPart.getObject()).booleanValue();
+      boolean withInterest = (Boolean) isInterestListPassedPart.getObject();
+      boolean withCQs = (Boolean) hasCqsPart.getObject();
 
       if (isDebugEnabled) {
         logger.debug(
@@ -818,34 +828,36 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
             regionName, key, callbackArgument, withInterest, withCQs, versionTag);
       }
 
-      LocalRegion region = (LocalRegion) cacheHelper.getRegion(regionName);
+      LocalRegion region = (LocalRegion) this.cacheHelper.getRegion(regionName);
       if (region == null) {
         if (isDebugEnabled && !quitting()) {
           logger.debug("Region named {} does not exist", regionName);
         }
+
       } else {
         if (region.hasServerProxy() && (withInterest || !withCQs)) {
           try {
-            Part eid = m.getPart(m.getNumberOfParts() - 1);
+            Part eid = clientMessage.getPart(clientMessage.getNumberOfParts() - 1);
             EventID eventId = (EventID) eid.getObject();
+
             try {
               region.basicBridgeClientInvalidate(eventId.getDistributedMember(), key,
                   callbackArgument,
-                  qManager.getState().getProcessedMarker() || !this.isDurableClient, eventId,
+                  this.qManager.getState().getProcessedMarker() || !this.isDurableClient, eventId,
                   versionTag);
-            } catch (ConcurrentCacheModificationException e) {
-              // return; allow CQs to be processed
+            } catch (ConcurrentCacheModificationException ignore) {
+              // allow CQs to be processed
             }
+
             this.isOpCompleted = true;
             // fix for 36615
-            qManager.getState().incrementInvalidatedStats();
+            this.qManager.getState().incrementInvalidatedStats();
 
             if (isDebugEnabled) {
               logger.debug("Invalidated entry for region: {} key: {} callbackArgument: {}",
                   regionName, key, callbackArgument);
             }
-          } catch (EntryNotFoundException e) {
-            /* ignore */
+          } catch (EntryNotFoundException ignore) {
             if (isDebugEnabled && !quitting()) {
               logger.debug("Already invalidated entry for region: {} key: {} callbackArgument: {}",
                   regionName, key, callbackArgument);
@@ -858,19 +870,20 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
       if (withCQs) {
         // The client may have been registered to receive invalidates for
         // create and updates operations. Get the actual region operation.
-        Part regionOpType = m.getPart(partCnt++);
-        Part numCqsPart = m.getPart(partCnt++);
+        Part regionOpType = clientMessage.getPart(partCnt++);
+        Part numCqsPart = clientMessage.getPart(partCnt++);
         if (isDebugEnabled) {
           logger.debug("Received message has CQ Event. Number of cqs interested in the event : {}",
               numCqsPart.getInt() / 2);
         }
-        partCnt = processCqs(m, partCnt, numCqsPart.getInt(), regionOpType.getInt(), key, null);
+        partCnt = processCqs(clientMessage, partCnt, numCqsPart.getInt(), regionOpType.getInt(),
+            key, null);
         this.isOpCompleted = true;
       }
     } catch (Exception e) {
       final String message =
           LocalizedStrings.CacheClientUpdater_THE_FOLLOWING_EXCEPTION_OCCURRED_WHILE_ATTEMPTING_TO_INVALIDATE_ENTRY_REGION_0_KEY_1
-              .toLocalizedString(new Object[] {regionName, key});
+              .toLocalizedString(regionName, key);
       handleException(message, e);
     }
   }
@@ -878,26 +891,27 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   /**
    * locally destroy an entry
    * 
-   * @param m message describing the entry
+   * @param clientMessage message describing the entry
    */
-  private void handleDestroy(Message m) {
+  private void handleDestroy(Message clientMessage) {
     String regionName = null;
     Object key = null;
-    int partCnt = 0;
-
     final boolean isDebugEnabled = logger.isDebugEnabled();
+
     try {
       this.isOpCompleted = false;
       // Retrieve the data from the local-destroy message parts
       if (isDebugEnabled) {
-        logger.debug("Received destroy message of length ({} bytes)", m.getPayloadLength());
+        logger.debug("Received destroy message of length ({} bytes)",
+            clientMessage.getPayloadLength());
       }
 
-      Part regionNamePart = m.getPart(partCnt++);
-      Part keyPart = m.getPart(partCnt++);
-      Part callbackArgumentPart = m.getPart(partCnt++);
+      int partCnt = 0;
+      Part regionNamePart = clientMessage.getPart(partCnt++);
+      Part keyPart = clientMessage.getPart(partCnt++);
+      Part callbackArgumentPart = clientMessage.getPart(partCnt++);
 
-      VersionTag versionTag = (VersionTag) m.getPart(partCnt++).getObject();
+      VersionTag versionTag = (VersionTag) clientMessage.getPart(partCnt++).getObject();
       if (versionTag != null) {
         versionTag.replaceNullIDs((InternalDistributedMember) this.endpoint.getMemberId());
       }
@@ -905,8 +919,8 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
       regionName = regionNamePart.getString();
       key = keyPart.getStringOrObject();
 
-      Part isInterestListPassedPart = m.getPart(partCnt++);
-      Part hasCqsPart = m.getPart(partCnt++);
+      Part isInterestListPassedPart = clientMessage.getPart(partCnt++);
+      Part hasCqsPart = clientMessage.getPart(partCnt++);
 
       boolean withInterest = ((Boolean) isInterestListPassedPart.getObject()).booleanValue();
       boolean withCQs = ((Boolean) hasCqsPart.getObject()).booleanValue();
@@ -918,30 +932,32 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
             regionName, key, callbackArgument, withInterest, withCQs, versionTag);
       }
 
-      LocalRegion region = (LocalRegion) cacheHelper.getRegion(regionName);
-      EventID eventId = null;
+      LocalRegion region = (LocalRegion) this.cacheHelper.getRegion(regionName);
       if (region == null) {
         if (isDebugEnabled && !quitting()) {
           logger.debug("Region named {} does not exist", regionName);
         }
+
       } else if (region.hasServerProxy() && (withInterest || !withCQs)) {
+        EventID eventId = null;
         try {
-          Part eid = m.getPart(m.getNumberOfParts() - 1);
+          Part eid = clientMessage.getPart(clientMessage.getNumberOfParts() - 1);
           eventId = (EventID) eid.getObject();
+
           try {
             region.basicBridgeClientDestroy(eventId.getDistributedMember(), key, callbackArgument,
-                qManager.getState().getProcessedMarker() || !this.isDurableClient, eventId,
+                this.qManager.getState().getProcessedMarker() || !this.isDurableClient, eventId,
                 versionTag);
-          } catch (ConcurrentCacheModificationException e) {
-            // return; allow CQs to be processed
+          } catch (ConcurrentCacheModificationException ignore) {
+            // allow CQs to be processed
           }
+
           this.isOpCompleted = true;
           if (isDebugEnabled) {
             logger.debug("Destroyed entry for region: {} key: {} callbackArgument: {}", regionName,
                 key, callbackArgument);
           }
-        } catch (EntryNotFoundException e) {
-          /* ignore */
+        } catch (EntryNotFoundException ignore) {
           if (isDebugEnabled && !quitting()) {
             logger.debug(
                 "Already destroyed entry for region: {} key: {} callbackArgument: {} eventId={}",
@@ -952,18 +968,19 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
       }
 
       if (withCQs) {
-        Part numCqsPart = m.getPart(partCnt++);
+        Part numCqsPart = clientMessage.getPart(partCnt++);
         if (isDebugEnabled) {
           logger.debug("Received message has CQ Event. Number of cqs interested in the event : {}",
               numCqsPart.getInt() / 2);
         }
-        partCnt = processCqs(m, partCnt, numCqsPart.getInt(), m.getMessageType(), key, null);
+        partCnt = processCqs(clientMessage, partCnt, numCqsPart.getInt(),
+            clientMessage.getMessageType(), key, null);
         this.isOpCompleted = true;
       }
     } catch (Exception e) {
       String message =
           LocalizedStrings.CacheClientUpdater_THE_FOLLOWING_EXCEPTION_OCCURRED_WHILE_ATTEMPTING_TO_DESTROY_ENTRY_REGION_0_KEY_1
-              .toLocalizedString(new Object[] {regionName, key});
+              .toLocalizedString(regionName, key);
       handleException(message, e);
     }
   }
@@ -971,44 +988,44 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   /**
    * Locally destroy a region
    * 
-   * @param m message describing the region
+   * @param clientMessage message describing the region
    */
-  private void handleDestroyRegion(Message m) {
-    Part regionNamePart = null, callbackArgumentPart = null;
+  private void handleDestroyRegion(Message clientMessage) {
     String regionName = null;
-    Object callbackArgument = null;
-    LocalRegion region = null;
-    int partCnt = 0;
-
     final boolean isDebugEnabled = logger.isDebugEnabled();
+
     try {
       // Retrieve the data from the local-destroy-region message parts
       if (isDebugEnabled) {
-        logger.debug("Received destroy region message of length ({} bytes)", m.getPayloadLength());
+        logger.debug("Received destroy region message of length ({} bytes)",
+            clientMessage.getPayloadLength());
       }
-      regionNamePart = m.getPart(partCnt++);
-      callbackArgumentPart = m.getPart(partCnt++);
+      int partCnt = 0;
+      Part regionNamePart = clientMessage.getPart(partCnt++);
+      Part callbackArgumentPart = clientMessage.getPart(partCnt++);
       regionName = regionNamePart.getString();
-      callbackArgument = callbackArgumentPart.getObject();
+      Object callbackArgument = callbackArgumentPart.getObject();
 
-      Part hasCqsPart = m.getPart(partCnt++);
+      Part hasCqsPart = clientMessage.getPart(partCnt++);
 
       if (isDebugEnabled) {
         logger.debug("Destroying region: {} callbackArgument: {}", regionName, callbackArgument);
       }
 
       // Handle CQs if any on this region.
-      if (((Boolean) hasCqsPart.getObject()).booleanValue()) {
-        Part numCqsPart = m.getPart(partCnt++);
+      if ((Boolean) hasCqsPart.getObject()) {
+        Part numCqsPart = clientMessage.getPart(partCnt++);
         if (isDebugEnabled) {
           logger.debug("Received message has CQ Event. Number of cqs interested in the event : {}",
               numCqsPart.getInt() / 2);
         }
-        partCnt = processCqs(m, partCnt, numCqsPart.getInt(), m.getMessageType(), null, null);
+        // TODO: partCnt is unused -- does processCqs have side effects?
+        partCnt = processCqs(clientMessage, partCnt, numCqsPart.getInt(),
+            clientMessage.getMessageType(), null, null);
       }
 
       // Confirm that the region exists
-      region = (LocalRegion) cacheHelper.getRegion(regionName);
+      LocalRegion region = (LocalRegion) this.cacheHelper.getRegion(regionName);
       if (region == null) {
         if (isDebugEnabled && !quitting()) {
           logger.debug("Region named {} does not exist", regionName);
@@ -1025,7 +1042,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
           logger.debug("Destroyed region: {} callbackArgument: {}", regionName, callbackArgument);
         }
       }
-    } catch (RegionDestroyedException e) { // already destroyed
+    } catch (RegionDestroyedException ignore) { // already destroyed
       if (isDebugEnabled) {
         logger.debug("region already destroyed: {}", regionName);
       }
@@ -1040,24 +1057,24 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   /**
    * Locally clear a region
    * 
-   * @param m message describing the region to clear
+   * @param clientMessage message describing the region to clear
    */
-  private void handleClearRegion(Message m) {
+  private void handleClearRegion(Message clientMessage) {
     String regionName = null;
-    int partCnt = 0;
-
     final boolean isDebugEnabled = logger.isDebugEnabled();
+
     try {
       // Retrieve the data from the clear-region message parts
       if (isDebugEnabled) {
         logger.debug("{}: Received clear region message of length ({} bytes)", this,
-            m.getPayloadLength());
+            clientMessage.getPayloadLength());
       }
 
-      Part regionNamePart = m.getPart(partCnt++);
-      Part callbackArgumentPart = m.getPart(partCnt++);
+      int partCnt = 0;
+      Part regionNamePart = clientMessage.getPart(partCnt++);
+      Part callbackArgumentPart = clientMessage.getPart(partCnt++);
 
-      Part hasCqsPart = m.getPart(partCnt++);
+      Part hasCqsPart = clientMessage.getPart(partCnt++);
 
       regionName = regionNamePart.getString();
       Object callbackArgument = callbackArgumentPart.getObject();
@@ -1065,17 +1082,18 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
         logger.debug("Clearing region: {} callbackArgument: {}", regionName, callbackArgument);
       }
 
-      if (((Boolean) hasCqsPart.getObject()).booleanValue()) {
-        Part numCqsPart = m.getPart(partCnt++);
+      if ((Boolean) hasCqsPart.getObject()) {
+        Part numCqsPart = clientMessage.getPart(partCnt++);
         if (isDebugEnabled) {
           logger.debug("Received message has CQ Event. Number of cqs interested in the event : {}",
               numCqsPart.getInt() / 2);
         }
-        partCnt = processCqs(m, partCnt, numCqsPart.getInt(), m.getMessageType(), null, null);
+        partCnt = processCqs(clientMessage, partCnt, numCqsPart.getInt(),
+            clientMessage.getMessageType(), null, null);
       }
 
       // Confirm that the region exists
-      LocalRegion region = (LocalRegion) cacheHelper.getRegion(regionName);
+      LocalRegion region = (LocalRegion) this.cacheHelper.getRegion(regionName);
       if (region == null) {
         if (isDebugEnabled && !quitting()) {
           logger.debug("Region named {} does not exist", regionName);
@@ -1088,7 +1106,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
       if (region.hasServerProxy()) {
         // Locally clear the region
         region.basicBridgeClientClear(callbackArgument,
-            qManager.getState().getProcessedMarker() || !this.isDurableClient);
+            this.qManager.getState().getProcessedMarker() || !this.isDurableClient);
 
         if (isDebugEnabled) {
           logger.debug("Cleared region: {} callbackArgument: {}", regionName, callbackArgument);
@@ -1106,50 +1124,44 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    * Locally invalidate a region NOTE: Added as part of bug#38048. The code only takes care of CQ
    * processing. Support needs to be added for local region invalidate.
    * 
-   * @param m message describing the region to clear
+   * @param clientMessage message describing the region to clear
    */
-  private void handleInvalidateRegion(Message m) {
+  private void handleInvalidateRegion(Message clientMessage) {
     String regionName = null;
-    int partCnt = 0;
-
     final boolean isDebugEnabled = logger.isDebugEnabled();
+
     try {
       // Retrieve the data from the invalidate-region message parts
       if (isDebugEnabled) {
         logger.debug("{}: Received invalidate region message of length ({} bytes)", this,
-            m.getPayloadLength());
+            clientMessage.getPayloadLength());
       }
 
-      Part regionNamePart = m.getPart(partCnt++);
+      int partCnt = 0;
+      Part regionNamePart = clientMessage.getPart(partCnt++);
       partCnt++; // Part callbackArgumentPart = m.getPart(partCnt++);
 
-      Part hasCqsPart = m.getPart(partCnt++);
+      Part hasCqsPart = clientMessage.getPart(partCnt++);
 
       regionName = regionNamePart.getString();
-      // Object callbackArgument = callbackArgumentPart.getObject();
 
-      if (((Boolean) hasCqsPart.getObject()).booleanValue()) {
-        Part numCqsPart = m.getPart(partCnt++);
+      if ((Boolean) hasCqsPart.getObject()) {
+        Part numCqsPart = clientMessage.getPart(partCnt++);
         if (isDebugEnabled) {
           logger.debug("Received message has CQ Event. Number of cqs interested in the event : {}",
               numCqsPart.getInt() / 2);
         }
-        partCnt = processCqs(m, partCnt, numCqsPart.getInt(), m.getMessageType(), null, null);
+        // TODO: partCnt is unused
+        partCnt = processCqs(clientMessage, partCnt, numCqsPart.getInt(),
+            clientMessage.getMessageType(), null, null);
       }
 
       // Confirm that the region exists
-      LocalRegion region = (LocalRegion) cacheHelper.getRegion(regionName);
+      LocalRegion region = (LocalRegion) this.cacheHelper.getRegion(regionName);
       if (region == null) {
         if (isDebugEnabled && !quitting()) {
           logger.debug("Region named {} does not exist", regionName);
         }
-        return;
-      }
-
-      // Verify that the region in question should respond to this
-      // message
-      if (region.hasServerProxy()) {
-        return;
       }
 
     } catch (Exception e) {
@@ -1163,40 +1175,39 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   /**
    * Register instantiators locally
    *
-   * @param msg message describing the new instantiators
+   * @param clientMessage message describing the new instantiators
    * @param eventId eventId of the instantiators
    */
-  private void handleRegisterInstantiator(Message msg, EventID eventId) {
+  private void handleRegisterInstantiator(Message clientMessage, EventID eventId) {
     String instantiatorClassName = null;
     final boolean isDebugEnabled = logger.isDebugEnabled();
+
     try {
-      int noOfParts = msg.getNumberOfParts();
+      int noOfParts = clientMessage.getNumberOfParts();
       if (isDebugEnabled) {
         logger.debug("{}: Received register instantiators message of parts {}", getName(),
             noOfParts);
       }
+
       Assert.assertTrue((noOfParts - 1) % 3 == 0);
-      for (int i = 0; i < noOfParts - 1; i = i + 3) {
+      for (int i = 0; i < noOfParts - 1; i += 3) {
         instantiatorClassName =
-            (String) CacheServerHelper.deserialize(msg.getPart(i).getSerializedForm());
-        String instantiatedClassName =
-            (String) CacheServerHelper.deserialize(msg.getPart(i + 1).getSerializedForm());
-        int id = msg.getPart(i + 2).getInt();
+            (String) CacheServerHelper.deserialize(clientMessage.getPart(i).getSerializedForm());
+        String instantiatedClassName = (String) CacheServerHelper
+            .deserialize(clientMessage.getPart(i + 1).getSerializedForm());
+        int id = clientMessage.getPart(i + 2).getInt();
         InternalInstantiator.register(instantiatorClassName, instantiatedClassName, id, false,
-            eventId, null/* context */);
-        // distribute is false because we don't want to propagate this to
-        // servers recursively
+            eventId, null);
+        // distribute is false because we don't want to propagate this to servers recursively
       }
 
       // CALLBACK TESTING PURPOSE ONLY
       if (PoolImpl.IS_INSTANTIATOR_CALLBACK) {
-        ClientServerObserver bo = ClientServerObserverHolder.getInstance();
-        bo.afterReceivingFromServer(eventId);
+        ClientServerObserver clientServerObserver = ClientServerObserverHolder.getInstance();
+        clientServerObserver.afterReceivingFromServer(eventId);
       }
 
-    }
-    // TODO bug: can the following catch be more specific?
-    catch (Exception e) {
+    } catch (Exception e) {
       if (isDebugEnabled) {
         logger.debug("{}: Caught following exception while attempting to read Instantiator : {}",
             this, instantiatorClassName, e);
@@ -1207,6 +1218,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   private void handleRegisterDataSerializer(Message msg, EventID eventId) {
     Class dataSerializerClass = null;
     final boolean isDebugEnabled = logger.isDebugEnabled();
+
     try {
       int noOfParts = msg.getNumberOfParts();
       if (isDebugEnabled) {
@@ -1220,8 +1232,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
               (String) CacheServerHelper.deserialize(msg.getPart(i).getSerializedForm());
           int id = msg.getPart(i + 1).getInt();
           InternalDataSerializer.register(dataSerializerClassName, false, eventId, null, id);
-          // distribute is false because we don't want to propagate this to
-          // servers recursively
+          // distribute is false because we don't want to propagate this to servers recursively
 
           int numOfClasses = msg.getPart(i + 2).getInt();
           int j = 0;
@@ -1230,7 +1241,8 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
                 (String) CacheServerHelper.deserialize(msg.getPart(i + 3 + j).getSerializedForm());
             InternalDataSerializer.updateSupportedClassesMap(dataSerializerClassName, className);
           }
-          i = i + 3 + j;
+
+          i += 3 + j;
         } catch (ClassNotFoundException e) {
           if (isDebugEnabled) {
             logger.debug(
@@ -1246,9 +1258,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
         bo.afterReceivingFromServer(eventId);
       }
 
-    }
-    // TODO bug: can the following catch be more specific?
-    catch (Exception e) {
+    } catch (Exception e) {
       if (isDebugEnabled) {
         logger.debug("{}: Caught following exception while attempting to read DataSerializer : {}",
             this, dataSerializerClass, e);
@@ -1259,93 +1269,87 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
   /**
    * Processes message to invoke CQ listeners.
    */
-  private int processCqs(Message m, int startMessagePart, int numCqParts, int messageType,
-      Object key, Object value) {
-    return processCqs(m, startMessagePart, numCqParts, messageType, key, value, null,
-        null/* eventId */);
+  private int processCqs(Message clientMessage, int startMessagePart, int numCqParts,
+      int messageType, Object key, Object value) {
+    return processCqs(clientMessage, startMessagePart, numCqParts, messageType, key, value, null,
+        null);
   }
 
-  private int processCqs(Message m, int startMessagePart, int numCqParts, int messageType,
-      Object key, Object value, byte[] delta, EventID eventId) {
+  private int processCqs(Message clientMessage, int startMessagePart, int numCqParts,
+      int messageType, Object key, Object value, byte[] delta, EventID eventId) {
     HashMap cqs = new HashMap();
     final boolean isDebugEnabled = logger.isDebugEnabled();
 
     for (int cqCnt = 0; cqCnt < numCqParts;) {
-      StringBuilder str = null;
+      StringBuilder sb = null;
       if (isDebugEnabled) {
-        str = new StringBuilder(100);
-        str.append("found these queries: ");
+        sb = new StringBuilder(100);
+        sb.append("found these queries: ");
       }
       try {
         // Get CQ Name.
-        Part cqNamePart = m.getPart(startMessagePart + (cqCnt++));
+        Part cqNamePart = clientMessage.getPart(startMessagePart + cqCnt++);
         // Get CQ Op.
-        Part cqOpPart = m.getPart(startMessagePart + (cqCnt++));
-        cqs.put(cqNamePart.getString(), Integer.valueOf(cqOpPart.getInt()));
+        Part cqOpPart = clientMessage.getPart(startMessagePart + cqCnt++);
+        cqs.put(cqNamePart.getString(), cqOpPart.getInt());
 
-        if (str != null) {
-          str.append(cqNamePart.getString()).append(" op=").append(cqOpPart.getInt()).append("  ");
+        if (sb != null) {
+          sb.append(cqNamePart.getString()).append(" op=").append(cqOpPart.getInt()).append("  ");
         }
-      } catch (Exception ex) {
+      } catch (Exception ignore) {
         logger.warn(LocalizedMessage.create(
             LocalizedStrings.CacheClientUpdater_ERROR_WHILE_PROCESSING_THE_CQ_MESSAGE_PROBLEM_WITH_READING_MESSAGE_FOR_CQ_0,
             cqCnt));
       }
-      if (isDebugEnabled && str != null) {
-        logger.debug(str);
+      if (isDebugEnabled) {
+        logger.debug(sb);
       }
     }
 
-    {
-      CqService cqService = this.cache.getCqService();
-      try {
-        cqService.dispatchCqListeners(cqs, messageType, key, value, delta, qManager, eventId);
-      } catch (Exception ex) {
-        logger.warn(LocalizedMessage.create(
-            LocalizedStrings.CacheClientUpdater_FAILED_TO_INVOKE_CQ_DISPATCHER_ERROR___0,
-            ex.getMessage()));
-        if (isDebugEnabled) {
-          logger.debug("Failed to invoke CQ Dispatcher.", ex);
-        }
+    CqService cqService = this.cache.getCqService();
+    try {
+      cqService.dispatchCqListeners(cqs, messageType, key, value, delta, this.qManager, eventId);
+    } catch (Exception ex) {
+      logger.warn(LocalizedMessage.create(
+          LocalizedStrings.CacheClientUpdater_FAILED_TO_INVOKE_CQ_DISPATCHER_ERROR___0,
+          ex.getMessage()));
+      if (isDebugEnabled) {
+        logger.debug("Failed to invoke CQ Dispatcher.", ex);
       }
     }
 
-    return (startMessagePart + numCqParts);
+    return startMessagePart + numCqParts;
   }
 
-  private void handleRegisterInterest(Message m) {
+  private void handleRegisterInterest(Message clientMessage) {
     String regionName = null;
     Object key = null;
-    int interestType;
-    byte interestResultPolicy;
-    boolean isDurable;
-    boolean receiveUpdatesAsInvalidates;
-    int partCnt = 0;
-
     final boolean isDebugEnabled = logger.isDebugEnabled();
+
     try {
       // Retrieve the data from the add interest message parts
       if (isDebugEnabled) {
         logger.debug("{}: Received add interest message of length ({} bytes)", this,
-            m.getPayloadLength());
+            clientMessage.getPayloadLength());
       }
-      Part regionNamePart = m.getPart(partCnt++);
-      Part keyPart = m.getPart(partCnt++);
-      Part interestTypePart = m.getPart(partCnt++);
-      Part interestResultPolicyPart = m.getPart(partCnt++);
-      Part isDurablePart = m.getPart(partCnt++);
-      Part receiveUpdatesAsInvalidatesPart = m.getPart(partCnt++);
+
+      int partCnt = 0;
+      Part regionNamePart = clientMessage.getPart(partCnt++);
+      Part keyPart = clientMessage.getPart(partCnt++);
+      Part interestTypePart = clientMessage.getPart(partCnt++);
+      Part interestResultPolicyPart = clientMessage.getPart(partCnt++);
+      Part isDurablePart = clientMessage.getPart(partCnt++);
+      Part receiveUpdatesAsInvalidatesPart = clientMessage.getPart(partCnt++);
 
       regionName = regionNamePart.getString();
       key = keyPart.getStringOrObject();
-      interestType = ((Integer) interestTypePart.getObject()).intValue();
-      interestResultPolicy = ((Byte) interestResultPolicyPart.getObject()).byteValue();
-      isDurable = ((Boolean) isDurablePart.getObject()).booleanValue();
-      receiveUpdatesAsInvalidates =
-          ((Boolean) receiveUpdatesAsInvalidatesPart.getObject()).booleanValue();
+      int interestType = (Integer) interestTypePart.getObject();
+      byte interestResultPolicy = (Byte) interestResultPolicyPart.getObject();
+      boolean isDurable = (Boolean) isDurablePart.getObject();
+      boolean receiveUpdatesAsInvalidates = (Boolean) receiveUpdatesAsInvalidatesPart.getObject();
 
       // Confirm that region exists
-      LocalRegion region = (LocalRegion) cacheHelper.getRegion(regionName);
+      LocalRegion region = (LocalRegion) this.cacheHelper.getRegion(regionName);
       if (region == null) {
         if (isDebugEnabled && !quitting()) {
           logger.debug("{}: Region named {} does not exist", this, regionName);
@@ -1375,38 +1379,34 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
     }
   }
 
-  private void handleUnregisterInterest(Message m) {
+  private void handleUnregisterInterest(Message clientMessage) {
     String regionName = null;
     Object key = null;
-    int interestType;
-    boolean isDurable;
-    boolean receiveUpdatesAsInvalidates;
-    int partCnt = 0;
-
     final boolean isDebugEnabled = logger.isDebugEnabled();
+
     try {
       // Retrieve the data from the remove interest message parts
       if (isDebugEnabled) {
         logger.debug("{}: Received remove interest message of length ({} bytes)", this,
-            m.getPayloadLength());
+            clientMessage.getPayloadLength());
       }
 
-      Part regionNamePart = m.getPart(partCnt++);
-      Part keyPart = m.getPart(partCnt++);
-      Part interestTypePart = m.getPart(partCnt++);
-      Part isDurablePart = m.getPart(partCnt++);
-      Part receiveUpdatesAsInvalidatesPart = m.getPart(partCnt++);
+      int partCnt = 0;
+      Part regionNamePart = clientMessage.getPart(partCnt++);
+      Part keyPart = clientMessage.getPart(partCnt++);
+      Part interestTypePart = clientMessage.getPart(partCnt++);
+      Part isDurablePart = clientMessage.getPart(partCnt++);
+      Part receiveUpdatesAsInvalidatesPart = clientMessage.getPart(partCnt++);
       // Not reading the eventId part
 
       regionName = regionNamePart.getString();
       key = keyPart.getStringOrObject();
-      interestType = ((Integer) interestTypePart.getObject()).intValue();
-      isDurable = ((Boolean) isDurablePart.getObject()).booleanValue();
-      receiveUpdatesAsInvalidates =
-          ((Boolean) receiveUpdatesAsInvalidatesPart.getObject()).booleanValue();
+      int interestType = (Integer) interestTypePart.getObject();
+      boolean isDurable = (Boolean) isDurablePart.getObject();
+      boolean receiveUpdatesAsInvalidates = (Boolean) receiveUpdatesAsInvalidatesPart.getObject();
 
       // Confirm that region exists
-      LocalRegion region = (LocalRegion) cacheHelper.getRegion(regionName);
+      LocalRegion region = (LocalRegion) this.cacheHelper.getRegion(regionName);
       if (region == null) {
         if (isDebugEnabled) {
           logger.debug("{}: Region named {} does not exist", this, regionName);
@@ -1434,14 +1434,17 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
     }
   }
 
-  private void handleTombstoneOperation(Message msg) {
+  private void handleTombstoneOperation(Message clientMessage) {
     String regionName = "unknown";
+
     try { // not sure why this isn't done by the caller
       int partIdx = 0;
+
       // see ClientTombstoneMessage.getGFE70Message
-      regionName = msg.getPart(partIdx++).getString();
-      int op = msg.getPart(partIdx++).getInt();
-      LocalRegion region = (LocalRegion) cacheHelper.getRegion(regionName);
+      regionName = clientMessage.getPart(partIdx++).getString();
+      int op = clientMessage.getPart(partIdx++).getInt();
+      LocalRegion region = (LocalRegion) this.cacheHelper.getRegion(regionName);
+
       if (region == null) {
         if (!quitting()) {
           if (logger.isDebugEnabled()) {
@@ -1450,24 +1453,29 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
         }
         return;
       }
+
       if (logger.isDebugEnabled()) {
         logger.debug("{}: Received tombstone operation for region {} with operation={}", this,
             region, op);
       }
+
       if (!region.getConcurrencyChecksEnabled()) {
         return;
       }
+
       switch (op) {
         case 0:
           Map<VersionSource, Long> regionGCVersions =
-              (Map<VersionSource, Long>) msg.getPart(partIdx++).getObject();
-          EventID eventID = (EventID) msg.getPart(partIdx++).getObject();
+              (Map<VersionSource, Long>) clientMessage.getPart(partIdx++).getObject();
+          EventID eventID = (EventID) clientMessage.getPart(partIdx++).getObject();
           region.expireTombstones(regionGCVersions, eventID, null);
           break;
+
         case 1:
-          Set<Object> removedKeys = (Set<Object>) msg.getPart(partIdx++).getObject();
+          Set<Object> removedKeys = (Set<Object>) clientMessage.getPart(partIdx++).getObject();
           region.expireTombstoneKeys(removedKeys);
           break;
+
         default:
           throw new IllegalArgumentException("unknown operation type " + op);
       }
@@ -1483,22 +1491,21 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    */
   private boolean quitting() {
     if (isInterrupted()) {
-      // Any time an interrupt is thrown at this thread, regard it as a
-      // request to terminate
+      // Any time an interrupt is thrown at this thread, regard it as a request to terminate
       return true;
     }
-    if (!continueProcessing.get()) {
+    if (!this.continueProcessing.get()) {
       // de facto flag indicating we are to stop
       return true;
     }
-    if (cache != null && cache.getCancelCriterion().isCancelInProgress()) {
+    if (this.cache != null && this.cache.getCancelCriterion().isCancelInProgress()) {
       // System is cancelling
       return true;
     }
 
     // The pool stuff is really sick, so it's possible for us to have a distributed
     // system that is not the same as our cache. Check it just in case...
-    if (system.getCancelCriterion().isCancelInProgress()) {
+    if (this.system.getCancelCriterion().isCancelInProgress()) {
       return true;
     }
 
@@ -1520,15 +1527,15 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
           this.failedUpdater.join(5000);
         }
       }
-    } catch (InterruptedException ie) {
+    } catch (InterruptedException ignore) {
       gotInterrupted = true;
-      return; // just bail, because I have not done anything yet
+      // just bail, because I have not done anything yet
     } finally {
       if (!gotInterrupted && this.failedUpdater != null) {
         logger.info(LocalizedMessage.create(
             LocalizedStrings.CacheClientUpdater_0_HAS_COMPLETED_WAITING_FOR_1,
             new Object[] {this, this.failedUpdater}));
-        failedUpdater = null;
+        this.failedUpdater = null;
       }
     }
   }
@@ -1537,6 +1544,8 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    * Processes messages received from the server.
    * 
    * Only certain types of messages are handled.
+   *
+   * TODO: Method 'processMessages' is too complex to analyze by data flow algorithm
    * 
    * @see MessageType#CLIENT_MARKER
    * @see MessageType#LOCAL_CREATE
@@ -1547,11 +1556,11 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    * @see MessageType#CLEAR_REGION
    * @see ClientUpdateMessage
    */
-  protected void processMessages() {
+  private void processMessages() {
     final boolean isDebugEnabled = logger.isDebugEnabled();
     try {
-      Part eid = null;
-      Message _message = initializeMessage();
+      Message clientMessage = initializeMessage();
+
       if (quitting()) {
         if (isDebugEnabled) {
           logger.debug("processMessages quitting early because we have stopped");
@@ -1559,11 +1568,11 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
         // our caller calls close which will notify all waiters for our init
         return;
       }
+
       logger.info(LocalizedMessage
           .create(LocalizedStrings.CacheClientUpdater_0_READY_TO_PROCESS_MESSAGES, this));
 
-      while (continueProcessing.get()) {
-        // SystemFailure.checkFailure(); dm will check this
+      while (this.continueProcessing.get()) {
         if (quitting()) {
           if (isDebugEnabled) {
             logger.debug("termination detected");
@@ -1583,12 +1592,12 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
 
         try {
           // Read the message
-          _message.recv();
+          clientMessage.recv();
 
           // Wait for the previously failed cache client updater
           // to finish. This will avoid out of order messages.
           waitForFailedUpdater();
-          cache.waitForRegisterInterestsInProgress();
+          this.cache.waitForRegisterInterestsInProgress();
           if (quitting()) {
             if (isDebugEnabled) {
               logger.debug("processMessages quitting before processing message");
@@ -1597,7 +1606,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
           }
 
           // If the message is a ping, ignore it
-          if (_message.getMessageType() == MessageType.SERVER_TO_CLIENT_PING) {
+          if (clientMessage.getMessageType() == MessageType.SERVER_TO_CLIENT_PING) {
             if (isDebugEnabled) {
               logger.debug("{}: Received ping", this);
             }
@@ -1605,76 +1614,80 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
           }
 
           boolean isDeltaSent = false;
-          boolean isCreateOrUpdate = _message.getMessageType() == MessageType.LOCAL_CREATE
-              || _message.getMessageType() == MessageType.LOCAL_UPDATE;
+          boolean isCreateOrUpdate = clientMessage.getMessageType() == MessageType.LOCAL_CREATE
+              || clientMessage.getMessageType() == MessageType.LOCAL_UPDATE;
           if (isCreateOrUpdate) {
-            isDeltaSent = ((Boolean) _message.getPart(2).getObject()).booleanValue();
+            isDeltaSent = (Boolean) clientMessage.getPart(2).getObject();
           }
 
           // extract the eventId and verify if it is a duplicate event
           // if it is a duplicate event, ignore
           // @since GemFire 5.1
-          int numberOfParts = _message.getNumberOfParts();
-          eid = _message.getPart(numberOfParts - 1);
+          int numberOfParts = clientMessage.getNumberOfParts();
+          Part eid = clientMessage.getPart(numberOfParts - 1);
+
           // TODO the message handling methods also deserialized the eventID - inefficient
           EventID eventId = (EventID) eid.getObject();
 
           // no need to verify if the instantiator msg is duplicate or not
-          if (_message.getMessageType() != MessageType.REGISTER_INSTANTIATORS
-              && _message.getMessageType() != MessageType.REGISTER_DATASERIALIZERS) {
+          if (clientMessage.getMessageType() != MessageType.REGISTER_INSTANTIATORS
+              && clientMessage.getMessageType() != MessageType.REGISTER_DATASERIALIZERS) {
             if (this.qManager.getState().verifyIfDuplicate(eventId,
                 !(this.isDurableClient || isDeltaSent))) {
               continue;
             }
           }
+
           if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
-            logger.trace(LogMarker.BRIDGE_SERVER,
-                "Processing event with id {}" + eventId.expensiveToString());
+            logger.trace(LogMarker.BRIDGE_SERVER, "Processing event with id {}",
+                eventId.expensiveToString());
           }
+
           this.isOpCompleted = true;
+
           // Process the message
-          switch (_message.getMessageType()) {
+          switch (clientMessage.getMessageType()) {
             case MessageType.LOCAL_CREATE:
             case MessageType.LOCAL_UPDATE:
-              handleUpdate(_message);
+              handleUpdate(clientMessage);
               break;
             case MessageType.LOCAL_INVALIDATE:
-              handleInvalidate(_message);
+              handleInvalidate(clientMessage);
               break;
             case MessageType.LOCAL_DESTROY:
-              handleDestroy(_message);
+              handleDestroy(clientMessage);
               break;
             case MessageType.LOCAL_DESTROY_REGION:
-              handleDestroyRegion(_message);
+              handleDestroyRegion(clientMessage);
               break;
             case MessageType.CLEAR_REGION:
-              handleClearRegion(_message);
+              handleClearRegion(clientMessage);
               break;
             case MessageType.REGISTER_INSTANTIATORS:
-              handleRegisterInstantiator(_message, eventId);
+              handleRegisterInstantiator(clientMessage, eventId);
               break;
             case MessageType.REGISTER_DATASERIALIZERS:
-              handleRegisterDataSerializer(_message, eventId);
+              handleRegisterDataSerializer(clientMessage, eventId);
               break;
             case MessageType.CLIENT_MARKER:
-              handleMarker(_message);
+              handleMarker(clientMessage);
               break;
             case MessageType.INVALIDATE_REGION:
-              handleInvalidateRegion(_message);
+              handleInvalidateRegion(clientMessage);
               break;
             case MessageType.CLIENT_REGISTER_INTEREST:
-              handleRegisterInterest(_message);
+              handleRegisterInterest(clientMessage);
               break;
             case MessageType.CLIENT_UNREGISTER_INTEREST:
-              handleUnregisterInterest(_message);
+              handleUnregisterInterest(clientMessage);
               break;
             case MessageType.TOMBSTONE_OPERATION:
-              handleTombstoneOperation(_message);
+              handleTombstoneOperation(clientMessage);
               break;
             default:
               logger.warn(LocalizedMessage.create(
                   LocalizedStrings.CacheClientUpdater_0_RECEIVED_AN_UNSUPPORTED_MESSAGE_TYPE_1,
-                  new Object[] {this, MessageType.getString(_message.getMessageType())}));
+                  new Object[] {this, MessageType.getString(clientMessage.getMessageType())}));
               break;
           }
 
@@ -1689,7 +1702,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
           // likely to send pings...
           // and the ClientHealthMonitor will cause a disconnect
 
-        } catch (InterruptedIOException e) {
+        } catch (InterruptedIOException ignore) {
           // Per Sun's support web site, this exception seems to be peculiar
           // to Solaris, and may eventually not even be generated there.
           //
@@ -1697,62 +1710,59 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
           // isInterrupted() is false. (How very odd!)
           //
           // We regard it the same as an InterruptedException
-          this.endPointDied = true;
 
-          continueProcessing.set(false);// = false;
+          this.continueProcessing.set(false);
           if (isDebugEnabled) {
             logger.debug("InterruptedIOException");
           }
+
         } catch (IOException e) {
-          this.endPointDied = true;
           // Either the server went away, or we caught a closing condition.
           if (!quitting()) {
             // Server departed; print a message.
-            String message = ": Caught the following exception and will exit: ";
-            String errMessage = e.getMessage();
-            if (errMessage == null) {
-              errMessage = "";
-            }
-            ClientServerObserver bo = ClientServerObserverHolder.getInstance();
-            bo.beforeFailoverByCacheClientUpdater(this.location);
-            eManager.serverCrashed(this.endpoint);
+            ClientServerObserver clientServerObserver = ClientServerObserverHolder.getInstance();
+            clientServerObserver.beforeFailoverByCacheClientUpdater(this.location);
+            this.eManager.serverCrashed(this.endpoint);
             if (isDebugEnabled) {
-              logger.debug("" + message + e);
+              logger.debug("Caught the following exception and will exit", e);
             }
           } // !quitting
 
           // In any event, terminate this thread.
-          continueProcessing.set(false);// = false;
+          this.continueProcessing.set(false);
           if (isDebugEnabled) {
             logger.debug("terminated due to IOException");
           }
+
         } catch (Exception e) {
           if (!quitting()) {
-            this.endPointDied = true;
-            ClientServerObserver bo = ClientServerObserverHolder.getInstance();
-            bo.beforeFailoverByCacheClientUpdater(this.location);
-            eManager.serverCrashed(this.endpoint);
+            ClientServerObserver clientServerObserver = ClientServerObserverHolder.getInstance();
+            clientServerObserver.beforeFailoverByCacheClientUpdater(this.location);
+            this.eManager.serverCrashed(this.endpoint);
             String message = ": Caught the following exception and will exit: ";
             handleException(message, e);
           }
+
           // In any event, terminate this thread.
-          continueProcessing.set(false);// = false; // force termination
+          this.continueProcessing.set(false); // force termination
           if (isDebugEnabled) {
             logger.debug("CCU terminated due to Exception");
           }
+
         } finally {
-          _message.clear();
+          clientMessage.clear();
         }
       } // while
+
     } finally {
       if (isDebugEnabled) {
         logger.debug("has stopped and cleaning the helper ..");
       }
-      this.close(); // added to fixes some race conditions associated with 38382
+      close(); // added to fix some race conditions associated with 38382
       // this will make sure that if this thread dies without starting QueueMgr then it will start..
       // 1. above we ignore InterruptedIOException and this thread dies without informing QueueMgr
-      // 2. if there is some other race codition with continueProcessing flag
-      this.qManager.checkEndpoint(this, endpoint);
+      // 2. if there is some other race condition with continueProcessing flag
+      this.qManager.checkEndpoint(this, this.endpoint);
     }
   }
 
@@ -1785,12 +1795,11 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    */
   private Object deserialize(byte[] serializedBytes) {
     Object deserializedObject = serializedBytes;
-    // This is a debugging method so ignore all exceptions like
-    // ClassNotFoundException
+    // This is a debugging method so ignore all exceptions like ClassNotFoundException
     try {
       DataInputStream dis = new DataInputStream(new ByteArrayInputStream(serializedBytes));
       deserializedObject = DataSerializer.readObject(dis);
-    } catch (Exception e) {
+    } catch (ClassNotFoundException | IOException ignore) {
     }
     return deserializedObject;
   }
@@ -1799,18 +1808,14 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    * @return the local port of our {@link #socket}
    */
   protected int getLocalPort() {
-    return socket.getLocalPort();
+    return this.socket.getLocalPort();
   }
 
+  @Override
   public void onDisconnect(InternalDistributedSystem sys) {
     stopUpdater();
   }
 
-  /**
-   * true if the EndPoint represented by this updater thread has died.
-   */
-  private volatile boolean endPointDied = false;
-
   private void verifySocketBufferSize(int requestedBufferSize, int actualBufferSize, String type) {
     if (actualBufferSize < requestedBufferSize) {
       logger.info(LocalizedMessage.create(
@@ -1826,11 +1831,11 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
    * @since GemFire 5.7
    */
   public static class CCUStats implements MessageStats {
-    // static fields
+
     private static final StatisticsType type;
-    private final static int messagesBeingReceivedId;
-    private final static int messageBytesBeingReceivedId;
-    private final static int receivedBytesId;
+    private static final int messagesBeingReceivedId;
+    private static final int messageBytesBeingReceivedId;
+    private static final int receivedBytesId;
 
     static {
       StatisticsTypeFactory f = StatisticsTypeFactoryImpl.singleton();
@@ -1852,7 +1857,7 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
     // instance fields
     private final Statistics stats;
 
-    public CCUStats(DistributedSystem ids, ServerLocation location) {
+    CCUStats(DistributedSystem ids, ServerLocation location) {
       // no need for atomic since only a single thread will be writing these
       this.stats = ids.createStatistics(type, "CacheClientUpdater-" + location);
     }
@@ -1861,25 +1866,29 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
       this.stats.close();
     }
 
+    @Override
     public void incReceivedBytes(long v) {
       this.stats.incLong(receivedBytesId, v);
     }
 
+    @Override
     public void incSentBytes(long v) {
       // noop since we never send messages
     }
 
+    @Override
     public void incMessagesBeingReceived(int bytes) {
-      stats.incInt(messagesBeingReceivedId, 1);
+      this.stats.incInt(messagesBeingReceivedId, 1);
       if (bytes > 0) {
-        stats.incLong(messageBytesBeingReceivedId, bytes);
+        this.stats.incLong(messageBytesBeingReceivedId, bytes);
       }
     }
 
+    @Override
     public void decMessagesBeingReceived(int bytes) {
-      stats.incInt(messagesBeingReceivedId, -1);
+      this.stats.incInt(messagesBeingReceivedId, -1);
       if (bytes > 0) {
-        stats.incLong(messageBytesBeingReceivedId, -bytes);
+        this.stats.incLong(messageBytesBeingReceivedId, -bytes);
       }
     }
 
@@ -1893,7 +1902,8 @@ public class CacheClientUpdater extends Thread implements ClientUpdater, Disconn
     }
   }
 
+  @Override
   public boolean isProcessing() {
-    return continueProcessing.get();
+    return this.continueProcessing.get();
   }
 }

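A recurring idiom in the refactored handlers above is the local part cursor: each handler walks
the received message with a partCnt index, reads the region name, key, and flag parts in a fixed
order, and relies on auto-unboxing for the Boolean flags instead of calling booleanValue(). The
following is a minimal, self-contained sketch of that idiom only; it is not Geode code, and
FakeMessage and every other name in it are hypothetical stand-ins.

import java.util.Arrays;
import java.util.List;

public class PartCursorSketch {

  // Stand-in for a received message that exposes its parts by index.
  static class FakeMessage {
    private final List<Object> parts;

    FakeMessage(Object... parts) {
      this.parts = Arrays.asList(parts);
    }

    Object getPart(int index) {
      return parts.get(index);
    }

    int getNumberOfParts() {
      return parts.size();
    }
  }

  public static void main(String[] args) {
    FakeMessage msg = new FakeMessage("/exampleRegion", "key-1", Boolean.TRUE, Boolean.FALSE);

    int partCnt = 0;                                          // local cursor, declared where it is used
    String regionName = (String) msg.getPart(partCnt++);
    Object key = msg.getPart(partCnt++);
    boolean withInterest = (Boolean) msg.getPart(partCnt++);  // auto-unboxing instead of booleanValue()
    boolean withCQs = (Boolean) msg.getPart(partCnt++);

    System.out.printf("region=%s key=%s interest=%b cqs=%b (read %d of %d parts)%n",
        regionName, key, withInterest, withCQs, partCnt, msg.getNumberOfParts());
  }
}

Declaring the cursor inside the try block, as the hunks above do, keeps the index scoped to the
message currently being decoded rather than to the whole handler.
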
http://git-wip-us.apache.org/repos/asf/geode/blob/07efaa8e/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java
index be30061..39c2f3a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ChunkedMessage.java
@@ -152,7 +152,8 @@ public class ChunkedMessage extends Message {
 
   public void setLastChunkAndNumParts(boolean lastChunk, int numParts) {
     setLastChunk(lastChunk);
-    if (this.serverConnection != null && this.serverConnection.getClientVersion().compareTo(Version.GFE_65) >= 0) {
+    if (this.serverConnection != null
+        && this.serverConnection.getClientVersion().compareTo(Version.GFE_65) >= 0) {
       // we use three bits for number of parts in last chunk byte
       // we use three bits for number of parts in last chunk byte
       byte localLastChunk = (byte) (numParts << 5);
@@ -240,7 +241,8 @@ public class ChunkedMessage extends Message {
     int totalBytesRead = 0;
     do {
       int bytesRead = 0;
-      bytesRead = inputStream.read(cb.array(), totalBytesRead, CHUNK_HEADER_LENGTH - totalBytesRead);
+      bytesRead =
+          inputStream.read(cb.array(), totalBytesRead, CHUNK_HEADER_LENGTH - totalBytesRead);
       if (bytesRead == -1) {
         throw new EOFException(
             LocalizedStrings.ChunkedMessage_CHUNK_READ_ERROR_CONNECTION_RESET.toLocalizedString());

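The setLastChunkAndNumParts hunk above keeps the existing header-byte layout, in which the number
of parts in the last chunk is shifted into the three high-order bits of a single byte
(numParts << 5). The sketch below illustrates that style of packing and unpacking; it is not Geode
code, and the low-order LAST_CHUNK_FLAG bit is an assumed placeholder for whatever flag bits the
real header byte carries.

public class ChunkHeaderByteSketch {

  private static final byte LAST_CHUNK_FLAG = 0x01;  // hypothetical low-order flag bit

  static byte pack(boolean lastChunk, int numParts) {
    byte headerByte = (byte) (numParts << 5);         // three high bits carry the part count
    if (lastChunk) {
      headerByte |= LAST_CHUNK_FLAG;
    }
    return headerByte;
  }

  static int unpackNumParts(byte headerByte) {
    return (headerByte >> 5) & 0x07;                  // recover the three-bit count
  }

  static boolean unpackLastChunk(byte headerByte) {
    return (headerByte & LAST_CHUNK_FLAG) != 0;
  }

  public static void main(String[] args) {
    byte header = pack(true, 5);
    System.out.printf("numParts=%d lastChunk=%b%n",
        unpackNumParts(header), unpackLastChunk(header));
  }
}

Three bits limit the encoded count to the range 0 through 7.
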
http://git-wip-us.apache.org/repos/asf/geode/blob/07efaa8e/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
index 354ad0f..2ac6fea 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/Message.java
@@ -84,7 +84,8 @@ public class Message {
   // Tentative workaround to avoid OOM stated in #46754.
   public static final ThreadLocal<Integer> MESSAGE_TYPE = new ThreadLocal<>();
 
-  public static final String MAX_MESSAGE_SIZE_PROPERTY = DistributionConfig.GEMFIRE_PREFIX + "client.max-message-size";
+  public static final String MAX_MESSAGE_SIZE_PROPERTY =
+      DistributionConfig.GEMFIRE_PREFIX + "client.max-message-size";
 
   static final int DEFAULT_MAX_MESSAGE_SIZE = 1073741824;
 
@@ -299,8 +300,8 @@ public class Message {
     } else {
       HeapDataOutputStream hdos = new HeapDataOutputStream(str);
       try {
-      this.messageModified = true;
-      part.setPartState(hdos, false);
+        this.messageModified = true;
+        part.setPartState(hdos, false);
       } finally {
         close(hdos);
       }
@@ -309,8 +310,8 @@ public class Message {
   }
 
   /*
-   * Adds a new part to this message that contains a {@code byte} array (as opposed to a
-   * serialized object).
+   * Adds a new part to this message that contains a {@code byte} array (as opposed to a serialized
+   * object).
    *
    * @see #addPart(byte[], boolean)
    */
@@ -378,7 +379,7 @@ public class Message {
     if (this.version.equals(Version.CURRENT)) {
       v = null;
     }
-    
+
     // create the HDOS with a flag telling it that it can keep any byte[] or ByteBuffers/ByteSources
     // passed to it.
     HeapDataOutputStream hdos = new HeapDataOutputStream(this.chunkSize, v, true);
@@ -399,12 +400,12 @@ public class Message {
     if (zipValues) {
       throw new UnsupportedOperationException("zipValues no longer supported");
     }
-    
+
     Version v = this.version;
     if (this.version.equals(Version.CURRENT)) {
       v = null;
     }
-    
+
     HeapDataOutputStream hdos = new HeapDataOutputStream(this.chunkSize, v);
     try {
       BlobHelper.serializeTo(o, hdos);
@@ -520,7 +521,8 @@ public class Message {
   }
 
   protected void packHeaderInfoForSending(int msgLen, boolean isSecurityHeader) {
-    // setting second bit of flags byte for client this is not require but this makes all changes easily at client side right now just see this bit and process security header
+    // setting the second bit of the flags byte is not required for the client, but it makes the
+    // change easy at the client side: it just checks this bit and processes the security header
     byte flagsByte = this.flags;
     if (isSecurityHeader) {
       flagsByte |= MESSAGE_HAS_SECURE_PART;
@@ -529,7 +531,7 @@ public class Message {
       flagsByte |= MESSAGE_IS_RETRY;
     }
     getCommBuffer().putInt(this.messageType).putInt(msgLen).putInt(this.numberOfParts)
-                   .putInt(this.transactionId).put(flagsByte);
+        .putInt(this.transactionId).put(flagsByte);
   }
 
   protected Part getSecurityPart() {
@@ -601,7 +603,7 @@ public class Message {
 
         if (msgLen > this.maxMessageSize) {
           throw new MessageTooLargeException("Message size (" + msgLen
-                                             + ") exceeds gemfire.client.max-message-size setting (" + this.maxMessageSize + ")");
+              + ") exceeds gemfire.client.max-message-size setting (" + this.maxMessageSize + ")");
         }
 
         commBuffer.clear();
@@ -673,7 +675,7 @@ public class Message {
   void fetchHeader() throws IOException {
     final ByteBuffer cb = getCommBuffer();
     cb.clear();
-    
+
     // messageType is invalidated here and can be used as an indicator
     // of problems reading the message
     this.messageType = MessageType.INVALID;
@@ -693,7 +695,7 @@ public class Message {
         }
       } while (cb.remaining() > 0);
       cb.flip();
-      
+
     } else {
       int hdr = 0;
       do {
@@ -728,7 +730,7 @@ public class Message {
       throw new IOException(LocalizedStrings.Message_INVALID_MESSAGE_TYPE_0_WHILE_READING_HEADER
           .toLocalizedString(type));
     }
-    
+
     int timeToWait = 0;
     if (this.serverConnection != null) {
       // Keep track of the fact that a message is being processed.
@@ -736,7 +738,7 @@ public class Message {
       timeToWait = this.serverConnection.getClientReadTimeout();
     }
     this.readHeader = true;
-    
+
     if (this.messageLimiter != null) {
       for (;;) {
         this.serverConnection.getCachedRegionHelper().checkCancelInProgress(null);
@@ -764,15 +766,13 @@ public class Message {
         }
       } // for
     }
-    
+
     if (len > 0) {
       if (this.maxIncomingMessageLength > 0 && len > this.maxIncomingMessageLength) {
         throw new IOException(LocalizedStrings.Message_MESSAGE_SIZE_0_EXCEEDED_MAX_LIMIT_OF_1
-            .toLocalizedString(new Object[] {
-              len, this.maxIncomingMessageLength
-            }));
+            .toLocalizedString(new Object[] {len, this.maxIncomingMessageLength}));
       }
-      
+
       if (this.dataLimiter != null) {
         for (;;) {
           if (this.serverConnection != null) {
@@ -840,7 +840,7 @@ public class Message {
     if (len > 0 && numParts <= 0 || len <= 0 && numParts > 0) {
       throw new IOException(
           LocalizedStrings.Message_PART_LENGTH_0_AND_NUMBER_OF_PARTS_1_INCONSISTENT
-              .toLocalizedString(new Object[] { len, numParts }));
+              .toLocalizedString(new Object[] {len, numParts}));
     }
 
     Integer msgType = MESSAGE_TYPE.get();
@@ -854,7 +854,7 @@ public class Message {
             + MessageType.getString(msgType) + " operation.");
       }
     }
-    
+
     setNumberOfParts(numParts);
     if (numParts <= 0) {
       return;
@@ -872,7 +872,8 @@ public class Message {
     int readSecurePart = checkAndSetSecurityPart();
 
     int bytesRemaining = len;
-    for (int i = 0; i < numParts + readSecurePart || readSecurePart == 1 && cb.remaining() > 0; i++) {
+    for (int i = 0; i < numParts + readSecurePart
+        || readSecurePart == 1 && cb.remaining() > 0; i++) {
       int bytesReadThisTime = readPartChunk(bytesRemaining);
       bytesRemaining -= bytesReadThisTime;
 
@@ -887,7 +888,7 @@ public class Message {
       int partLen = cb.getInt();
       byte partType = cb.get();
       byte[] partBytes = null;
-      
+
       if (partLen > 0) {
         partBytes = new byte[partLen];
         int alreadyReadBytes = cb.remaining();
@@ -897,7 +898,7 @@ public class Message {
           }
           cb.get(partBytes, 0, alreadyReadBytes);
         }
-        
+
         // now we need to read partLen - alreadyReadBytes off the wire
         int off = alreadyReadBytes;
         int remaining = partLen - off;
@@ -965,20 +966,20 @@ public class Message {
       // we already have the next part header in commBuffer so just return
       return 0;
     }
-    
+
     if (commBuffer.position() != 0) {
       commBuffer.compact();
     } else {
       commBuffer.position(commBuffer.limit());
       commBuffer.limit(commBuffer.capacity());
     }
-    
+
     if (this.serverConnection != null) {
       // Keep track of the fact that we are making progress
       this.serverConnection.updateProcessingMessage();
     }
     int bytesRead = 0;
-    
+
     if (this.socketChannel != null) {
       int remaining = commBuffer.remaining();
       if (remaining > bytesRemaining) {
@@ -1006,7 +1007,7 @@ public class Message {
         bytesToRead = bytesRemaining;
       }
       int pos = commBuffer.position();
-      
+
       while (bytesToRead > 0) {
         int res = this.inputStream.read(commBuffer.array(), pos, bytesToRead);
         if (res != -1) {
@@ -1022,7 +1023,7 @@ public class Message {
                   .toLocalizedString());
         }
       }
-      
+
       commBuffer.position(pos);
     }
     commBuffer.flip();

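A note on the gemfire.client.max-message-size setting that appears in the MessageTooLargeException above: it is named by MAX_MESSAGE_SIZE_PROPERTY and, as far as this file shows, is expected to be supplied as a JVM system property, with DEFAULT_MAX_MESSAGE_SIZE (1073741824 bytes) as the fallback. A minimal sketch of overriding it on a client JVM follows; the ClientBootstrap class name and the 128 MB value are illustrative assumptions, not part of this commit.

    public class ClientBootstrap {
      public static void main(String[] args) {
        // Equivalent to passing -Dgemfire.client.max-message-size=134217728 on the
        // client JVM command line; set it before the client cache is created so the
        // value is in place when the Message machinery reads the property.
        System.setProperty("gemfire.client.max-message-size",
            String.valueOf(128 * 1024 * 1024)); // 128 MB instead of the ~1 GB default
        // ... create the ClientCache and run the client as usual ...
      }
    }
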
http://git-wip-us.apache.org/repos/asf/geode/blob/07efaa8e/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java
index dfda14f..485ccae 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/ServerConnection.java
@@ -723,7 +723,10 @@ public class ServerConnection implements Runnable {
     ThreadState threadState = null;
     try {
       if (msg != null) {
-        // Since this thread is not interrupted when the cache server is shutdown, test again after a message has been read. This is a bit of a hack. I think this thread should be interrupted, but currently AcceptorImpl doesn't keep track of the threads that it launches.
+        // Since this thread is not interrupted when the cache server is shut down, test again
+        // after a message has been read. This is a bit of a hack. I think this thread should be
+        // interrupted, but currently AcceptorImpl doesn't keep track of the threads that it
+        // launches.
         if (!this.processMessages || (crHelper.isShutdown())) {
           if (logger.isDebugEnabled()) {
             logger.debug("{} ignoring message of type {} from client {} due to shutdown.",