Posted to commits@geode.apache.org by kl...@apache.org on 2015/12/11 23:05:34 UTC

[01/50] [abbrv] incubator-geode git commit: Updating compiled website

Repository: incubator-geode
Updated Branches:
  refs/heads/feature/GEODE-291 0182de3b4 -> 9da3f61ca


Updating compiled website


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/c41f98c6
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/c41f98c6
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/c41f98c6

Branch: refs/heads/feature/GEODE-291
Commit: c41f98c6230517847467c42cdef062327fbd673b
Parents: a3366e3
Author: William Markito <wm...@pivotal.io>
Authored: Wed Nov 25 11:50:03 2015 -0800
Committer: William Markito <wm...@pivotal.io>
Committed: Wed Nov 25 11:50:03 2015 -0800

----------------------------------------------------------------------
 gemfire-site/content/index.html | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c41f98c6/gemfire-site/content/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/content/index.html b/gemfire-site/content/index.html
index 7ddf2c5..de8327d 100644
--- a/gemfire-site/content/index.html
+++ b/gemfire-site/content/index.html
@@ -82,11 +82,12 @@
             </div>
 
             <div class="btn-wrapper">
-                <iframe class="social-btn" src="/static/github-btn.html?user=apache&repo=incubator-geode&type=watch&size=large" allowtransparency="true" frameborder="0" scrolling="0" width="85" height="30"></iframe>
-                <iframe class="social-btn" src="/static/github-btn.html?user=apache&repo=incubator-geode&type=fork&size=large" allowtransparency="true" frameborder="0" scrolling="0" width="85" height="30"></iframe>
-                <!--<p><span class="or">or</span> <a href="#">Download Geode</a></p>-->
+                <!-- Place this tag where you want the button to render. -->
+              <a class="github-button" href="https://github.com/apache/incubator-geode" data-icon="octicon-star" data-style="mega" data-count-href="/apache/incubator-geode/stargazers" data-count-api="/repos/apache/incubator-geode#stargazers_count" data-count-aria-label="# stargazers on GitHub" aria-label="Star apache/incubator-geode on GitHub">Star</a>
+              <a class="github-button" href="https://github.com/apache/incubator-geode/fork" data-icon="octicon-repo-forked" data-style="mega" data-count-href="/apache/incubator-geode/network" data-count-api="/repos/apache/incubator-geode#forks_count" data-count-aria-label="# forks on GitHub" aria-label="Fork apache/incubator-geode on GitHub">Fork</a>
+              <a class="github-button" href="https://github.com/apache/incubator-geode" data-icon="octicon-eye" data-style="mega" data-count-href="/apache/incubator-geode/watchers" data-count-api="/repos/apache/incubator-geode#subscribers_count" data-count-aria-label="# watchers on GitHub" aria-label="Watch apache/incubator-geode on GitHub">Watch</a>
+              <!--<p><span class="or">or</span> <a href="#">Download Geode</a></p>-->
             </div>
-
         </div>
     </div>
 </section>
@@ -172,6 +173,8 @@
 Commercially available as GemFire™, the technology was first widely deployed in the financial sector as the transactional, low-latency data engine used
 in Wall Street trading platforms.
 Today Apache Geode is used by over 600 enterprise customers for high-scale business applications that must meet low latency and 24x7 availability requirements.</p>
+
+                <p>This project is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
             </div>
             <!--
             <div class="col-md-4 text-left">

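Note: the anchors added above follow the github-buttons embed pattern. As a minimal, hypothetical sketch only (the loader script below is an assumption; it is not part of this hunk, but such anchors are normally paired with it elsewhere in the page), the markup typically looks like this:

    <!-- Hypothetical sketch of a github-buttons embed; the buttons.github.io
         loader script is assumed and does not appear in the hunk above. -->
    <a class="github-button"
       href="https://github.com/apache/incubator-geode"
       data-icon="octicon-star"
       data-style="mega"
       data-count-api="/repos/apache/incubator-geode#stargazers_count"
       aria-label="Star apache/incubator-geode on GitHub">Star</a>
    <script async defer src="https://buttons.github.io/buttons.js"></script>

The script scans the page for elements with class="github-button" and renders each anchor as a styled button with a live count, which is why the diff can drop the per-button iframes.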

[50/50] [abbrv] incubator-geode git commit: Merge remote-tracking branch 'origin/develop' into feature/GEODE-291

Posted by kl...@apache.org.
Merge remote-tracking branch 'origin/develop' into feature/GEODE-291


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/9da3f61c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/9da3f61c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/9da3f61c

Branch: refs/heads/feature/GEODE-291
Commit: 9da3f61ca8ceed4ca350e4caf277a542d1d354c9
Parents: 0182de3 949507f
Author: Kirk Lund <kl...@pivotal.io>
Authored: Thu Dec 10 15:56:22 2015 -0800
Committer: Kirk Lund <kl...@pivotal.io>
Committed: Thu Dec 10 15:56:22 2015 -0800

----------------------------------------------------------------------
 build.gradle                                    |   39 +-
 gemfire-assembly/build.gradle                   |   30 +
 gemfire-assembly/src/main/dist/bin/gfsh         |   14 +
 .../src/main/dist/bin/gfsh-completion.bash      |   15 +
 gemfire-assembly/src/main/dist/bin/gfsh.bat     |   14 +
 .../LauncherLifecycleCommandsDUnitTest.java     | 1005 +++++++++
 .../LauncherLifecycleCommandsJUnitTest.java     |  625 ++++++
 .../SharedConfigurationEndToEndDUnitTest.java   |  443 ++++
 gemfire-common/build.gradle                     |   17 +
 gemfire-core/build.gradle                       |   17 +
 gemfire-core/src/jca/ra.xml                     |   17 +-
 .../DistributedSystemHealthEvaluator.java       |    2 +-
 .../client/doc-files/example-client-cache.xml   |   16 +
 .../gemfire/cache/doc-files/example-cache.xml   |   16 +
 .../gemfire/cache/doc-files/example2-cache.xml  |   17 +
 .../gemfire/cache/doc-files/example3-cache.xml  |   16 +
 .../cache/partition/PartitionManager.java       |  377 ----
 .../cache/query/internal/parse/fixantlr.sh      |   15 +
 .../gemfire/cache/query/internal/parse/oql.g    |   17 +
 .../gemfire/distributed/internal/DMStats.java   |    1 +
 .../internal/DistributionAdvisor.java           |    2 +-
 .../internal/DistributionManager.java           |   24 +-
 .../internal/DistributionMessage.java           |    2 +-
 .../distributed/internal/DistributionStats.java |    3 +
 .../internal/InternalDistributedSystem.java     |   19 +-
 .../internal/LonerDistributionManager.java      |    2 +
 .../internal/MembershipListener.java            |    3 +-
 .../distributed/internal/ProductUseLog.java     |    2 +-
 .../distributed/internal/ReplyProcessor21.java  |    4 +-
 .../internal/direct/DirectChannelListener.java  |   16 +
 .../internal/locks/DLockGrantor.java            |    2 +-
 .../DistributedMembershipListener.java          |    3 +-
 .../internal/membership/MemberAttributes.java   |  131 +-
 .../internal/membership/NetView.java            |   19 +
 .../internal/membership/gms/GMSMember.java      |    8 +
 .../internal/membership/gms/ServiceConfig.java  |   21 +-
 .../internal/membership/gms/Services.java       |   12 +-
 .../internal/membership/gms/SuspectMember.java  |    9 +-
 .../membership/gms/auth/GMSAuthenticator.java   |    2 +-
 .../membership/gms/fd/GMSHealthMonitor.java     |   38 +-
 .../membership/gms/interfaces/Service.java      |    3 +-
 .../gms/locator/FindCoordinatorRequest.java     |   33 +
 .../membership/gms/membership/GMSJoinLeave.java |  203 +-
 .../gms/messages/HeartbeatMessage.java          |    2 +-
 .../gms/messages/HeartbeatRequestMessage.java   |    2 +-
 .../gms/messages/InstallViewMessage.java        |    2 +-
 .../gms/messages/JoinResponseMessage.java       |   10 +-
 .../membership/gms/messages/ViewAckMessage.java |    2 +-
 .../gms/messenger/AddressManager.java           |   21 +-
 .../membership/gms/messenger/GMSPingPonger.java |   22 +-
 .../membership/gms/messenger/JGAddress.java     |   23 +-
 .../gms/messenger/JGroupsMessenger.java         |  387 ++--
 .../membership/gms/messenger/Transport.java     |    2 +-
 .../gms/mgr/GMSMembershipManager.java           |   27 +-
 .../internal/tcpserver/TcpServer.java           |    2 +-
 .../gemfire/internal/AbstractConfig.java        |    4 -
 .../admin/remote/RemoteGfManagerAgent.java      |    2 +-
 .../internal/cache/DistributedRegion.java       |    2 +-
 .../internal/cache/InitialImageFlowControl.java |    2 +-
 .../internal/cache/PRHARedundancyProvider.java  |    4 +-
 .../internal/cache/PartitionedRegion.java       |    2 +-
 .../internal/cache/PartitionedRegionHelper.java |    2 +-
 .../cache/SearchLoadAndWriteProcessor.java      |    2 +-
 .../gemfire/internal/cache/TXCommitMessage.java |    2 +-
 .../internal/cache/TXFarSideCMTracker.java      |    2 +-
 .../gemfire/internal/cache/TXManagerImpl.java   |    2 +-
 .../PartitionedRegionRebalanceOp.java           |    2 +-
 .../cache/persistence/BackupManager.java        |    2 +-
 .../persistence/PersistenceAdvisorImpl.java     |    2 +-
 .../persistence/PersistentMemberManager.java    |    2 +-
 .../cache/versions/RegionVersionVector.java     |    2 +-
 .../cache/xmlcache/GeodeEntityResolver.java     |    8 +-
 .../internal/i18n/ParentLocalizedStrings.java   |    4 +-
 .../gemfire/internal/offheap/DataAsAddress.java |   14 +-
 .../offheap/OffHeapRegionEntryHelper.java       |   51 +-
 .../internal/offheap/OffHeapStorage.java        |   32 +-
 .../internal/offheap/RefCountChangeInfo.java    |   43 +-
 .../offheap/SimpleMemoryAllocatorImpl.java      |   22 +-
 .../gemfire/internal/tcp/Connection.java        |    6 +-
 .../management/internal/FederatingManager.java  |    5 +-
 .../internal/ManagementMembershipListener.java  |    4 +-
 .../internal/SystemManagementService.java       |    4 +-
 .../internal/beans/MBeanAggregator.java         |    2 +-
 .../tools/gfsh/app/windowsbindings.properties   |   15 +
 .../partition/PartitionManagerDUnitTest.java    |  443 ----
 .../DistributedMulticastRegionDUnitTest.java    |  197 ++
 .../gemfire/cache30/ReconnectDUnitTest.java     |    2 +-
 .../gemfire/distributed/LocatorDUnitTest.java   |    9 +-
 .../internal/DistributionManagerDUnitTest.java  |   44 +-
 .../membership/MembershipJUnitTest.java         |  232 +-
 .../internal/membership/NetViewJUnitTest.java   |   82 +-
 .../membership/gms/GMSMemberJUnitTest.java      |   16 +
 .../membership/gms/MembershipManagerHelper.java |    1 +
 .../gms/fd/GMSHealthMonitorJUnitTest.java       |  110 +-
 .../gms/membership/GMSJoinLeaveJUnitTest.java   |  150 +-
 .../messenger/JGroupsMessengerJUnitTest.java    |  481 +++-
 .../gms/mgr/GMSMembershipManagerJUnitTest.java  |   82 +-
 .../internal/AbstractConfigJUnitTest.java       |  114 +
 .../internal/DataSerializableJUnitTest.java     |   35 +-
 .../fixed/FixedPartitioningTestBase.java        |   83 -
 ...ngWithColocationAndPersistenceDUnitTest.java |  106 -
 .../InstantiatorPropagationDUnitTest.java       | 1777 +++++++++++++++
 .../cache/wan/AsyncEventQueueTestBase.java      | 1666 ++++++++++++++
 .../asyncqueue/AsyncEventListenerDUnitTest.java | 1920 ++++++++++++++++
 .../AsyncEventListenerOffHeapDUnitTest.java     |   33 +
 .../AsyncEventQueueStatsDUnitTest.java          |  320 +++
 .../ConcurrentAsyncEventQueueDUnitTest.java     |  336 +++
 ...ncurrentAsyncEventQueueOffHeapDUnitTest.java |   32 +
 .../CommonParallelAsyncEventQueueDUnitTest.java |   59 +
 ...ParallelAsyncEventQueueOffHeapDUnitTest.java |   32 +
 .../offheap/AbstractStoredObjectTestBase.java   |  203 ++
 .../offheap/DataAsAddressJUnitTest.java         |  368 +++
 .../offheap/OffHeapStorageJUnitTest.java        |  202 ++
 .../offheap/RefCountChangeInfoJUnitTest.java    |  159 ++
 .../internal/cli/GfshParserJUnitTest.java       |    2 +
 .../management/internal/cli/HeadlessGfsh.java   |  376 ++++
 .../internal/cli/HeadlessGfshJUnitTest.java     |   87 +
 .../management/internal/cli/ResultHandler.java  |   23 +
 .../internal/cli/TableBuilderJUnitTest.java     |  183 ++
 .../cli/commands/CliCommandTestBase.java        |  560 +++++
 .../cli/commands/ConfigCommandsDUnitTest.java   |  497 +++++
 ...eateAlterDestroyRegionCommandsDUnitTest.java | 1148 ++++++++++
 .../cli/commands/DeployCommandsDUnitTest.java   |  480 ++++
 .../commands/DiskStoreCommandsDUnitTest.java    | 1154 ++++++++++
 .../cli/commands/FunctionCommandsDUnitTest.java |  593 +++++
 .../commands/GemfireDataCommandsDUnitTest.java  | 2087 ++++++++++++++++++
 ...WithCacheLoaderDuringCacheMissDUnitTest.java |  371 ++++
 .../cli/commands/IndexCommandsDUnitTest.java    |  817 +++++++
 ...stAndDescribeDiskStoreCommandsDUnitTest.java |  197 ++
 .../ListAndDescribeRegionDUnitTest.java         |  320 +++
 .../cli/commands/ListIndexCommandDUnitTest.java |  672 ++++++
 .../cli/commands/MemberCommandsDUnitTest.java   |  286 +++
 .../MiscellaneousCommandsDUnitTest.java         |  492 +++++
 ...laneousCommandsExportLogsPart1DUnitTest.java |  139 ++
 ...laneousCommandsExportLogsPart2DUnitTest.java |  148 ++
 ...laneousCommandsExportLogsPart3DUnitTest.java |  150 ++
 ...laneousCommandsExportLogsPart4DUnitTest.java |  141 ++
 .../cli/commands/QueueCommandsDUnitTest.java    |  385 ++++
 .../SharedConfigurationCommandsDUnitTest.java   |  338 +++
 .../cli/commands/ShellCommandsDUnitTest.java    |  365 +++
 .../cli/commands/ShowDeadlockDUnitTest.java     |  271 +++
 .../cli/commands/ShowMetricsDUnitTest.java      |  347 +++
 .../cli/commands/ShowStackTraceDUnitTest.java   |  149 ++
 .../cli/commands/UserCommandsDUnitTest.java     |  164 ++
 .../src/test/java/dunit/RemoteDUnitVMIF.java    |    2 +
 .../src/test/java/dunit/standalone/ChildVM.java |   11 +-
 .../java/dunit/standalone/DUnitLauncher.java    |   24 +
 .../java/dunit/standalone/ProcessManager.java   |   14 +-
 .../java/dunit/standalone/RemoteDUnitVM.java    |    7 +-
 .../ClientCacheFactoryJUnitTest_single_pool.xml |   17 +
 .../gemfire/cache/query/dunit/IndexCreation.xml |   17 +
 .../functional/index-creation-with-eviction.xml |   17 +
 .../index-creation-without-eviction.xml         |   17 +
 .../functional/index-recovery-overflow.xml      |   17 +
 .../query/internal/index/cachequeryindex.xml    |   17 +
 .../internal/index/cachequeryindexwitherror.xml |   17 +
 .../cache/query/partitioned/PRIndexCreation.xml |   17 +
 .../gemfire/cache30/attributesUnordered.xml     |   17 +
 .../com/gemstone/gemfire/cache30/badFloat.xml   |   17 +
 .../com/gemstone/gemfire/cache30/badInt.xml     |   17 +
 .../gemfire/cache30/badKeyConstraintClass.xml   |   17 +
 .../com/gemstone/gemfire/cache30/badScope.xml   |   17 +
 .../com/gemstone/gemfire/cache30/bug44710.xml   |   17 +
 .../gemfire/cache30/callbackNotDeclarable.xml   |   17 +
 .../gemfire/cache30/callbackWithException.xml   |   17 +
 .../com/gemstone/gemfire/cache30/coLocation.xml |   17 +
 .../gemstone/gemfire/cache30/coLocation3.xml    |   17 +
 .../com/gemstone/gemfire/cache30/ewtest.xml     |   17 +
 .../cache30/examples_3_0/example-cache.xml      |   16 +
 .../cache30/examples_4_0/example-cache.xml      |   16 +
 .../gemfire/cache30/loaderNotLoader.xml         |   17 +
 .../com/gemstone/gemfire/cache30/malformed.xml  |   17 +
 .../gemfire/cache30/namedAttributes.xml         |   17 +
 .../gemfire/cache30/partitionedRegion.xml       |   17 +
 .../gemfire/cache30/partitionedRegion51.xml     |   17 +
 .../gemstone/gemfire/cache30/sameRootRegion.xml |   17 +
 .../gemstone/gemfire/cache30/sameSubregion.xml  |   17 +
 .../gemfire/cache30/unknownNamedAttributes.xml  |   17 +
 .../internal/SharedConfigurationJUnitTest.xml   |   17 +
 .../internal/cache/BackupJUnitTest.cache.xml    |   17 +
 .../internal/cache/DiskRegCacheXmlJUnitTest.xml |   16 +
 .../cache/PartitionRegionCacheExample1.xml      |   17 +
 .../cache/PartitionRegionCacheExample2.xml      |   17 +
 .../incorrect_bytes_threshold.xml               |   17 +
 .../faultyDiskXMLsForTesting/incorrect_dir.xml  |   17 +
 .../incorrect_dir_size.xml                      |   17 +
 .../incorrect_max_oplog_size.xml                |   17 +
 .../incorrect_roll_oplogs_value.xml             |   17 +
 .../incorrect_sync_value.xml                    |   17 +
 .../incorrect_time_interval.xml                 |   17 +
 .../mixed_diskstore_diskdir.xml                 |   17 +
 .../mixed_diskstore_diskwriteattrs.xml          |   17 +
 .../tier/sockets/RedundancyLevelJUnitTest.xml   |   16 +
 ...testDTDFallbackWithNonEnglishLocal.cache.xml |   17 +
 .../gemstone/gemfire/internal/jta/cachejta.xml  |   17 +
 ...dNewNodeJUnitTest.testAddNewNodeNewNamed.xml |   17 +
 ...ewNodeJUnitTest.testAddNewNodeNewUnnamed.xml |   17 +
 ...itTest.testAddNewNodeNewUnnamedExtension.xml |   17 +
 ...NodeJUnitTest.testAddNewNodeReplaceNamed.xml |   17 +
 ...deJUnitTest.testAddNewNodeReplaceUnnamed.xml |   17 +
 ...st.testAddNewNodeReplaceUnnamedExtension.xml |   17 +
 ...sAddNewNodeJUnitTest.testDeleteNodeNamed.xml |   17 +
 ...ddNewNodeJUnitTest.testDeleteNodeUnnamed.xml |   17 +
 ...JUnitTest.testDeleteNodeUnnamedExtension.xml |   17 +
 .../utils/XmlUtilsAddNewNodeJUnitTest.xml       |   17 +
 ...Test.testBuildSchemaLocationMapAttribute.xml |   17 +
 ...testBuildSchemaLocationMapEmptyAttribute.xml |   17 +
 ...ationMapMapOfStringListOfStringAttribute.xml |   17 +
 ....testBuildSchemaLocationMapNullAttribute.xml |   17 +
 ...XmlUtilsJUnitTest.testQuerySingleElement.xml |   17 +
 .../src/test/resources/jta/cachejta.xml         |   17 +
 .../resources/spring/spring-gemfire-context.xml |   17 +
 gemfire-lucene/build.gradle                     |   17 +
 .../lucene/internal/xml/LuceneXmlConstants.java |    2 +-
 .../geode.apache.org/lucene/lucene-1.0.xsd      |   57 +
 .../lucene/lucene-1.0.xsd                       |   58 -
 ...erIntegrationJUnitTest.createIndex.cache.xml |   23 +-
 ...serIntegrationJUnitTest.parseIndex.cache.xml |   23 +-
 gemfire-rebalancer/build.gradle                 |   17 +
 gemfire-site/build.gradle                       |   37 -
 .../content/bootstrap/bootstrap.min.css         |    9 +
 gemfire-site/content/community/index.html       |  629 ++++++
 .../content/css/bootflat-extensions.css         |  356 +++
 gemfire-site/content/css/bootflat-square.css    |   69 +
 gemfire-site/content/css/bootflat.css           | 1559 +++++++++++++
 gemfire-site/content/css/font-awesome.min.css   |  405 ++++
 gemfire-site/content/css/geode-site.css         | 1554 +++++++++++++
 gemfire-site/content/css/usergrid-site.css      | 1554 +++++++++++++
 gemfire-site/content/favicon.ico                |  Bin 0 -> 20805 bytes
 gemfire-site/content/font/FontAwesome.otf       |  Bin 0 -> 61896 bytes
 .../content/font/fontawesome-webfont-eot.eot    |  Bin 0 -> 37405 bytes
 .../content/font/fontawesome-webfont-svg.svg    |  399 ++++
 .../content/font/fontawesome-webfont-ttf.ttf    |  Bin 0 -> 79076 bytes
 .../content/font/fontawesome-webfont-woff.woff  |  Bin 0 -> 43572 bytes
 gemfire-site/content/img/apache_geode_logo.png  |  Bin 0 -> 23616 bytes
 .../content/img/apache_geode_logo_white.png     |  Bin 0 -> 22695 bytes
 .../img/apache_geode_logo_white_small.png       |  Bin 0 -> 52948 bytes
 gemfire-site/content/img/check_flat/default.png |  Bin 0 -> 25851 bytes
 gemfire-site/content/img/egg-logo.png           |  Bin 0 -> 9938 bytes
 gemfire-site/content/img/github.png             |  Bin 0 -> 8936 bytes
 gemfire-site/content/index.html                 |  295 +++
 gemfire-site/content/js/bootstrap.min.js        |    8 +
 gemfire-site/content/js/head.js                 |  708 ++++++
 gemfire-site/content/js/html5shiv.js            |    8 +
 gemfire-site/content/js/jquery-1.10.1.min.js    |    6 +
 gemfire-site/content/js/jquery.icheck.js        |  397 ++++
 gemfire-site/content/js/respond.min.js          |    6 +
 gemfire-site/content/js/usergrid-site.js        |   50 +
 gemfire-site/content/releases/index.html        |  239 ++
 gemfire-site/content/static/github-btn.html     |    2 +
 gemfire-site/src/jbake.zip                      |  Bin 207030 -> 0 bytes
 gemfire-site/src/jbake/assets/favicon.ico       |  Bin 1150 -> 0 bytes
 .../src/jbake/assets/images/bg-billboard.png    |  Bin 25538 -> 0 bytes
 .../jbake/assets/images/bg-crystals-home.png    |  Bin 41684 -> 0 bytes
 .../assets/images/bg-crystals-secondary.png     |  Bin 26046 -> 0 bytes
 .../src/jbake/assets/images/egg-logo1.png       |  Bin 8626 -> 0 bytes
 .../jbake/assets/images/events/apachecon.png    |  Bin 4528 -> 0 bytes
 .../src/jbake/assets/images/events/oscon.png    |  Bin 26024 -> 0 bytes
 .../src/jbake/assets/images/geode-banner.png    |  Bin 7916 -> 0 bytes
 .../assets/images/logo-apache-geode-white.png   |  Bin 2336 -> 0 bytes
 .../jbake/assets/images/logo-apache-geode.png   |  Bin 3200 -> 0 bytes
 .../jbake/assets/images/logo-geode-white.png    |  Bin 1620 -> 0 bytes
 .../src/jbake/assets/images/logo-geode.png      |  Bin 3345 -> 0 bytes
 .../src/jbake/assets/javascripts/master.js      |  121 -
 .../src/jbake/assets/javascripts/scale.fix.js   |   20 -
 .../jbake/assets/stylesheets/pygment_trac.css   |   60 -
 .../src/jbake/assets/stylesheets/styles.css     |  319 ---
 gemfire-site/src/jbake/content/404.md           |    9 -
 gemfire-site/src/jbake/content/README.md        |   36 -
 gemfire-site/src/jbake/content/about/index.md   |   31 -
 .../src/jbake/content/community/index.md        |   82 -
 .../src/jbake/content/contribute/index.md       |   47 -
 gemfire-site/src/jbake/content/docs/index.md    |   23 -
 .../src/jbake/content/download/index.md         |   13 -
 .../src/jbake/content/getting-started/index.md  |   88 -
 gemfire-site/src/jbake/content/index.md         |   76 -
 gemfire-site/src/jbake/jbake.properties         |    6 -
 gemfire-site/src/jbake/templates/page.groovy    |   80 -
 gemfire-site/website/.gitignore                 |    1 +
 gemfire-site/website/README.md                  |   54 +
 gemfire-site/website/Rules                      |   52 +
 gemfire-site/website/build.sh                   |    1 +
 .../website/content/bootstrap/bootstrap.min.css |    9 +
 .../website/content/community/index.html        |  286 +++
 .../website/content/css/bootflat-extensions.css |  356 +++
 .../website/content/css/bootflat-square.css     |   69 +
 gemfire-site/website/content/css/bootflat.css   | 1559 +++++++++++++
 .../website/content/css/font-awesome.min.css    |  405 ++++
 gemfire-site/website/content/css/geode-site.css | 1554 +++++++++++++
 gemfire-site/website/content/favicon.ico        |  Bin 0 -> 20805 bytes
 .../website/content/font/FontAwesome.otf        |  Bin 0 -> 61896 bytes
 .../content/font/fontawesome-webfont-eot.eot    |  Bin 0 -> 37405 bytes
 .../content/font/fontawesome-webfont-svg.svg    |  399 ++++
 .../content/font/fontawesome-webfont-ttf.ttf    |  Bin 0 -> 79076 bytes
 .../content/font/fontawesome-webfont-woff.woff  |  Bin 0 -> 43572 bytes
 .../website/content/img/apache_geode_logo.png   |  Bin 0 -> 23616 bytes
 .../content/img/apache_geode_logo_white.png     |  Bin 0 -> 22695 bytes
 .../img/apache_geode_logo_white_small.png       |  Bin 0 -> 52948 bytes
 .../website/content/img/check_flat/default.png  |  Bin 0 -> 25851 bytes
 gemfire-site/website/content/img/egg-logo.png   |  Bin 0 -> 9938 bytes
 gemfire-site/website/content/img/github.png     |  Bin 0 -> 8936 bytes
 gemfire-site/website/content/index.html         |  124 ++
 .../website/content/js/bootstrap.min.js         |    8 +
 gemfire-site/website/content/js/head.js         |  708 ++++++
 gemfire-site/website/content/js/html5shiv.js    |    8 +
 .../website/content/js/jquery-1.10.1.min.js     |    6 +
 .../website/content/js/jquery.icheck.js         |  397 ++++
 gemfire-site/website/content/js/respond.min.js  |    6 +
 .../website/content/js/usergrid-site.js         |   50 +
 .../website/content/releases/index.html         |   65 +
 gemfire-site/website/layouts/community.html     |    1 +
 gemfire-site/website/layouts/default.html       |   44 +
 gemfire-site/website/layouts/docs.html          |    1 +
 gemfire-site/website/layouts/footer.html        |   96 +
 gemfire-site/website/layouts/header.html        |  231 ++
 gemfire-site/website/lib/default.rb             |   43 +
 gemfire-site/website/lib/helpers_.rb            |    0
 gemfire-site/website/lib/pandoc.template        |    4 +
 gemfire-site/website/nanoc.yaml                 |   77 +
 gemfire-site/website/run.sh                     |    1 +
 gemfire-site/website/utilities/map-markers.rb   |   58 +
 gemfire-site/website/utilities/markers.txt      |  440 ++++
 .../website/utilities/snapshot-apigee.rb        |   71 +
 gemfire-site/website/utilities/usergrid.csv     |  290 +++
 .../src/it/resources/test-regions.xml           |   17 +
 .../src/it/resources/test-retrieve-regions.xml  |   17 +
 gemfire-web-api/build.gradle                    |   17 +
 gemfire-web/build.gradle                        |   17 +
 gradle.properties                               |   14 +
 gradle/dependency-versions.properties           |   15 +
 gradle/rat.gradle                               |  133 ++
 settings.gradle                                 |   16 +
 332 files changed, 41615 insertions(+), 2986 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9da3f61c/build.gradle
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9da3f61c/gradle/dependency-versions.properties
----------------------------------------------------------------------


[28/50] [abbrv] incubator-geode git commit: GEODE-53 - Removing unused images and adding missing incubating references

Posted by kl...@apache.org.
GEODE-53 - Removing unused images and adding missing incubating references


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/1f193af7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/1f193af7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/1f193af7

Branch: refs/heads/feature/GEODE-291
Commit: 1f193af7016e125059022e76d7cd053339bc309a
Parents: cd75b1f
Author: William Markito <wm...@pivotal.io>
Authored: Tue Dec 8 17:28:18 2015 -0800
Committer: William Markito <wm...@pivotal.io>
Committed: Tue Dec 8 17:28:18 2015 -0800

----------------------------------------------------------------------
 gemfire-site/content/community/index.html         |   2 +-
 gemfire-site/content/index.html                   |   4 ++--
 gemfire-site/website/content/community/index.html |   2 +-
 gemfire-site/website/content/img/intellij.png     | Bin 9199 -> 0 bytes
 gemfire-site/website/content/img/yourkit.jpeg     | Bin 7763 -> 0 bytes
 gemfire-site/website/content/index.html           |   2 +-
 6 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1f193af7/gemfire-site/content/community/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/content/community/index.html b/gemfire-site/content/community/index.html
index 3c612b4..d1bac78 100644
--- a/gemfire-site/content/community/index.html
+++ b/gemfire-site/content/community/index.html
@@ -244,7 +244,7 @@
     	<div class="row">
         <br/>
 	    	<h2>Join Our Community of Contributors!</h2>
-        <p>The Apache Geode team welcomes contributors who want to support the Geode technology. Our community builds everything from this website, from the Geode code to documentation and best practices information.</p>
+        <p>The Apache Geode (incubating) team welcomes contributors who want to support the Geode technology. Our community builds everything from this website, from the Geode code to documentation and best practices information.</p>
 
         <p>We especially welcome additions and corrections to the documentation, wiki, and website to improve the user experience. Bug reports and fixes and additions to the Apache Geode code are welcome. Helping users learn best practices also earns good karma in our community.</p>
 		</div>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1f193af7/gemfire-site/content/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/content/index.html b/gemfire-site/content/index.html
index a9d3425..c0d92a1 100644
--- a/gemfire-site/content/index.html
+++ b/gemfire-site/content/index.html
@@ -79,7 +79,7 @@
                   Take advantage of Apache Geode's unique technology that blends advanced techniques for data replication, partitioning and distributed processing.
 
                   <br/><br/>
-                  Apache Geode provides a database-like consistency model, reliable transaction processing and a shared-nothing architecture to maintain very low latency performance with high concurrency processing.<br/></p>
+                  Apache Geode (incubating) provides a database-like consistency model, reliable transaction processing and a shared-nothing architecture to maintain very low latency performance with high concurrency processing.<br/></p>
             </div>
 
             <div class="btn-wrapper">
@@ -166,7 +166,7 @@
         <div class="row">
             <div class="col-md-12 text-left">
                 <h2>About the Project</h2>
-                <p>Apache Geode is a data management platform that provides real-time, consistent access to data-intensive applications throughout widely distributed cloud architectures.</p>
+                <p>Apache Geode (incubating) is a data management platform that provides real-time, consistent access to data-intensive applications throughout widely distributed cloud architectures.</p>
 
                 <p>By pooling memory, CPU, network resources, and (optionally) local disk across multiple processes to manage application objects and behavior, it uses dynamic replication and data partitioning techniques to implement high availability, improved performance, scalability, and fault tolerance. In addition to being a distributed data container, Apache Geode is an in-memory data management system that provides reliable asynchronous event notifications and guaranteed message delivery.</p>
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1f193af7/gemfire-site/website/content/community/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/website/content/community/index.html b/gemfire-site/website/content/community/index.html
index ec58600..affc8e7 100644
--- a/gemfire-site/website/content/community/index.html
+++ b/gemfire-site/website/content/community/index.html
@@ -6,7 +6,7 @@
     	<div class="row">
         <br/>
 	    	<h2>Join Our Community of Contributors!</h2>
-        <p>The Apache Geode team welcomes contributors who want to support the Geode technology. Our community builds everything from this website, from the Geode code to documentation and best practices information.</p>
+        <p>The Apache Geode (incubating) team welcomes contributors who want to support the Geode technology. Our community builds everything from this website, from the Geode code to documentation and best practices information.</p>
 
         <p>We especially welcome additions and corrections to the documentation, wiki, and website to improve the user experience. Bug reports and fixes and additions to the Apache Geode code are welcome. Helping users learn best practices also earns good karma in our community.</p>
 		</div>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1f193af7/gemfire-site/website/content/img/intellij.png
----------------------------------------------------------------------
diff --git a/gemfire-site/website/content/img/intellij.png b/gemfire-site/website/content/img/intellij.png
deleted file mode 100644
index 2e8c480..0000000
Binary files a/gemfire-site/website/content/img/intellij.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1f193af7/gemfire-site/website/content/img/yourkit.jpeg
----------------------------------------------------------------------
diff --git a/gemfire-site/website/content/img/yourkit.jpeg b/gemfire-site/website/content/img/yourkit.jpeg
deleted file mode 100644
index 4a53b77..0000000
Binary files a/gemfire-site/website/content/img/yourkit.jpeg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1f193af7/gemfire-site/website/content/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/website/content/index.html b/gemfire-site/website/content/index.html
index 555d024..5e6a474 100644
--- a/gemfire-site/website/content/index.html
+++ b/gemfire-site/website/content/index.html
@@ -100,7 +100,7 @@ title: Performance is key. Consistency is a must.
         <div class="row">
             <div class="col-md-12 text-left">
                 <h2>About the Project</h2>
-                <p>Apache Geode is a data management platform that provides real-time, consistent access to data-intensive applications throughout widely distributed cloud architectures.</p>
+                <p>Apache Geode (incubating) is a data management platform that provides real-time, consistent access to data-intensive applications throughout widely distributed cloud architectures.</p>
 
                 <p>By pooling memory, CPU, network resources, and (optionally) local disk across multiple processes to manage application objects and behavior, it uses dynamic replication and data partitioning techniques to implement high availability, improved performance, scalability, and fault tolerance. In addition to being a distributed data container, Apache Geode is an in-memory data management system that provides reliable asynchronous event notifications and guaranteed message delivery.</p>
 


[22/50] [abbrv] incubator-geode git commit: GEODE-637: Additional tests for AsyncEventQueues

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/476c6cd3/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerOffHeapDUnitTest.java
new file mode 100644
index 0000000..b050ef5
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerOffHeapDUnitTest.java
@@ -0,0 +1,17 @@
+package com.gemstone.gemfire.internal.cache.wan.asyncqueue;
+
+
+@SuppressWarnings("serial")
+public class AsyncEventListenerOffHeapDUnitTest extends
+    AsyncEventListenerDUnitTest {
+
+  public AsyncEventListenerOffHeapDUnitTest(String name) {
+    super(name);
+  }
+
+  @Override
+  public boolean isOffHeap() {
+    return true;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/476c6cd3/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
new file mode 100644
index 0000000..cf4a184
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
@@ -0,0 +1,311 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.internal.cache.wan.asyncqueue;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueTestBase;
+
+import dunit.AsyncInvocation;
+
+public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
+
+  private static final long serialVersionUID = 1L;
+  
+  public AsyncEventQueueStatsDUnitTest(String name) {
+    super(name);
+  }
+  
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+  
+  /**
+   * Normal replication scenario
+   */
+  public void testReplicatedSerialPropagation() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        1000 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1000 });// primary sender
+    pause(2000);//give some time for system to become stable
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {
+        "ln", 0, 1000, 1000, 1000 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueBatchStats",
+        new Object[] { "ln", 10 });
+    vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {
+        "ln", 0, 1000, 0, 0 });
+    vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueBatchStats",
+        new Object[] { "ln", 0 });
+  }
+  
+  /**
+   * Two listeners added to the same RR.
+   */
+  public void testAsyncStatsTwoListeners() throws Exception {
+    Integer lnPort = createFirstLocatorWithDSId(1);
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] {lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] {lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] {lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] {lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln1",
+      false, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln1",
+      false, 100, 100, false, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln1",
+      false, 100, 100, false, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln1",
+      false, 100, 100, false, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln2",
+      false, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln2",
+      false, 100, 100, false, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln2",
+      false, 100, 100, false, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln2",
+      false, 100, 100, false, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln1,ln2", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln1,ln2", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln1,ln2", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln1,ln2", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        1000 });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln1", 1000 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln2", 1000 });
+    pause(2000);//give some time for system to become stable
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {
+        "ln1", 0, 1000, 1000, 1000 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueBatchStats",
+        new Object[] { "ln1", 10 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {
+        "ln2", 0, 1000, 1000, 1000 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueBatchStats",
+        new Object[] { "ln2", 10 });
+    vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {
+        "ln1", 0, 1000, 0, 0 });
+    vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueBatchStats",
+        new Object[] { "ln1", 0 });
+    vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {
+        "ln2", 0, 1000, 0, 0 });
+    vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueBatchStats",
+        new Object[] { "ln2", 0 });
+  }
+  
+  /**
+   * HA scenario: kill one vm when puts are in progress on the other vm.
+   */
+  public void testReplicatedSerialPropagationHA() throws Exception {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] {lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] {lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] {lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] {lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+      false, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+      false, 100, 100, false, false, null, false });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR", "ln", isOffHeap() });
+    
+    AsyncInvocation inv1 = vm5.invokeAsync(AsyncEventQueueTestBase.class, "doPuts",
+        new Object[] { testName + "_RR", 10000 });
+    pause(2000);
+    AsyncInvocation inv2 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "killAsyncEventQueue", new Object[] { "ln" });
+    Boolean isKilled = Boolean.FALSE;
+    try {
+      isKilled = (Boolean)inv2.getResult();
+    }
+    catch (Throwable e) {
+      fail("Unexpected exception while killing a AsyncEventQueue");
+    }
+    AsyncInvocation inv3 = null; 
+    if(!isKilled){
+      inv3 = vm5.invokeAsync(AsyncEventQueueTestBase.class, "killSender", new Object[] { "ln" });
+      inv3.join();
+    }
+    inv1.join();
+    inv2.join();
+    pause(2000);//give some time for system to become stable
+    vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats_Failover", new Object[] {"ln", 10000});
+  }
+
+  /**
+   * Two regions attached to same AsyncEventQueue
+   */
+  public void testReplicatedSerialPropagationUNPorcessedEvents() throws Exception {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+      false, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+      false, 100, 100, false, false, null, false });
+
+    //create one RR (RR_1) on local site
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR_1", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR_1", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR_1", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR_1", "ln", isOffHeap() });
+
+    //create another RR (RR_2) on local site
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR_2", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR_2", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR_2", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+        testName + "_RR_2", "ln", isOffHeap() });
+    
+    //start puts in RR_1 in another thread
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR_1", 1000 });
+    //do puts in RR_2 in main thread
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPutsFrom", new Object[] { testName + "_RR_2", 1000, 1500 });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1500 });
+        
+    pause(2000);//give some time for system to become stable
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {"ln",
+      0, 1500, 1500, 1500});
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueUnprocessedStats", new Object[] {"ln", 0});
+    
+    
+    vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {"ln",
+      0, 1500, 0, 0});
+    vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueUnprocessedStats", new Object[] {"ln", 1500});
+  }
+  
+  /**
+   * Test with conflation enabled
+   */
+  public void testSerialPropagationConflation() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    
+    vm4
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    //pause at least for the batchTimeInterval to make sure that the AsyncEventQueue is actually paused
+    pause(2000);
+
+    final Map keyValues = new HashMap();
+    final Map updateKeyValues = new HashMap();
+    for(int i=0; i< 1000; i++) {
+      keyValues.put(i, i);
+    }
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] { testName + "_RR", keyValues });
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] { "ln", keyValues.size() });
+    
+    for(int i=0;i<500;i++) {
+      updateKeyValues.put(i, i+"_updated");
+    }
+    
+    // Put the update events and check the queue size.
+    // There should be no conflation with the previous create events.
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] { testName + "_RR", updateKeyValues });    
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] { "ln", keyValues.size() + updateKeyValues.size() });
+    
+    // Put the update events again and check the queue size.
+    // There should be conflation with the previous update events.
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] { testName + "_RR", updateKeyValues });    
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] { "ln", keyValues.size() + updateKeyValues.size() });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });
+  
+    vm4.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue", new Object[] { "ln" });
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1000 });
+    
+    pause(2000);// give some time for system to become stable
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {
+        "ln", 0, 2000, 2000, 1000 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueConflatedStats",
+        new Object[] { "ln", 500 });
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/476c6cd3/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
new file mode 100644
index 0000000..2fb7496
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
@@ -0,0 +1,330 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+/**
+ * 
+ */
+package com.gemstone.gemfire.internal.cache.wan.concurrent;
+
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl;
+import com.gemstone.gemfire.cache.wan.GatewaySender.OrderPolicy;
+import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueTestBase;
+
+import dunit.AsyncInvocation;
+
+/**
+ * @author skumar
+ *
+ */
+public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase {
+
+  private static final long serialVersionUID = 1L;
+
+  public ConcurrentAsyncEventQueueDUnitTest(String name) {
+    super(name);
+  }
+
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+  
+  public void testConcurrentSerialAsyncEventQueueAttributes() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 150, true, true, "testDS", true, 5, OrderPolicy.THREAD });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateConcurrentAsyncEventQueueAttributes",
+        new Object[] { "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true, 5, OrderPolicy.THREAD });
+  }
+  
+ 
+  public void testConcurrentParallelAsyncEventQueueAttributesOrderPolicyKey() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        true, 100, 150, true, true, "testDS", true, 5, OrderPolicy.KEY });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateConcurrentAsyncEventQueueAttributes",
+        new Object[] { "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true, 5, OrderPolicy.KEY });
+  }
+
+  public void testConcurrentParallelAsyncEventQueueAttributesOrderPolicyPartition() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        true, 100, 150, true, true, "testDS", true, 5, OrderPolicy.PARTITION });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateConcurrentAsyncEventQueueAttributes",
+        new Object[] { "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true, 5, OrderPolicy.PARTITION });
+  }
+  
+  /**
+   * Test configuration::
+   * 
+   * Region: Replicated 
+   * WAN: Serial 
+   * Dispatcher threads: more than 1
+   * Order policy: key based ordering
+   */
+
+  public void testReplicatedSerialAsyncEventQueueWithMultipleDispatcherThreadsOrderPolicyKey() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 10, true, false, null, false, 3, OrderPolicy.KEY });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 10, true, false, null, false, 3, OrderPolicy.KEY });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 10, true, false, null, false, 3, OrderPolicy.KEY });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 10, true, false, null, false, 3, OrderPolicy.KEY });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        100 });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 100 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+  }
+  
+  /**
+   * Test configuration::
+   * 
+   * Region: Replicated 
+   * WAN: Serial 
+   * Dispatcher threads: more than 1
+   * Order policy: Thread ordering
+   */
+
+  public void testReplicatedSerialAsyncEventQueueWithMultipleDispatcherThreadsOrderPolicyThread() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 10, true, false, null, false, 3, OrderPolicy.THREAD });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 10, true, false, null, false, 3, OrderPolicy.THREAD });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 10, true, false, null, false, 3, OrderPolicy.THREAD });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 10, true, false, null, false, 3, OrderPolicy.THREAD });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    AsyncInvocation inv1 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        50 });
+    AsyncInvocation inv2 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "doNextPuts", new Object[] { testName + "_RR",
+      50, 100 });
+    AsyncInvocation inv3 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "doNextPuts", new Object[] { testName + "_RR",
+      100, 150 });
+    
+    try {
+      inv1.join();
+      inv2.join();
+      inv3.join();
+    } catch (InterruptedException ie) {
+      fail(
+          "Cought interrupted exception while waiting for the task tgo complete.",
+          ie);
+    }
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 150 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+  }
+  
+  /**
+   * Test configuration::
+   * 
+   * Region: PartitionedRegion 
+   * WAN: Parallel
+   * Dispatcher threads: more than 1
+   * Order policy: key based ordering
+   */
+  // Disabling test for bug #48323
+  public void testPartitionedParallelAsyncEventQueueWithMultipleDispatcherThreadsOrderPolicyKey() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        true, 100, 10, true, false, null, false, 3, OrderPolicy.KEY });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        true, 100, 10, true, false, null, false, 3, OrderPolicy.KEY });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        true, 100, 10, true, false, null, false, 3, OrderPolicy.KEY });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        true, 100, 10, true, false, null, false, 3, OrderPolicy.KEY });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+        100 });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+      new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+      new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+      new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+      new Object[] { "ln" });
+  
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+      new Object[] { "ln"});
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+      new Object[] { "ln"});
+    int vm6size = (Integer)vm6.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+      new Object[] { "ln"});
+    int vm7size = (Integer)vm7.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+      new Object[] { "ln"});
+  
+    assertEquals(100, vm4size + vm5size + vm6size + vm7size);
+  
+  }
+  
+  
+  /**
+   * Test configuration::
+   * 
+   * Region: PartitionedRegion 
+   * WAN: Parallel
+   * Dispatcher threads: more than 1
+   * Order policy: PARTITION based ordering
+   */
+  // Disabled test for bug #48323
+  public void testPartitionedParallelAsyncEventQueueWithMultipleDispatcherThreadsOrderPolicyPartition() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue",
+        new Object[] { "ln", true, 100, 10, true, false, null, false, 3,
+            OrderPolicy.PARTITION });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue",
+        new Object[] { "ln", true, 100, 10, true, false, null, false, 3,
+            OrderPolicy.PARTITION });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue",
+        new Object[] { "ln", true, 100, 10, true, false, null, false, 3,
+            OrderPolicy.PARTITION });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue",
+        new Object[] { "ln", true, 100, 10, true, false, null, false, 3,
+            OrderPolicy.PARTITION });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+        100 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventListenerMapSize", new Object[] { "ln" });
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventListenerMapSize", new Object[] { "ln" });
+    int vm6size = (Integer)vm6.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventListenerMapSize", new Object[] { "ln" });
+    int vm7size = (Integer)vm7.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventListenerMapSize", new Object[] { "ln" });
+
+    assertEquals(100, vm4size + vm5size + vm6size + vm7size);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/476c6cd3/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueOffHeapDUnitTest.java
new file mode 100644
index 0000000..41eb22d
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueOffHeapDUnitTest.java
@@ -0,0 +1,16 @@
+package com.gemstone.gemfire.internal.cache.wan.concurrent;
+
+@SuppressWarnings("serial")
+public class ConcurrentAsyncEventQueueOffHeapDUnitTest extends
+    ConcurrentAsyncEventQueueDUnitTest {
+
+  public ConcurrentAsyncEventQueueOffHeapDUnitTest(String name) {
+    super(name);
+  }
+
+  @Override
+  public boolean isOffHeap() {
+    return true;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/476c6cd3/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
new file mode 100644
index 0000000..425d1a6
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
@@ -0,0 +1,53 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+/**
+ * 
+ */
+package com.gemstone.gemfire.internal.cache.wan.misc;
+
+import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueTestBase;
+
+/**
+ * @author skumar
+ *
+ */
+public class CommonParallelAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase {
+  
+  private static final long serialVersionUID = 1L;
+
+  public CommonParallelAsyncEventQueueDUnitTest(String name) {
+    super(name);
+  }
+
+  public void setUp() throws Exception  {
+    super.setUp();
+  }
+    
+  public void testSameSenderWithNonColocatedRegions() throws Exception {
+    addExpectedException("cannot have the same parallel async");
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+      true, 100, 100, false, false, null, false });
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR1", "ln", isOffHeap()  });
+    try {
+      vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+          new Object[] { testName + "_PR2", "ln", isOffHeap()  });
+      fail("Expected IllegateStateException : cannot have the same parallel gateway sender");
+    }
+    catch (Exception e) {
+      if (!(e.getCause() instanceof IllegalStateException)
+          || !(e.getCause().getMessage()
+              .contains("cannot have the same parallel async event queue id"))) {
+        fail("Expected IllegalStateException", e);
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/476c6cd3/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueOffHeapDUnitTest.java
new file mode 100644
index 0000000..8ab77b9
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueOffHeapDUnitTest.java
@@ -0,0 +1,16 @@
+package com.gemstone.gemfire.internal.cache.wan.misc;
+
+@SuppressWarnings("serial")
+public class CommonParallelAsyncEventQueueOffHeapDUnitTest extends
+    CommonParallelAsyncEventQueueDUnitTest {
+
+  public CommonParallelAsyncEventQueueOffHeapDUnitTest(String name) {
+    super(name);
+  }
+
+  @Override
+  public boolean isOffHeap() {
+    return true;
+  }
+
+}



[11/50] [abbrv] incubator-geode git commit: GEODE-563: Moving gfsh tests from closed

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListIndexCommandDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListIndexCommandDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListIndexCommandDUnitTest.java
new file mode 100644
index 0000000..22a38d2
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListIndexCommandDUnitTest.java
@@ -0,0 +1,672 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.cache.query.Index;
+import com.gemstone.gemfire.cache.query.IndexStatistics;
+import com.gemstone.gemfire.cache.query.IndexType;
+import com.gemstone.gemfire.cache.query.SelectResults;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.lang.MutableIdentifiable;
+import com.gemstone.gemfire.internal.lang.ObjectUtils;
+import com.gemstone.gemfire.internal.lang.StringUtils;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.domain.IndexDetails;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * The ListIndexCommandDUnitTest class is a distributed test suite of test cases for testing the index-based GemFire shell
+ * (Gfsh) commands.
+ *
+ * @author John Blum
+ * @see com.gemstone.gemfire.management.internal.cli.commands.CliCommandTestBase
+ * @see com.gemstone.gemfire.management.internal.cli.commands.IndexCommands
+ * @since 7.0
+ */
+@SuppressWarnings("unused")
+public class ListIndexCommandDUnitTest extends CliCommandTestBase {
+
+  protected static final int DEFAULT_REGION_INITIAL_CAPACITY = 10000;
+
+  private final AtomicLong idGenerator = new AtomicLong(0L);
+
+  protected static String toString(final Result result) {
+    assert result != null : "The Result object from the command execution cannot be null!";
+
+    final StringBuilder buffer = new StringBuilder(System.getProperty("line.separator"));
+
+    while (result.hasNextLine()) {
+      buffer.append(result.nextLine());
+      buffer.append(System.getProperty("line.separator"));
+    }
+
+    return buffer.toString();
+  }
+
+
+  public ListIndexCommandDUnitTest(final String testName) {
+    super(testName);
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    createDefaultSetup(null);
+    setupGemFire();
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    super.tearDown2();
+  }
+
+  protected Index createIndex(final String name, final String indexedExpression, final String fromClause) {
+    return createIndex(name, IndexType.FUNCTIONAL, indexedExpression, fromClause);
+  }
+
+  protected Index createIndex(final String name, final IndexType type, final String indexedExpression,
+      final String fromClause) {
+    return new IndexAdapter(name, type, indexedExpression, fromClause);
+  }
+
+  protected Peer createPeer(final VM vm, final Properties distributedSystemProperties,
+      final RegionDefinition... regions) {
+    final Peer peer = new Peer(vm, distributedSystemProperties);
+    peer.add(regions);
+    return peer;
+  }
+
+  protected RegionDefinition createRegionDefinition(final String regionName, final Class<?> keyConstraint,
+      final Class<?> valueConstraint, final Index... indexes) {
+    final RegionDefinition regionDefinition = new RegionDefinition(regionName, keyConstraint, valueConstraint);
+    regionDefinition.add(indexes);
+    return regionDefinition;
+  }
+
+  protected void setupGemFire() throws Exception {
+    final Host host = Host.getHost(0);
+
+    final VM vm1 = host.getVM(1);
+    final VM vm2 = host.getVM(2);
+
+    final Peer peer1 = createPeer(vm1, createDistributedSystemProperties("consumerServer"),
+        createRegionDefinition("consumers", Long.class, Consumer.class,
+            createIndex("cidIdx", IndexType.PRIMARY_KEY, "id", "/consumers"),
+            createIndex("cnameIdx", "name", "/consumers")));
+
+    final Peer peer2 = createPeer(vm2, createDistributedSystemProperties("producerServer"),
+        createRegionDefinition("producers", Long.class, Producer.class, createIndex("pidIdx", "id", "/producers")));
+
+    createRegionWithIndexes(peer1);
+    createRegionWithIndexes(peer2);
+
+    loadConsumerData(peer1, 10000);
+    loadProducerData(peer2, 10000);
+  }
+
+  protected Properties createDistributedSystemProperties(final String gemfireName) {
+    final Properties distributedSystemProperties = new Properties();
+
+    distributedSystemProperties.setProperty(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    distributedSystemProperties.setProperty(DistributionConfig.NAME_NAME, gemfireName);
+
+    return distributedSystemProperties;
+  }
+
+  protected void createRegionWithIndexes(final Peer peer) {
+    peer.run(new SerializableRunnable(
+        String.format("Creating Regions with Indexes on GemFire peer (%1$s).", peer.getName())) {
+      public void run() {
+        // create the GemFire distributed system with custom configuration properties...
+        getSystem(peer.getConfiguration());
+
+        final Cache cache = getCache();
+        final RegionFactory regionFactory = cache.createRegionFactory();
+
+        for (RegionDefinition regionDefinition : peer) {
+          regionFactory.setDataPolicy(DataPolicy.REPLICATE);
+          regionFactory.setIndexMaintenanceSynchronous(true);
+          regionFactory.setInitialCapacity(DEFAULT_REGION_INITIAL_CAPACITY);
+          regionFactory.setKeyConstraint(regionDefinition.getKeyConstraint());
+          regionFactory.setScope(Scope.DISTRIBUTED_NO_ACK);
+          regionFactory.setStatisticsEnabled(true);
+          regionFactory.setValueConstraint(regionDefinition.getValueConstraint());
+
+          final Region region = regionFactory.create(regionDefinition.getRegionName());
+          String indexName = null;
+
+          try {
+            for (Index index : regionDefinition) {
+              indexName = index.getName();
+              if (IndexType.PRIMARY_KEY.equals(index.getType())) {
+                cache.getQueryService().createKeyIndex(indexName, index.getIndexedExpression(), region.getFullPath());
+              } else {
+                cache.getQueryService().createIndex(indexName, index.getIndexedExpression(), region.getFullPath());
+              }
+            }
+          } catch (Exception e) {
+            getLogWriter().error(
+                String.format("Error occurred creating Index (%1$s) on Region (%2$s) - (%3$s)", indexName,
+                    region.getFullPath(), e.getMessage()));
+          }
+        }
+      }
+    });
+  }
+
+  protected void loadConsumerData(final Peer peer, final int operationsTotal) {
+    peer.run(new SerializableRunnable("Load /consumers Region with data") {
+      public void run() {
+        final Cache cache = getCache();
+        final Region<Long, Consumer> consumerRegion = cache.getRegion("/consumers");
+
+        final Random random = new Random(System.currentTimeMillis());
+        int count = 0;
+
+        final List<Proxy> proxies = new ArrayList<Proxy>();
+
+        Consumer consumer;
+        Proxy proxy;
+
+        while (count++ < operationsTotal) {
+          switch (CrudOperation.values()[random.nextInt(CrudOperation.values().length)]) {
+            case RETRIEVE:
+              if (!proxies.isEmpty()) {
+                proxy = proxies.get(random.nextInt(proxies.size()));
+                consumer = query(consumerRegion, "id = " + proxy.getId() + "l"); // works
+                //consumer = query(consumerRegion, "Id = " + proxy.getId()); // works
+                //consumer = query(consumerRegion, "id = " + proxy.getId()); // does not work
+                proxy.setUnitsSnapshot(consumer.getUnits());
+                break;
+              }
+            case UPDATE:
+              if (!proxies.isEmpty()) {
+                proxy = proxies.get(random.nextInt(proxies.size()));
+                consumer = query(consumerRegion, "Name = " + proxy.getName());
+                consumer.consume();
+                break;
+              }
+            case CREATE:
+            default:
+              consumer = new Consumer(idGenerator.incrementAndGet());
+              proxies.add(new Proxy(consumer));
+              consumerRegion.put(consumer.getId(), consumer);
+              assertTrue(consumerRegion.containsKey(consumer.getId()));
+              assertTrue(consumerRegion.containsValueForKey(consumer.getId()));
+              assertSame(consumer, consumerRegion.get(consumer.getId()));
+          }
+        }
+      }
+    });
+  }
+
+  protected void loadProducerData(final Peer peer, final int operationsTotal) {
+    peer.run(new SerializableRunnable("Load /producers Region with data") {
+      public void run() {
+        final Cache cache = getCache();
+        final Region<Long, Producer> producerRegion = cache.getRegion("/producers");
+
+        final Random random = new Random(System.currentTimeMillis());
+        int count = 0;
+
+        final List<Proxy> proxies = new ArrayList<Proxy>();
+
+        Producer producer;
+        Proxy proxy;
+
+        while (count++ < operationsTotal) {
+          switch (CrudOperation.values()[random.nextInt(CrudOperation.values().length)]) {
+            case RETRIEVE:
+              if (!proxies.isEmpty()) {
+                proxy = proxies.get(random.nextInt(proxies.size()));
+                producer = query(producerRegion, "Id = " + proxy.getId());
+                proxy.setUnitsSnapshot(producer.getUnits());
+                break;
+              }
+            case UPDATE:
+              if (!proxies.isEmpty()) {
+                proxy = proxies.get(random.nextInt(proxies.size()));
+                producer = query(producerRegion, "Id = " + proxy.getId());
+                producer.produce();
+                break;
+              }
+            case CREATE:
+            default:
+              producer = new Producer(idGenerator.incrementAndGet());
+              proxies.add(new Proxy(producer));
+              producerRegion.put(producer.getId(), producer);
+              assertTrue(producerRegion.containsKey(producer.getId()));
+              assertTrue(producerRegion.containsValueForKey(producer.getId()));
+              assertSame(producer, producerRegion.get(producer.getId()));
+          }
+        }
+      }
+    });
+  }
+
+  @SuppressWarnings("unchecked")
+  protected <T extends Comparable<T>, B extends AbstractBean<T>> B query(final Cache cache, final String queryString) {
+    try {
+      getLogWriter().info(String.format("Running Query (%1$s) in GemFire...", queryString));
+
+      final SelectResults<B> results = (SelectResults<B>) cache.getQueryService().newQuery(queryString).execute();
+
+      getLogWriter().info(
+          String.format("Running Query (%1$s) in GemFire returned (%2$d) result(s).", queryString, results.size()));
+
+      return (results.iterator().hasNext() ? results.iterator().next() : null);
+    } catch (Exception e) {
+      throw new RuntimeException(String.format("An error occurred running Query (%1$s)!", queryString), e);
+    }
+  }
+
+  protected <T extends Comparable<T>, B extends AbstractBean<T>> B query(final Region<T, B> region,
+      final String queryPredicate) {
+    try {
+      getLogWriter().info(
+          String.format("Running Query (%1$s) on Region (%2$s)...", queryPredicate, region.getFullPath()));
+
+      final SelectResults<B> results = region.query(queryPredicate);
+
+      getLogWriter().info(
+          String.format("Running Query (%1$s) on Region (%2$s) returned (%3$d) result(s).", queryPredicate,
+              region.getFullPath(), results.size()));
+
+      return (results.iterator().hasNext() ? results.iterator().next() : null);
+    } catch (Exception e) {
+      throw new RuntimeException(
+          String.format("An error occurred running Query (%1$s) on Region (%2$s)!", queryPredicate,
+              region.getFullPath()), e);
+    }
+  }
+
+  public void testListIndex() throws Exception {
+    final Result result = executeCommand(CliStrings.LIST_INDEX + " --" + CliStrings.LIST_INDEX__STATS);
+
+    assertNotNull(result);
+    getLogWriter().info(toString(result));
+    assertEquals(Result.Status.OK, result.getStatus());
+  }
+
+  protected static class Peer implements Iterable<RegionDefinition>, Serializable {
+
+    private final Properties distributedSystemProperties;
+
+    private final Set<RegionDefinition> regions = new HashSet<RegionDefinition>();
+
+    private final VM vm;
+
+    public Peer(final VM vm, final Properties distributedSystemProperties) {
+      assert distributedSystemProperties != null : "The GemFire Distributed System configuration properties cannot be null!";
+      this.distributedSystemProperties = distributedSystemProperties;
+      this.vm = vm;
+    }
+
+    public Properties getConfiguration() {
+      return this.distributedSystemProperties;
+    }
+
+    public String getName() {
+      return getConfiguration().getProperty(DistributionConfig.NAME_NAME);
+    }
+
+    public VM getVm() {
+      return vm;
+    }
+
+    public boolean add(final RegionDefinition... regionDefinitions) {
+      return (regionDefinitions != null && regions.addAll(Arrays.asList(regionDefinitions)));
+    }
+
+    public Iterator<RegionDefinition> iterator() {
+      return Collections.unmodifiableSet(regions).iterator();
+    }
+
+    public boolean remove(final RegionDefinition... regionDefinitions) {
+      return (regionDefinitions != null && regions.removeAll(Arrays.asList(regionDefinitions)));
+    }
+
+    public void run(final Runnable runnable) {
+      if (getVm() == null) {
+        runnable.run();
+      } else {
+        getVm().invoke(runnable);
+      }
+    }
+
+    @Override
+    public String toString() {
+      final StringBuilder buffer = new StringBuilder(getClass().getSimpleName());
+      buffer.append(" {configuration = ").append(getConfiguration());
+      buffer.append(", name = ").append(getName());
+      buffer.append(", pid = ").append(getVm().getPid());
+      buffer.append("}");
+      return buffer.toString();
+    }
+  }
+
+  protected static class IndexAdapter implements Index, Serializable {
+
+    private final IndexDetails.IndexType type;
+
+    private final String fromClause;
+    private final String indexedExpression;
+    private final String name;
+
+    protected IndexAdapter(final String name, final String indexedExpression, final String fromClause) {
+      this(name, IndexType.FUNCTIONAL, indexedExpression, fromClause);
+    }
+
+    protected IndexAdapter(final String name, final IndexType type, final String indexedExpression,
+        final String fromClause) {
+      assert name != null : "The name of the Index cannot be null!";
+      assert indexedExpression != null : String.format("The expression to index for Index (%1$s) cannot be null!",
+          name);
+      assert fromClause != null : String.format("The from clause for Index (%1$s) cannot be null!", name);
+
+      this.type = ObjectUtils.defaultIfNull(IndexDetails.IndexType.valueOf(type), IndexDetails.IndexType.FUNCTIONAL);
+      this.name = name;
+      this.indexedExpression = indexedExpression;
+      this.fromClause = fromClause;
+    }
+
+    public String getName() {
+      return this.name;
+    }
+
+    public String getFromClause() {
+      return this.fromClause;
+    }
+
+    public String getCanonicalizedFromClause() {
+      return this.fromClause;
+    }
+
+    public String getIndexedExpression() {
+      return this.indexedExpression;
+    }
+
+    public String getCanonicalizedIndexedExpression() {
+      return this.indexedExpression;
+    }
+
+    public String getProjectionAttributes() {
+      throw new UnsupportedOperationException("Not Implemented!");
+    }
+
+    public String getCanonicalizedProjectionAttributes() {
+      throw new UnsupportedOperationException("Not Implemented!");
+    }
+
+    public Region<?, ?> getRegion() {
+      throw new UnsupportedOperationException("Not Implemented!");
+    }
+
+    public IndexStatistics getStatistics() {
+      throw new UnsupportedOperationException("Not Implemented!");
+    }
+
+    public IndexType getType() {
+      return type.getType();
+    }
+
+    @Override
+    public String toString() {
+      final StringBuilder buffer = new StringBuilder(getClass().getSimpleName());
+      buffer.append(" {indexName = ").append(getName());
+      buffer.append(", indexType = ").append(getType());
+      buffer.append(", indexedExpression = ").append(getIndexedExpression());
+      buffer.append(", fromClause = ").append(getFromClause());
+      buffer.append("}");
+      return buffer.toString();
+    }
+  }
+
+  protected static class RegionDefinition implements Iterable<Index>, Serializable {
+
+    private final Class<?> keyConstraint;
+    private final Class<?> valueConstraint;
+
+    private final Set<Index> indexes = new HashSet<Index>();
+
+    private final String regionName;
+
+    @SuppressWarnings("unchecked")
+    protected RegionDefinition(final String regionName, final Class<?> keyConstraint, final Class<?> valueConstraint) {
+      assert !StringUtils.isBlank(regionName) : "The name of the Region must be specified!";
+      this.regionName = regionName;
+      this.keyConstraint = ObjectUtils.defaultIfNull(keyConstraint, Object.class);
+      this.valueConstraint = ObjectUtils.defaultIfNull(valueConstraint, Object.class);
+    }
+
+    public String getRegionName() {
+      return regionName;
+    }
+
+    public Class<?> getKeyConstraint() {
+      return keyConstraint;
+    }
+
+    public Class<?> getValueConstraint() {
+      return valueConstraint;
+    }
+
+    public boolean add(final Index... indexes) {
+      return (indexes != null && this.indexes.addAll(Arrays.asList(indexes)));
+    }
+
+    public Iterator<Index> iterator() {
+      return Collections.unmodifiableSet(indexes).iterator();
+    }
+
+    public boolean remove(final Index... indexes) {
+      return (indexes != null && this.indexes.removeAll(Arrays.asList(indexes)));
+    }
+
+    @Override
+    public boolean equals(final Object obj) {
+      if (obj == this) {
+        return true;
+      }
+
+      if (!(obj instanceof RegionDefinition)) {
+        return false;
+      }
+
+      final RegionDefinition that = (RegionDefinition) obj;
+
+      return ObjectUtils.equals(getRegionName(), that.getRegionName());
+    }
+
+    @Override
+    public int hashCode() {
+      int hashValue = 17;
+      hashValue = 37 * hashValue + ObjectUtils.hashCode(getRegionName());
+      return hashValue;
+    }
+
+    @Override
+    public String toString() {
+      final StringBuilder buffer = new StringBuilder(getClass().getSimpleName());
+      buffer.append(" {regionName = ").append(getRegionName());
+      buffer.append(", keyConstraint = ").append(getKeyConstraint());
+      buffer.append(", valueConstraint = ").append(getValueConstraint());
+      buffer.append("}");
+      return buffer.toString();
+    }
+  }
+
+  protected static abstract class AbstractBean<T extends Comparable<T>> implements MutableIdentifiable<T>, Serializable {
+
+    private T id;
+    private String name;
+
+    public AbstractBean() {
+    }
+
+    public AbstractBean(final T id) {
+      this.id = id;
+    }
+
+    public T getId() {
+      return id;
+    }
+
+    public void setId(final T id) {
+      this.id = id;
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public void setName(final String name) {
+      this.name = name;
+    }
+
+    @Override
+    public boolean equals(final Object obj) {
+      if (obj == this) {
+        return true;
+      }
+
+      if (!(getClass().isInstance(obj))) {
+        return false;
+      }
+
+      final AbstractBean bean = (AbstractBean) obj;
+
+      return ObjectUtils.equals(getId(), bean.getId());
+    }
+
+    @Override
+    public int hashCode() {
+      int hashValue = 17;
+      hashValue = 37 * hashValue + ObjectUtils.hashCode(getId());
+      return hashValue;
+    }
+
+    @Override
+    public String toString() {
+      final StringBuilder buffer = new StringBuilder(getClass().getSimpleName());
+      buffer.append(" {id = ").append(getId());
+      buffer.append(", name = ").append(getName());
+      buffer.append("}");
+      return buffer.toString();
+    }
+  }
+
+  public static class Consumer extends AbstractBean<Long> {
+
+    private volatile int units;
+
+    public Consumer() {
+    }
+
+    public Consumer(final Long id) {
+      super(id);
+    }
+
+    public int getUnits() {
+      return units;
+    }
+
+    public int consume() {
+      return ++units;
+    }
+  }
+
+  public static class Producer extends AbstractBean<Long> {
+
+    private volatile int units;
+
+    public Producer() {
+    }
+
+    public Producer(final Long id) {
+      super(id);
+    }
+
+    public int getUnits() {
+      return units;
+    }
+
+    public int produce() {
+      return ++units;
+    }
+  }
+
+  public static class Proxy extends AbstractBean<Long> {
+
+    private final AbstractBean<Long> bean;
+    private int unitsSnapshot;
+
+    public Proxy(final AbstractBean<Long> bean) {
+      assert bean != null : "The bean to proxy cannot be null!";
+      this.bean = bean;
+    }
+
+    public AbstractBean<Long> getBean() {
+      return bean;
+    }
+
+    @Override
+    public Long getId() {
+      return getBean().getId();
+    }
+
+    @Override
+    public String getName() {
+      return getBean().getName();
+    }
+
+    public int getUnitsSnapshot() {
+      return unitsSnapshot;
+    }
+
+    public void setUnitsSnapshot(final int unitsSnapshot) {
+      this.unitsSnapshot = unitsSnapshot;
+    }
+  }
+
+  protected static enum CrudOperation {
+    CREATE,
+    RETRIEVE,
+    UPDATE,
+    DELETE
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MemberCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MemberCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MemberCommandsDUnitTest.java
new file mode 100644
index 0000000..6623403
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MemberCommandsDUnitTest.java
@@ -0,0 +1,286 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.FixedPartitionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.CliUtil;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.remote.CommandProcessor;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+/****
+ * @author bansods since 7.0
+ */
+
+public class MemberCommandsDUnitTest extends CacheTestCase {
+  private static final long serialVersionUID = 1L;
+  private static final Map<String, String> EMPTY_ENV = Collections.emptyMap();
+  private static final String REGION1 = "region1";
+  private static final String REGION2 = "region2";
+  private static final String REGION3 = "region3";
+  private static final String SUBREGION1A = "subregion1A";
+  private static final String SUBREGION1B = "subregion1B";
+  private static final String SUBREGION1C = "subregion1C";
+  private static final String PR1 = "PartitionedRegion1";
+  private static final String PR2 = "ParitionedRegion2";
+
+  public MemberCommandsDUnitTest(String name) {
+    super(name);
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    // This test does not require an actual Gfsh connection to work, however when run as part of a suite, prior tests
+    // may mess up the environment causing this test to fail. Setting this prevents false failures.
+    CliUtil.isGfshVM = false;
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    super.tearDown2();
+    CliUtil.isGfshVM = true;
+  }
+
+  private Properties createProperties(String name, String groups) {
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "info");
+    props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
+    props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
+    props.setProperty(DistributionConfig.NAME_NAME, name);
+    props.setProperty(DistributionConfig.GROUPS_NAME, groups);
+    return props;
+  }
+
+  private void createRegionsWithSubRegions() {
+    final Cache cache = getCache();
+
+    RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
+    dataRegionFactory.setConcurrencyLevel(3);
+    Region<String, Integer> region1 = dataRegionFactory.create(REGION1);
+    region1.createSubregion(SUBREGION1C, region1.getAttributes());
+    Region<String, Integer> subregion2 = region1.createSubregion(SUBREGION1A, region1.getAttributes());
+
+    subregion2.createSubregion(SUBREGION1B, subregion2.getAttributes());
+    dataRegionFactory.create(REGION2);
+    dataRegionFactory.create(REGION3);
+  }
+
+  private void createPartitionedRegion1() {
+    final Cache cache = getCache();
+    // Create the data region
+    RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+    dataRegionFactory.create(PR1);
+  }
+
+  private void createPartitionedRegion(String regionName) {
+    final Cache cache = getCache();
+    // Create the data region
+    RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+    dataRegionFactory.setConcurrencyLevel(4);
+    EvictionAttributes ea = EvictionAttributes.createLIFOEntryAttributes(100, EvictionAction.LOCAL_DESTROY);
+    dataRegionFactory.setEvictionAttributes(ea);
+    dataRegionFactory.setEnableAsyncConflation(true);
+
+    FixedPartitionAttributes fpa = FixedPartitionAttributes.createFixedPartition("Par1", true);
+    PartitionAttributes pa = new PartitionAttributesFactory().setLocalMaxMemory(100).setRecoveryDelay(
+        2).setTotalMaxMemory(200).setRedundantCopies(1).addFixedPartitionAttributes(fpa).create();
+    dataRegionFactory.setPartitionAttributes(pa);
+
+    dataRegionFactory.create(regionName);
+  }
+
+
+  private void createLocalRegion() {
+    final Cache cache = getCache();
+    // Create the data region
+    RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.LOCAL);
+    dataRegionFactory.create("LocalRegion");
+  }
+
+  private void setupSystem() throws IOException {
+    disconnectAllFromDS();
+    final Host host = Host.getHost(0);
+    final VM[] servers = {host.getVM(0), host.getVM(1)};
+
+    final Properties propsMe = createProperties("me", "G1");
+    final Properties propsServer1 = createProperties("Server1", "G1");
+    final Properties propsServer2 = createProperties("Server2", "G2");
+
+
+    getSystem(propsMe);
+    final Cache cache = getCache();
+    RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE_PROXY);
+    dataRegionFactory.setConcurrencyLevel(5);
+    Region<String, Integer> region1 = dataRegionFactory.create(REGION1);
+
+
+    servers[1].invoke(new SerializableRunnable("Create cache for server1") {
+      public void run() {
+        getSystem(propsServer2);
+        createRegionsWithSubRegions();
+        createLocalRegion();
+        createPartitionedRegion("ParReg1");
+      }
+    });
+    servers[0].invoke(new SerializableRunnable("Create cache for server0") {
+      public void run() {
+        getSystem(propsServer1);
+        createRegionsWithSubRegions();
+        createLocalRegion();
+      }
+    });
+  }
+
+  private Properties createProperties(Host host, int locatorPort) {
+    Properties props = new Properties();
+
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    props.setProperty(DistributionConfig.LOCATORS_NAME, getServerHostName(host) + "[" + locatorPort + "]");
+    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "info");
+    props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
+    props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
+    props.put(DistributionConfig.ENABLE_NETWORK_PARTITION_DETECTION_NAME, "true");
+
+    return props;
+  }
+
+  /**
+   * Creates the cache.
+   */
+  private void createCache(Properties props) {
+    getSystem(props);
+    final Cache cache = getCache();
+  }
+
+  /***
+   * Tests the execution of "list member" command which should list out all the members in the DS
+   *
+   * @throws IOException
+   * @throws ClassNotFoundException
+   */
+  public void testListMemberAll() throws IOException, ClassNotFoundException {
+    setupSystem();
+    CommandProcessor commandProcessor = new CommandProcessor();
+    Result result = commandProcessor.createCommandStatement(CliStrings.LIST_MEMBER, EMPTY_ENV).process();
+    getLogWriter().info("#SB" + getResultAsString(result));
+    assertEquals(Status.OK, result.getStatus());
+  }
+
+  /****
+   * Tests the execution of "list member" command, when no cache is created
+   *
+   * @throws IOException
+   * @throws ClassNotFoundException
+   */
+  public void testListMemberWithNoCache() throws IOException, ClassNotFoundException {
+    final Host host = Host.getHost(0);
+    final VM[] servers = {host.getVM(0), host.getVM(1)};
+    final int openPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(1);
+    final File logFile = new File(getUniqueName() + "-locator" + openPorts[0] + ".log");
+
+    Locator locator = Locator.startLocator(openPorts[0], logFile);
+    try {
+
+      final Properties props = createProperties(host, openPorts[0]);
+      CommandProcessor commandProcessor = new CommandProcessor();
+      Result result = commandProcessor.createCommandStatement(CliStrings.LIST_MEMBER, EMPTY_ENV).process();
+
+      getLogWriter().info("#SB" + getResultAsString(result));
+      assertEquals(Status.ERROR, result.getStatus());
+    } finally {
+      locator.stop(); // fix for bug 46562
+    }
+  }
+
+  /***
+   * Tests list member --group=G1
+   *
+   * @throws IOException
+   * @throws ClassNotFoundException
+   */
+  public void testListMemberWithGroups() throws IOException, ClassNotFoundException {
+    setupSystem();
+    CommandProcessor commandProcessor = new CommandProcessor();
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.LIST_MEMBER);
+    csb.addOption(CliStrings.LIST_MEMBER__GROUP, "G1");
+    Result result = commandProcessor.createCommandStatement(csb.toString(), EMPTY_ENV).process();
+    getLogWriter().info("#SB" + getResultAsString(result));
+    assertEquals(Status.OK, result.getStatus());
+  }
+
+  /***
+   * Tests the "describe member" command for all the members in the DS
+   *
+   * @throws IOException
+   * @throws ClassNotFoundException
+   */
+  public void testDescribeMember() throws IOException, ClassNotFoundException {
+    setupSystem();
+    CommandProcessor commandProcessor = new CommandProcessor();
+    GemFireCacheImpl cache = (GemFireCacheImpl) CacheFactory.getAnyInstance();
+    Set<DistributedMember> members = cache.getDistributedSystem().getAllOtherMembers();
+
+    Iterator<DistributedMember> iters = members.iterator();
+
+    while (iters.hasNext()) {
+      DistributedMember member = iters.next();
+      Result result = commandProcessor.createCommandStatement("describe member --name=" + member.getId(),
+          EMPTY_ENV).process();
+      assertEquals(Status.OK, result.getStatus());
+      getLogWriter().info("#SB" + getResultAsString(result));
+      //assertEquals(true, result.getStatus().equals(Status.OK));
+    }
+  }
+
+  private String getResultAsString(Result result) {
+    StringBuilder sb = new StringBuilder();
+    while (result.hasNextLine()) {
+      sb.append(result.nextLine());
+    }
+    return sb.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsDUnitTest.java
new file mode 100644
index 0000000..ca3f94d
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsDUnitTest.java
@@ -0,0 +1,492 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.lang.ThreadUtils;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.HeadlessGfsh;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData;
+import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData.SectionResultData;
+import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
+import com.gemstone.gemfire.management.internal.cli.result.ResultData;
+import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import dunit.VM;
+import org.junit.Ignore;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * DUnit class for testing gfsh commands: gc, shutdown
+ *
+ * @author apande
+ */
+public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
+
+  private static final long serialVersionUID = 1L;
+  private static String cachedLogLevel;
+
+  public MiscellaneousCommandsDUnitTest(String name) {
+    super(name);
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    invokeInEveryVM(new SerializableRunnable("reset log level") {
+      public void run() {
+        if (cachedLogLevel != null) {
+          System.setProperty("gemfire.log-level", cachedLogLevel);
+          cachedLogLevel = null;
+        }
+      }
+    });
+  }
+
+  public void testGCForGroup() {
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+    createDefaultSetup(localProps);
+    String command = "gc --group=Group1";
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      cmdResult.resetToFirstLine();
+      String cmdResultStr = commandResultToString(cmdResult);
+      getLogWriter().info("testGCForGroup cmdResultStr=" + cmdResultStr + "; cmdResult=" + cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      if (cmdResult.getType().equals(ResultData.TYPE_TABULAR)) {
+        TabularResultData table = (TabularResultData) cmdResult.getResultData();
+        List<String> memberNames = table.retrieveAllValues(CliStrings.GC__MSG__MEMBER_NAME);
+        assertEquals(1, memberNames.size());
+      } else {
+        fail("testGCForGroup failed as CommandResult should be table type");
+      }
+    } else {
+      fail("testGCForGroup failed as did not get CommandResult");
+    }
+  }
+
+  public static String getMemberId() {
+    Cache cache = new GemfireDataCommandsDUnitTest("test").getCache();
+    return cache.getDistributedSystem().getDistributedMember().getId();
+  }
+
+  public void testGCForMemberID() {
+    createDefaultSetup(null);
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final String vm1MemberId = (String) vm1.invoke(MiscellaneousCommandsDUnitTest.class, "getMemberId");
+    String command = "gc --member=" + vm1MemberId;
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      cmdResult.resetToFirstLine();
+      String cmdResultStr = commandResultToString(cmdResult);
+      getLogWriter().info("testGCForMemberID cmdResultStr=" + cmdResultStr);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      if (cmdResult.getType().equals(ResultData.TYPE_TABULAR)) {
+        TabularResultData table = (TabularResultData) cmdResult.getResultData();
+        List<String> memberNames = table.retrieveAllValues(CliStrings.GC__MSG__MEMBER_NAME);
+        assertEquals(1, memberNames.size());
+      } else {
+        fail("testGCForMemberID failed as CommandResult should be table type");
+      }
+    } else {
+      fail("testGCForMemberID failed as did not get CommandResult");
+    }
+  }
+
+  public void testShowLogDefault() throws IOException {
+    Properties props = new Properties();
+    try {
+      props.setProperty("log-file", "testShowLogDefault.log");
+      createDefaultSetup(props);
+      final VM vm1 = Host.getHost(0).getVM(0);
+      final String vm1MemberId = (String) vm1.invoke(MiscellaneousCommandsDUnitTest.class, "getMemberId");
+      String command = "show log --member=" + vm1MemberId;
+      CommandResult cmdResult = executeCommand(command);
+      if (cmdResult != null) {
+        String log = commandResultToString(cmdResult);
+        assertNotNull(log);
+        getLogWriter().info("Show Log is " + log);
+        assertEquals(Result.Status.OK, cmdResult.getStatus());
+      } else {
+        fail("testShowLogDefault failed as did not get CommandResult");
+      }
+    } finally {
+      disconnectAllFromDS();
+    }
+  }
+
+  public void testShowLogNumLines() {
+    Properties props = new Properties();
+    props.setProperty("log-file", "testShowLogNumLines.log");
+    try {
+      createDefaultSetup(props);
+      final VM vm1 = Host.getHost(0).getVM(0);
+      final String vm1MemberId = (String) vm1.invoke(MiscellaneousCommandsDUnitTest.class, "getMemberId");
+      String command = "show log --member=" + vm1MemberId + " --lines=50";
+      CommandResult cmdResult = executeCommand(command);
+      if (cmdResult != null) {
+        String log = commandResultToString(cmdResult);
+        assertNotNull(log);
+        getLogWriter().info("Show Log is " + log);
+        assertEquals(Result.Status.OK, cmdResult.getStatus());
+      } else {
+        fail("testShowLogNumLines failed as did not get CommandResult");
+      }
+    } finally {
+      disconnectAllFromDS();
+    }
+  }
+
+  public void testGCForEntireCluster() {
+    setupForGC();
+    String command = "gc";
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      cmdResult.resetToFirstLine();
+      String cmdResultStr = commandResultToString(cmdResult);
+      getLogWriter().info("testGCForEntireCluster cmdResultStr=" + cmdResultStr + "; cmdResult=" + cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      if (cmdResult.getType().equals(ResultData.TYPE_TABULAR)) {
+        TabularResultData table = (TabularResultData) cmdResult.getResultData();
+        List<String> memberNames = table.retrieveAllValues(CliStrings.GC__MSG__MEMBER_NAME);
+        assertEquals(3, memberNames.size());
+      } else {
+        fail("testGCForEntireCluster failed as CommandResult should be table type");
+      }
+    } else {
+      fail("testGCForEntireCluster failed as did not get CommandResult");
+    }
+  }
+
+  void setupForGC() {
+    disconnectAllFromDS();
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+
+    createDefaultSetup(null);
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create("testRegion");
+        for (int i = 0; i < 10; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+    vm2.invoke(new SerializableRunnable() {
+      public void run() {
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        dataRegionFactory.create("testRegion");
+      }
+    });
+  }
+
+  public void testShutDownWithoutTimeout() {
+
+    addExpectedException("EntryDestroyedException");
+
+    setupForShutDown();
+    ThreadUtils.sleep(2500);
+
+    String command = "shutdown";
+    CommandResult cmdResult = executeCommand(command);
+
+    if (cmdResult != null) {
+      String cmdResultStr = commandResultToString(cmdResult);
+      getLogWriter().info("testShutDownWithoutTimeout cmdResultStr=" + cmdResultStr);
+    }
+
+    verifyShutDown();
+
+    final HeadlessGfsh defaultShell = getDefaultShell();
+
+    // Need for the Gfsh HTTP enablement during shutdown to properly assess the
+    // state of the connection.
+    waitForCriterion(new WaitCriterion() {
+      public boolean done() {
+        return !defaultShell.isConnectedAndReady();
+      }
+
+      public String description() {
+        return "Waits for the shell to disconnect!";
+      }
+    }, 1000, 250, true);
+
+    assertFalse(defaultShell.isConnectedAndReady());
+  }
+
+  @Ignore("Disabled for 52350")
+  public void DISABLED_testShutDownWithTimeout() {
+    setupForShutDown();
+    ThreadUtils.sleep(2500);
+
+    addExpectedException("EntryDestroyedException");
+
+    String command = "shutdown --time-out=15";
+    CommandResult cmdResult = executeCommand(command);
+
+    if (cmdResult != null) {
+      String cmdResultStr = commandResultToString(cmdResult);
+      getLogWriter().info("testShutDownWithTIMEOUT cmdResultStr=" + cmdResultStr);
+    }
+
+    verifyShutDown();
+
+    final HeadlessGfsh defaultShell = getDefaultShell();
+
+    // Need for the Gfsh HTTP enablement during shutdown to properly assess the state of the connection.
+    waitForCriterion(new WaitCriterion() {
+      public boolean done() {
+        return !defaultShell.isConnectedAndReady();
+      }
+
+      public String description() {
+        return "Waits for the shell to disconnect!";
+      }
+    }, 1000, 250, false);
+
+    assertFalse(defaultShell.isConnectedAndReady());
+  }
+
+  public void testShutDownForTIMEOUT() {
+    setupForShutDown();
+    ThreadUtils.sleep(2500);
+    final VM vm0 = Host.getHost(0).getVM(0);
+    vm0.invoke(new SerializableRunnable() {
+      public void run() {
+        System.setProperty("ThrowTimeoutException", "true");
+      }
+    });
+
+
+    String command = "shutdown --time-out=15";
+    CommandResult cmdResult = executeCommand(command);
+
+    if (cmdResult != null) {
+      String cmdResultStr = commandResultToString(cmdResult);
+      getLogWriter().info("testShutDownForTIMEOUT cmdResultStr = " + cmdResultStr);
+      CommandResult result = (CommandResult) ResultBuilder.createInfoResult(CliStrings.SHUTDOWN_TIMEDOUT);
+      String expectedResult = commandResultToString(result);
+      assertEquals(expectedResult, cmdResultStr);
+    }
+    vm0.invoke(new SerializableRunnable() {
+      public void run() {
+        System.clearProperty("ThrowTimeoutException");
+      }
+    });
+  }
+
+  void setupForChangeLogLevel() {
+    final VM vm0 = Host.getHost(0).getVM(0);
+    final VM vm1 = Host.getHost(0).getVM(1);
+
+    createDefaultSetup(null);
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create("testRegion");
+        for (int i = 0; i < 10; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+  }
+
+  void setupForShutDown() {
+    final VM vm0 = Host.getHost(0).getVM(0);
+    final VM vm1 = Host.getHost(0).getVM(1);
+
+    System.setProperty(CliStrings.IGNORE_INTERCEPTORS, "true");
+    createDefaultSetup(null);
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create("testRegion");
+        for (int i = 0; i < 10; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+  }
+
+  void verifyShutDown() {
+    final VM vm0 = Host.getHost(0).getVM(0);
+    final VM vm1 = Host.getHost(0).getVM(1);
+
+    @SuppressWarnings("serial") final SerializableCallable connectedChecker = new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        boolean cacheExists = true;
+        try {
+          Cache cacheInstance = CacheFactory.getAnyInstance();
+          cacheExists = cacheInstance.getDistributedSystem().isConnected();
+        } catch (CacheClosedException e) {
+          cacheExists = false;
+        }
+        return cacheExists;
+      }
+    };
+
+    WaitCriterion waitCriterion = new WaitCriterion() {
+      @Override
+      public boolean done() {
+        return Boolean.FALSE.equals(vm0.invoke(connectedChecker)) && Boolean.FALSE.equals(vm1.invoke(connectedChecker));
+      }
+
+      @Override
+      public String description() {
+        return "Wait for gfsh to get disconnected from Manager.";
+      }
+    };
+    waitForCriterion(waitCriterion, 5000, 200, true);
+
+    assertTrue(Boolean.FALSE.equals(vm1.invoke(connectedChecker)));
+    assertTrue(Boolean.FALSE.equals(vm0.invoke(connectedChecker)));
+  }
+
+  public void testChangeLogLevelForMembers() {
+    final VM vm0 = Host.getHost(0).getVM(0);
+    final VM vm1 = Host.getHost(0).getVM(1);
+
+    setupForChangeLogLevel();
+
+    String serverName1 = (String) vm0.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        cachedLogLevel = System.getProperty("gemfire.log-level");
+        return GemFireCacheImpl.getInstance().getDistributedSystem().getDistributedMember().getId();
+      }
+    });
+
+    String serverName2 = (String) vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        cachedLogLevel = System.getProperty("gemfire.log-level");
+        return GemFireCacheImpl.getInstance().getDistributedSystem().getDistributedMember().getId();
+      }
+    });
+
+    String commandString = CliStrings.CHANGE_LOGLEVEL + " --" + CliStrings.CHANGE_LOGLEVEL__LOGLEVEL + "=finer" + " --" + CliStrings.CHANGE_LOGLEVEL__MEMBER + "=" + serverName1 + "," + serverName2;
+
+    CommandResult commandResult = executeCommand(commandString);
+    getLogWriter().info("testChangeLogLevel commandResult=" + commandResult);
+    assertTrue(Status.OK.equals(commandResult.getStatus()));
+    CompositeResultData resultData = (CompositeResultData) commandResult.getResultData();
+    SectionResultData section = resultData.retrieveSection("section");
+    assertNotNull(section);
+    TabularResultData tableResultData = section.retrieveTable("ChangeLogLevel");
+    assertNotNull(tableResultData);
+
+    List<String> columns = tableResultData.retrieveAllValues(CliStrings.CHANGE_LOGLEVEL__COLUMN_MEMBER);
+    List<String> status = tableResultData.retrieveAllValues(CliStrings.CHANGE_LOGLEVEL__COLUMN_STATUS);
+
+    assertEquals(2, columns.size());
+    assertEquals(2, status.size());
+
+    assertTrue(columns.contains(serverName1));
+    assertTrue(columns.contains(serverName2));
+    assertTrue(status.contains("true"));
+  }
+
+  public void testChangeLogLevelForGrps() {
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group0");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+    final String grp1 = "Group1";
+    final String grp2 = "Group2";
+
+    createDefaultSetup(localProps);
+
+    String vm1id = (String) vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, grp1);
+        getSystem(localProps);
+        Cache cache = getCache();
+        return cache.getDistributedSystem().getDistributedMember().getId();
+      }
+    });
+
+    String vm2id = (String) vm2.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, grp2);
+        getSystem(localProps);
+        Cache cache = getCache();
+        return cache.getDistributedSystem().getDistributedMember().getId();
+      }
+    });
+
+    String commandString = CliStrings.CHANGE_LOGLEVEL + " --" + CliStrings.CHANGE_LOGLEVEL__LOGLEVEL + "=finer" + " --" + CliStrings.CHANGE_LOGLEVEL__GROUPS + "=" + grp1 + "," + grp2;
+
+    CommandResult commandResult = executeCommand(commandString);
+    getLogWriter().info("testChangeLogLevelForGrps commandResult=" + commandResult);
+
+    assertTrue(Status.OK.equals(commandResult.getStatus()));
+
+    CompositeResultData resultData = (CompositeResultData) commandResult.getResultData();
+    SectionResultData section = resultData.retrieveSection("section");
+    assertNotNull(section);
+    TabularResultData tableResultData = section.retrieveTable("ChangeLogLevel");
+    assertNotNull(tableResultData);
+
+    List<String> columns = tableResultData.retrieveAllValues(CliStrings.CHANGE_LOGLEVEL__COLUMN_MEMBER);
+    List<String> status = tableResultData.retrieveAllValues(CliStrings.CHANGE_LOGLEVEL__COLUMN_STATUS);
+
+    assertEquals(2, columns.size());
+    assertEquals(2, status.size());
+
+    assertTrue(columns.contains(vm1id));
+    assertTrue(columns.contains(vm2id));
+    assertTrue(status.contains("true"));
+  }
+}

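The gc tests in the class above all verify the same thing; condensed, the check looks roughly like the sketch below when run inside a CliCommandTestBase subclass. The group name and expected row count are illustrative.

    CommandResult cmdResult = executeCommand("gc --group=Group1");
    assertEquals(Result.Status.OK, cmdResult.getStatus());
    assertEquals(ResultData.TYPE_TABULAR, cmdResult.getType());
    TabularResultData table = (TabularResultData) cmdResult.getResultData();
    // one row per member that performed a GC; only a single member belongs to Group1 here
    assertEquals(1, table.retrieveAllValues(CliStrings.GC__MSG__MEMBER_NAME).size());
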
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart1DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart1DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart1DUnitTest.java
new file mode 100644
index 0000000..6afa7ee
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart1DUnitTest.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.internal.logging.LogWriterImpl;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.File;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+/**
+ * DUnit class for testing the gfsh command: export logs
+ *
+ * @author apande
+ */
+
+public class MiscellaneousCommandsExportLogsPart1DUnitTest extends CliCommandTestBase {
+
+  private static final long serialVersionUID = 1L;
+
+  public MiscellaneousCommandsExportLogsPart1DUnitTest(String name) {
+    super(name);
+  }
+
+  public static String getMemberId() {
+    Cache cache = new GemfireDataCommandsDUnitTest("test").getCache();
+    return cache.getDistributedSystem().getDistributedMember().getId();
+  }
+
+  void setupForExportLogs() {
+    final VM vm1 = Host.getHost(0).getVM(1);
+    createDefaultSetup(null);
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create("testRegion");
+        for (int i = 0; i < 5; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+  }
+
+  String getCurrentTimeString() {
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy_MM_dd_HH_mm_ss_SSS_z");
+    Date startDate = new Date(System.currentTimeMillis());
+    String formattedStartDate = sf.format(startDate);
+    return ("_" + formattedStartDate);
+  }
+
+  public void testExportLogs() throws IOException {
+    Date startDate = new Date(System.currentTimeMillis() - 2 * 60 * 1000);
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy/MM/dd");
+    String start = sf.format(startDate);
+
+    Date enddate = new Date(System.currentTimeMillis() + 2 * 60 * 60 * 1000);
+    String end = sf.format(enddate);
+    String dir = getCurrentTimeString();
+
+    setupForExportLogs();
+    String logLevel = LogWriterImpl.levelToString(LogWriterImpl.INFO_LEVEL);
+
+    MiscellaneousCommands misc = new MiscellaneousCommands();
+    getCache();
+
+    Result cmdResult = misc.exportLogsPreprocessing("./testExportLogs" + dir, null, null, logLevel, false, false, start,
+        end, 1);
+
+    getLogWriter().info("testExportLogs command result =" + cmdResult);
+
+    if (cmdResult != null) {
+      String cmdStringResult = commandResultToString((CommandResult) cmdResult);
+      getLogWriter().info("testExportLogs cmdStringResult=" + cmdStringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testExportLogs failed as did not get CommandResult");
+    }
+    FileUtil.delete(new File("./testExportLogs" + dir));
+  }
+
+  public void testExportLogsForMerge() throws IOException {
+    setupForExportLogs();
+    Date startDate = new Date(System.currentTimeMillis() - 2 * 60 * 1000);
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy/MM/dd");
+    String start = sf.format(startDate);
+
+    Date enddate = new Date(System.currentTimeMillis() + 2 * 60 * 60 * 1000);
+    String end = sf.format(enddate);
+    String dir = getCurrentTimeString();
+
+    String logLevel = LogWriterImpl.levelToString(LogWriterImpl.INFO_LEVEL);
+
+    MiscellaneousCommands misc = new MiscellaneousCommands();
+    getCache();
+
+    Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForMerge" + dir, null, null, logLevel, false, true,
+        start, end, 1);
+    getLogWriter().info("testExportLogsForMerge command=" + cmdResult);
+
+    if (cmdResult != null) {
+      String cmdStringResult = commandResultToString((CommandResult) cmdResult);
+      getLogWriter().info("testExportLogsForMerge cmdStringResult=" + cmdStringResult);
+
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testExportLogsForMerge failed as did not get CommandResult");
+    }
+    FileUtil.delete(new File("./testExportLogsForMerge" + dir));
+  }
+}
\ No newline at end of file

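The export-logs tests above (and in the parts that follow) all build their time window the same way, as plain "yyyy/MM/dd" strings handed to exportLogsPreprocessing. A minimal sketch of that setup, with the offsets taken from the tests:

    SimpleDateFormat sf = new SimpleDateFormat("yyyy/MM/dd");
    String start = sf.format(new Date(System.currentTimeMillis() - 2 * 60 * 1000));      // two minutes back
    String end = sf.format(new Date(System.currentTimeMillis() + 2 * 60 * 60 * 1000));   // two hours ahead
    // The arguments in the calls above appear to be, in order: directory, groups, memberId,
    // logLevel, onlyLogLevel, mergeLogs, start, end, numLogFiles -- inferred from the call
    // sites, not from the method's declaration.
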
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart2DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart2DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart2DUnitTest.java
new file mode 100644
index 0000000..6a1d86c
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart2DUnitTest.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.internal.logging.LogWriterImpl;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.File;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+/**
+ * DUnit class for testing the gfsh command: export logs
+ *
+ * @author apande
+ */
+
+public class MiscellaneousCommandsExportLogsPart2DUnitTest extends CliCommandTestBase {
+
+  private static final long serialVersionUID = 1L;
+
+  public MiscellaneousCommandsExportLogsPart2DUnitTest(String name) {
+    super(name);
+  }
+
+  public static String getMemberId() {
+    Cache cache = new GemfireDataCommandsDUnitTest("test").getCache();
+    return cache.getDistributedSystem().getDistributedMember().getId();
+  }
+
+  void setupForExportLogs() {
+    final VM vm1 = Host.getHost(0).getVM(1);
+    createDefaultSetup(null);
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create("testRegion");
+        for (int i = 0; i < 5; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+  }
+
+  String getCurrentTimeString() {
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy_MM_dd_HH_mm_ss_SSS_z");
+    Date startDate = new Date(System.currentTimeMillis());
+    String formattedStartDate = sf.format(startDate);
+    return ("_" + formattedStartDate);
+  }
+
+  public void testExportLogsForLogLevel() throws IOException {
+    setupForExportLogs();
+
+    Date startDate = new Date(System.currentTimeMillis() - 60 * 1000);
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy/MM/dd");
+    String start = sf.format(startDate);
+
+    Date enddate = new Date(System.currentTimeMillis() + 60 * 1000);
+    String end = sf.format(enddate);
+    String dir = getCurrentTimeString();
+
+    String logLevel = LogWriterImpl.levelToString(LogWriterImpl.CONFIG_LEVEL);
+
+    MiscellaneousCommands misc = new MiscellaneousCommands();
+    getCache();
+
+    Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForLogLevel" + dir, null, null, logLevel, false,
+        false, start, end, 1);
+
+    getLogWriter().info("testExportLogsForLogLevel command=" + cmdResult);
+
+    if (cmdResult != null) {
+      String cmdStringResult = commandResultToString((CommandResult) cmdResult);
+      getLogWriter().info("testExportLogsForLogLevel cmdStringResult=" + cmdStringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testExportLogsForLogLevel failed as did not get CommandResult");
+    }
+    FileUtil.delete(new File("testExportLogsForLogLevel" + dir));
+  }
+
+
+  public void testExportLogsForLogLevelWithUPTOLOGLEVEL() throws IOException {
+    setupForExportLogs();
+
+    Date startDate = new Date(System.currentTimeMillis() - 2 * 60 * 1000);
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy/MM/dd");
+    String start = sf.format(startDate);
+
+    Date enddate = new Date(System.currentTimeMillis() + 2 * 60 * 60 * 1000);
+    String end = sf.format(enddate);
+    String dir = getCurrentTimeString();
+
+    String logLevel = LogWriterImpl.levelToString(LogWriterImpl.SEVERE_LEVEL);
+
+    MiscellaneousCommands misc = new MiscellaneousCommands();
+    getCache();
+
+    Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForLogLevelWithUPTOLOGLEVEL" + dir, null, null,
+        logLevel, true, false, start, end, 1);
+
+    getLogWriter().info("testExportLogsForLogLevelWithUPTOLOGLEVEL command=" + cmdResult);
+
+    if (cmdResult != null) {
+      String cmdStringResult = commandResultToString((CommandResult) cmdResult);
+      getLogWriter().info("testExportLogsForLogLevelWithUPTOLOGLEVEL cmdStringResult=" + cmdStringResult);
+
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testExportLogsForLogLevelWithUPTOLOGLEVEL failed as did not get CommandResult");
+    }
+    FileUtil.delete(new File("testExportLogsForLogLevelWithUPTOLOGLEVEL" + dir));
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    super.tearDown2();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart3DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart3DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart3DUnitTest.java
new file mode 100644
index 0000000..1c2933d
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart3DUnitTest.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.internal.logging.LogWriterImpl;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.File;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Properties;
+
+/**
+ * DUnit class for testing the gfsh command: export logs
+ *
+ * @author apande
+ */
+
+public class MiscellaneousCommandsExportLogsPart3DUnitTest extends CliCommandTestBase {
+
+  private static final long serialVersionUID = 1L;
+
+  public MiscellaneousCommandsExportLogsPart3DUnitTest(String name) {
+    super(name);
+  }
+
+  public static String getMemberId() {
+    Cache cache = new GemfireDataCommandsDUnitTest("test").getCache();
+    return cache.getDistributedSystem().getDistributedMember().getId();
+  }
+
+  void setupForExportLogs() {
+    final VM vm1 = Host.getHost(0).getVM(1);
+    createDefaultSetup(null);
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create("testRegion");
+        for (int i = 0; i < 5; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+  }
+
+  String getCurrentTimeString() {
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy_MM_dd_HH_mm_ss_SSS_z");
+    Date startDate = new Date(System.currentTimeMillis());
+    String formattedStartDate = sf.format(startDate);
+    return ("_" + formattedStartDate);
+  }
+
+  public void testExportLogsForGroup() throws IOException {
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+    createDefaultSetup(localProps);
+    String dir = getCurrentTimeString();
+
+    Date startDate = new Date(System.currentTimeMillis() - 2 * 60 * 1000);
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy/MM/dd");
+    String start = sf.format(startDate);
+
+    Date enddate = new Date(System.currentTimeMillis() + 2 * 60 * 60 * 1000);
+    String end = sf.format(enddate);
+
+    String logLevel = LogWriterImpl.levelToString(LogWriterImpl.INFO_LEVEL);
+
+    MiscellaneousCommands misc = new MiscellaneousCommands();
+    getCache();
+    String[] groups = new String[1];
+    groups[0] = "Group1";
+
+    Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForGroup" + dir, groups, null, logLevel, false,
+        false, start, end, 1);
+
+    getLogWriter().info("testExportLogsForGroup command result =" + cmdResult);
+    if (cmdResult != null) {
+      String cmdStringResult = commandResultToString((CommandResult) cmdResult);
+      getLogWriter().info("testExportLogsForGroup cmdStringResult=" + cmdStringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testExportLogsForGroup failed as did not get CommandResult");
+    }
+    FileUtil.delete(new File("testExportLogsForGroup" + dir));
+  }
+
+  public void testExportLogsForMember() throws IOException {
+    createDefaultSetup(null);
+
+    Date startDate = new Date(System.currentTimeMillis() - 2 * 60 * 1000);
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy/MM/dd");
+    String start = sf.format(startDate);
+
+    Date enddate = new Date(System.currentTimeMillis() + 2 * 60 * 60 * 1000);
+    String end = sf.format(enddate);
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final String vm1MemberId = (String) vm1.invoke(MiscellaneousCommandsDUnitTest.class, "getMemberId");
+    String dir = getCurrentTimeString();
+
+    String logLevel = LogWriterImpl.levelToString(LogWriterImpl.INFO_LEVEL);
+
+    MiscellaneousCommands misc = new MiscellaneousCommands();
+    getCache();
+
+    Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForMember" + dir, null, vm1MemberId, logLevel,
+        false, false, start, end, 1);
+
+    getLogWriter().info("testExportLogsForMember command result =" + cmdResult);
+
+    if (cmdResult != null) {
+      String cmdStringResult = commandResultToString((CommandResult) cmdResult);
+      getLogWriter().info("testExportLogsForMember cmdStringResult=" + cmdStringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testExportLogsForMember failed as did not get CommandResult");
+    }
+    FileUtil.delete(new File("testExportLogsForMember" + dir));
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart4DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart4DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart4DUnitTest.java
new file mode 100644
index 0000000..da12c6e
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart4DUnitTest.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.internal.logging.LogWriterImpl;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.File;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+/**
+ * DUnit class for testing the gfsh command: export logs
+ *
+ * @author apande
+ */
+public class MiscellaneousCommandsExportLogsPart4DUnitTest extends CliCommandTestBase {
+
+  private static final long serialVersionUID = 1L;
+
+  public MiscellaneousCommandsExportLogsPart4DUnitTest(String name) {
+    super(name);
+  }
+
+  public static String getMemberId() {
+    Cache cache = new GemfireDataCommandsDUnitTest("test").getCache();
+    return cache.getDistributedSystem().getDistributedMember().getId();
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    super.tearDown2();
+  }
+
+  void setupForExportLogs() {
+    final VM vm1 = Host.getHost(0).getVM(1);
+    createDefaultSetup(null);
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create("testRegion");
+        for (int i = 0; i < 5; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+  }
+
+  String getCurrentTimeString() {
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy_MM_dd_HH_mm_ss_SSS_z");
+    Date startDate = new Date(System.currentTimeMillis());
+    String formattedStartDate = sf.format(startDate);
+    return ("_" + formattedStartDate);
+  }
+
+  public void testExportLogsForTimeRange1() throws IOException {
+    setupForExportLogs();
+    Date startDate = new Date(System.currentTimeMillis() - 1 * 60 * 1000);
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy/MM/dd");
+    String start = sf.format(startDate);
+
+    Date enddate = new Date(System.currentTimeMillis() + 1 * 60 * 60 * 1000);
+    String end = sf.format(enddate);
+    String dir = getCurrentTimeString();
+
+    String logLevel = LogWriterImpl.levelToString(LogWriterImpl.INFO_LEVEL);
+
+    MiscellaneousCommands misc = new MiscellaneousCommands();
+    getCache();
+
+    Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForTimeRange1" + dir, null, null, logLevel, false,
+        false, start, end, 1);
+
+    getLogWriter().info("testExportLogsForTimeRange1 command result =" + cmdResult);
+
+    if (cmdResult != null) {
+      String cmdStringResult = commandResultToString((CommandResult) cmdResult);
+      getLogWriter().info("testExportLogsForTimeRange1 cmdStringResult=" + cmdStringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testExportLogsForTimeRange1 failed as did not get CommandResult");
+    }
+    FileUtil.delete(new File("testExportLogsForTimeRange1" + dir));
+  }
+
+  public void testExportLogsForTimeRangeForOnlyStartTime() throws IOException {
+    setupForExportLogs();
+    Date date = new Date();
+    date.setTime(System.currentTimeMillis() - 30 * 1000);
+    SimpleDateFormat sf = new SimpleDateFormat("yyyy/MM/dd/HH:mm");
+    String s = sf.format(date);
+    String dir = getCurrentTimeString();
+
+    String logLevel = LogWriterImpl.levelToString(LogWriterImpl.INFO_LEVEL);
+
+    MiscellaneousCommands misc = new MiscellaneousCommands();
+    getCache();
+
+    Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForTimeRangeForOnlyStartTime" + dir, null, null,
+        logLevel, false, false, s, null, 1);
+
+    getLogWriter().info("testExportLogsForTimeRangeForOnlyStartTime command result =" + cmdResult);
+
+    if (cmdResult != null) {
+      String cmdStringResult = commandResultToString((CommandResult) cmdResult);
+      getLogWriter().info("testExportLogsForTimeRangeForOnlyStartTime cmdStringResult=" + cmdStringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testExportLogsForTimeRangeForOnlyStartTime failed as did not get CommandResult");
+    }
+    FileUtil.delete(new File("testExportLogsForTimeRangeForOnlyStartTime" + dir));
+  }
+}
\ No newline at end of file


[29/50] [abbrv] incubator-geode git commit: GEODE-647: Ignoring failing test for now

Posted by kl...@apache.org.
GEODE-647: Ignoring failing test for now


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/6e32ffe9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/6e32ffe9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/6e32ffe9

Branch: refs/heads/feature/GEODE-291
Commit: 6e32ffe9cad8cf251dda2807d6fd23ecffb0b1cc
Parents: 1f193af
Author: Jens Deppe <jd...@pivotal.io>
Authored: Wed Dec 9 12:37:59 2015 -0800
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Wed Dec 9 12:37:59 2015 -0800

----------------------------------------------------------------------
 .../gemfire/management/internal/cli/GfshParserJUnitTest.java       | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/6e32ffe9/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/GfshParserJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/GfshParserJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/GfshParserJUnitTest.java
index 2cb1148..68fe251 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/GfshParserJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/GfshParserJUnitTest.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.junit.After;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.springframework.shell.core.CommandMarker;
@@ -777,6 +778,7 @@ public class GfshParserJUnitTest {
    * @throws SecurityException
    */
   @Test
+  @Ignore("GEODE-647")
   public void testParse() throws Exception {
     // get a CommandManager, add sample commands
     CommandManager commandManager = CommandManager.getInstance(false);


[48/50] [abbrv] incubator-geode git commit: More JoinLeave tests

Posted by kl...@apache.org.
More JoinLeave tests


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7dfce7cd
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7dfce7cd
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7dfce7cd

Branch: refs/heads/feature/GEODE-291
Commit: 7dfce7cd92c7f2d9e89374dd7799eb87dd8711b3
Parents: 81c472f
Author: Hitesh Khamesra <hi...@yahoo.com>
Authored: Wed Dec 9 16:26:43 2015 -0800
Committer: Hitesh Khamesra <hi...@yahoo.com>
Committed: Thu Dec 10 15:31:14 2015 -0800

----------------------------------------------------------------------
 .../gms/locator/FindCoordinatorRequest.java     | 33 ++++++++++
 .../membership/gms/membership/GMSJoinLeave.java | 31 ++++++++--
 .../gms/membership/GMSJoinLeaveJUnitTest.java   | 63 ++++++++++++++++++++
 3 files changed, 123 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7dfce7cd/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/locator/FindCoordinatorRequest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/locator/FindCoordinatorRequest.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/locator/FindCoordinatorRequest.java
index f1ec2a0..5c0a1d1 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/locator/FindCoordinatorRequest.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/locator/FindCoordinatorRequest.java
@@ -112,4 +112,37 @@ public class FindCoordinatorRequest extends HighPriorityDistributionMessage
     throw new IllegalStateException("this message should not be executed");
   }
 
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + lastViewId;
+    result = prime * result + ((memberID == null) ? 0 : memberID.hashCode());
+    result = prime * result + ((rejectedCoordinators == null) ? 0 : rejectedCoordinators.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (getClass() != obj.getClass())
+      return false;
+    FindCoordinatorRequest other = (FindCoordinatorRequest) obj;
+    if (lastViewId != other.lastViewId)
+      return false;
+    if (memberID == null) {
+      if (other.memberID != null)
+        return false;
+    } else if (!memberID.equals(other.memberID))
+      return false;
+    if (rejectedCoordinators == null) {
+      if (other.rejectedCoordinators != null)
+        return false;
+    } else if (!rejectedCoordinators.equals(other.rejectedCoordinators))
+      return false;
+    return true;
+  }  
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7dfce7cd/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
index e1821db..3a3486b 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
@@ -313,7 +313,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
    * @param coord
    * @return true if the attempt succeeded, false if it timed out
    */
-  private boolean attemptToJoin() {
+   boolean attemptToJoin() {
     SearchState state = searchState;
 
     // send a join request to the coordinator and wait for a response
@@ -826,6 +826,15 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
     }
   }
 
+  private TcpClientWrapper tcpClientWrapper = new TcpClientWrapper();
+  
+  /**
+   * For testing purposes only.
+   * @param tcpClientWrapper the wrapper used to contact locators
+   */
+  void setTcpClientWrapper(TcpClientWrapper tcpClientWrapper) {
+    this.tcpClientWrapper = tcpClientWrapper;
+  }
   /**
    * This contacts the locators to find out who the current coordinator is.
    * All locators are contacted. If they don't agree then we choose the oldest
@@ -861,9 +870,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
     do {
       for (InetSocketAddress addr : locators) {
         try {
-          Object o = TcpClient.requestToServer(
-              addr.getAddress(), addr.getPort(), request, connectTimeout, 
-              true);
+          Object o = tcpClientWrapper.sendCoordinatorFindRequest(addr, request, connectTimeout);
           FindCoordinatorResponse response = (o instanceof FindCoordinatorResponse) ? (FindCoordinatorResponse)o : null;
           if (response != null) {
             state.locatorsContacted++;
@@ -937,6 +944,15 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
     }
     return true;
   }
+  
+  protected class TcpClientWrapper {
+    protected Object sendCoordinatorFindRequest(InetSocketAddress addr, FindCoordinatorRequest request, int connectTimeout) 
+        throws ClassNotFoundException, IOException{
+      return TcpClient.requestToServer(
+          addr.getAddress(), addr.getPort(), request, connectTimeout, 
+          true);
+    }
+  }    
 
   boolean findCoordinatorFromView() {
     ArrayList<FindCoordinatorResponse> result;
@@ -1051,6 +1067,13 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
   JoinResponseMessage[] getJoinResponseMessage() {
     return joinResponse;
   }
+  /**
+   * For testing purposes only.
+   * @param jrm the join response to install
+   */
+  void setJoinResponseMessage(JoinResponseMessage jrm) {
+    joinResponse[0] = jrm;
+  }
 
   private void processFindCoordinatorRequest(FindCoordinatorRequest req) {
     FindCoordinatorResponse resp;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7dfce7cd/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
index 01c0695..05b0996 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
@@ -28,6 +28,7 @@ import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -59,9 +60,11 @@ import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.Authe
 import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.HealthMonitor;
 import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.Manager;
 import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.Messenger;
+import com.gemstone.gemfire.distributed.internal.membership.gms.locator.FindCoordinatorRequest;
 import com.gemstone.gemfire.distributed.internal.membership.gms.locator.FindCoordinatorResponse;
 import com.gemstone.gemfire.distributed.internal.membership.gms.membership.GMSJoinLeave.SearchState;
 import com.gemstone.gemfire.distributed.internal.membership.gms.membership.GMSJoinLeave.ViewCreator;
+import com.gemstone.gemfire.distributed.internal.membership.gms.membership.GMSJoinLeave.TcpClientWrapper;
 import com.gemstone.gemfire.distributed.internal.membership.gms.membership.GMSJoinLeave.ViewReplyProcessor;
 import com.gemstone.gemfire.distributed.internal.membership.gms.messages.InstallViewMessage;
 import com.gemstone.gemfire.distributed.internal.membership.gms.messages.JoinRequestMessage;
@@ -99,6 +102,7 @@ public class GMSJoinLeaveJUnitTest {
     when(mockDistConfig.getEnableNetworkPartitionDetection()).thenReturn(enableNetworkPartition);
     when(mockDistConfig.getLocators()).thenReturn("localhost[8888]");
     mockConfig = mock(ServiceConfig.class);
+    when(mockDistConfig.getStartLocator()).thenReturn("localhost[12345]");
     when(mockConfig.getDistributionConfig()).thenReturn(mockDistConfig);
     when(mockDistConfig.getLocators()).thenReturn("localhost[12345]");
     when(mockDistConfig.getMcastPort()).thenReturn(0);
@@ -1028,5 +1032,64 @@ public class GMSJoinLeaveJUnitTest {
     assertTrue(newView.contains(mockMembers[1]));
     assertTrue(newView.getViewId() > preparedView.getViewId());
   }
+
+  private NetView createView() {
+    List<InternalDistributedMember> mbrs = new LinkedList<>();
+    Set<InternalDistributedMember> shutdowns = new HashSet<>();
+    Set<InternalDistributedMember> crashes = new HashSet<>();
+    mbrs.add(mockMembers[0]);
+    mbrs.add(mockMembers[1]);
+    mbrs.add(mockMembers[2]);
+    mbrs.add(gmsJoinLeaveMemberId);
+    
+    //prepare the view
+    NetView netView = new NetView(mockMembers[0], 1, mbrs, shutdowns, crashes);
+    return netView;
+  }
+  
+  @Test
+  public void testCoordinatorFindRequestSuccess()  throws Exception {
+    try{
+      initMocks(false);
+      HashSet<InternalDistributedMember> registrants = new HashSet<>();
+      registrants.add(mockMembers[0]);
+      FindCoordinatorResponse fcr = new FindCoordinatorResponse(mockMembers[0], mockMembers[0], false, null, registrants, false, true);
+      NetView view = createView();
+      JoinResponseMessage jrm = new JoinResponseMessage(mockMembers[0], view);
+      gmsJoinLeave.setJoinResponseMessage(jrm);
+      
+      TcpClientWrapper tcpClientWrapper = mock(TcpClientWrapper.class);
+      gmsJoinLeave.setTcpClientWrapper(tcpClientWrapper);
+      FindCoordinatorRequest fcreq = new FindCoordinatorRequest(gmsJoinLeaveMemberId, new HashSet<>(), -1);
+      int connectTimeout = (int)services.getConfig().getMemberTimeout() * 2;
+      when(tcpClientWrapper.sendCoordinatorFindRequest(new InetSocketAddress("localhost", 12345), fcreq, connectTimeout)).thenReturn(fcr);
+      assertTrue("Should be able to join ", gmsJoinLeave.join());
+    }finally{
+      
+    }   
+  }
+  
+  @Test
+  public void testCoordinatorFindRequestFailure()  throws Exception {
+    try{
+      initMocks(false);
+      HashSet<InternalDistributedMember> registrants = new HashSet<>();
+      registrants.add(mockMembers[0]);
+      FindCoordinatorResponse fcr = new FindCoordinatorResponse(mockMembers[0], mockMembers[0], false, null, registrants, false, true);
+      NetView view = createView();
+      JoinResponseMessage jrm = new JoinResponseMessage(mockMembers[0], view);
+      gmsJoinLeave.setJoinResponseMessage(jrm);
+      
+      TcpClientWrapper tcpClientWrapper = mock(TcpClientWrapper.class);
+      gmsJoinLeave.setTcpClientWrapper(tcpClientWrapper);
+      FindCoordinatorRequest fcreq = new FindCoordinatorRequest(gmsJoinLeaveMemberId, new HashSet<>(), -1);
+      int connectTimeout = (int)services.getConfig().getMemberTimeout() * 2;
+      // passing the wrong port here, so the find-coordinator request will not be answered and join will fail
+      when(tcpClientWrapper.sendCoordinatorFindRequest(new InetSocketAddress("localhost", 12346), fcreq, connectTimeout)).thenReturn(fcr);
+      assertFalse("Should not be able to join ", gmsJoinLeave.join());
+    }finally{
+      
+    }   
+  }
 }
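
The two tests added above hinge on how the TcpClientWrapper mock is stubbed: the success case registers a response for the locator address the joiner actually contacts (localhost[12345]), while the failure case registers it for port 12346, so the call made during join() finds no matching stub and gets Mockito's default of null. A minimal sketch of that pattern, with illustrative names only (not part of the commit):

  // Hedged sketch of the Mockito stubbing used above; 'client', 'request' and
  // 'response' are placeholder names.
  TcpClientWrapper client = mock(TcpClientWrapper.class);
  when(client.sendCoordinatorFindRequest(
      new InetSocketAddress("localhost", 12345), request, connectTimeout))
    .thenReturn(response);
  // Any invocation whose arguments differ from the stub (for example a different
  // port) returns Mockito's default value, null, which is what the failure test
  // relies on to make join() return false.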
 


[21/50] [abbrv] incubator-geode git commit: Merge branch 'feature/GEODE-390' into develop

Posted by kl...@apache.org.
Merge branch 'feature/GEODE-390' into develop

Closes #37


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/386d1ac8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/386d1ac8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/386d1ac8

Branch: refs/heads/feature/GEODE-291
Commit: 386d1ac8c9ed486d7c9415ac3d14bcc78dc15c97
Parents: 80b59bf 4a07f45
Author: Jens Deppe <jd...@pivotal.io>
Authored: Tue Dec 8 14:22:29 2015 -0800
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Tue Dec 8 14:22:29 2015 -0800

----------------------------------------------------------------------
 .../cache/partition/PartitionManager.java       | 377 ----------------
 .../partition/PartitionManagerDUnitTest.java    | 443 -------------------
 .../fixed/FixedPartitioningTestBase.java        |  83 ----
 ...ngWithColocationAndPersistenceDUnitTest.java | 106 -----
 4 files changed, 1009 deletions(-)
----------------------------------------------------------------------



[16/50] [abbrv] incubator-geode git commit: GEODE-563: Moving gfsh tests from closed

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java
new file mode 100644
index 0000000..383012e
--- /dev/null
+++ b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java
@@ -0,0 +1,434 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.management.internal.configuration;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.wan.GatewaySender.OrderPolicy;
+import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalLocator;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.ClassBuilder;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.internal.JarDeployer;
+import com.gemstone.gemfire.internal.admin.remote.ShutdownAllRequest;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.lang.StringUtils;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.CliUtil;
+import com.gemstone.gemfire.management.internal.cli.HeadlessGfsh;
+import com.gemstone.gemfire.management.internal.cli.commands.CliCommandTestBase;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.VM;
+import org.apache.commons.io.FileUtils;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+public class SharedConfigurationEndToEndDUnitTest extends CliCommandTestBase {
+  private static final int TIMEOUT = 10000;
+  private static final int INTERVAL = 500;
+  private static final String REGION1 = "R1";
+  private static final String REGION2 = "R2";
+  private static final String INDEX1 = "ID1";
+  private transient ClassBuilder classBuilder = new ClassBuilder();
+  public static Set<String> serverNames = new HashSet<String>();
+  public static Set<String> jarFileNames = new HashSet<String>();
+
+  public SharedConfigurationEndToEndDUnitTest(String name) {
+    super(name);
+    // TODO Auto-generated constructor stub
+  }
+
+  private static final long serialVersionUID = -2276690105585944041L;
+
+  public Set<String> startServers(HeadlessGfsh gfsh, String locatorString, int numServers, String serverNamePrefix, int startNum) throws ClassNotFoundException, IOException {
+    Set<String> serverNames = new HashSet<String>();
+
+    final int[] serverPorts = AvailablePortHelper.getRandomAvailableTCPPorts(numServers);
+    for (int i=0; i<numServers; i++) {
+      int port = serverPorts[i];
+      String serverName = serverNamePrefix+ Integer.toString(i+startNum) + "-" + port;
+      CommandStringBuilder csb = new CommandStringBuilder(CliStrings.START_SERVER);
+      csb.addOption(CliStrings.START_SERVER__NAME, serverName);
+      csb.addOption(CliStrings.START_SERVER__LOCATORS, locatorString);
+      csb.addOption(CliStrings.START_SERVER__SERVER_PORT, Integer.toString(port));
+      CommandResult cmdResult = executeCommand(gfsh, csb.getCommandString());
+      assertEquals(Status.OK, cmdResult.getStatus());
+    }
+    return serverNames;
+  }
+
+  public void testStartServerAndExecuteCommands() throws InterruptedException, ClassNotFoundException, IOException, ExecutionException {
+    addExpectedException("EntryDestroyedException");
+    Object[] result = setup();
+    final int locatorPort = (Integer) result[0];
+    final String jmxHost = (String) result[1];
+    final int jmxPort = (Integer) result[2];
+    final int httpPort = (Integer) result[3];
+    final String locatorString = "localHost[" + locatorPort + "]";
+
+    final HeadlessGfsh gfsh = new HeadlessGfsh("gfsh2", 300);
+    assertNotNull(gfsh);
+    shellConnect(jmxHost, jmxPort, httpPort, gfsh);
+
+    serverNames.addAll(startServers(gfsh, locatorString, 2, "Server", 1));
+    doCreateCommands();
+    serverNames.addAll(startServers(gfsh, locatorString, 1, "NewMember", 4));
+    verifyRegionCreateOnAllMembers(REGION1);
+    verifyRegionCreateOnAllMembers(REGION2);
+    verifyIndexCreationOnAllMembers(INDEX1);
+    verifyAsyncEventQueueCreation();
+   
+
+
+    //shutdown everything
+    getLogWriter().info("Shutting down all the members");
+    shutdownAll();
+    deleteSavedJarFiles();
+  }
+
+
+  private void doCreateCommands() {
+    createRegion(REGION1, RegionShortcut.REPLICATE, null);
+    createRegion(REGION2, RegionShortcut.PARTITION, null);
+    createIndex(INDEX1 , "AAPL", REGION1, null);
+    createAndDeployJar("Deploy1.jar");
+    createAsyncEventQueue("q1");
+    final String autoCompact = "true";
+    final String allowForceCompaction = "true";
+    final String compactionThreshold = "50";
+    final String duCritical = "90";
+    final String duWarning = "85";
+    final String maxOplogSize = "1000";
+    final String queueSize = "300";
+    final String timeInterval = "10";
+    final String writeBufferSize="100";
+    final String diskStoreName = "ds1";
+    final String diskDirs = "ds1";
+    
+    createDiskStore(diskStoreName, diskDirs, autoCompact, allowForceCompaction, compactionThreshold, duCritical, duWarning, maxOplogSize, queueSize, timeInterval, writeBufferSize);
+  }
+
+
+  protected void executeAndVerifyCommand(String commandString) {
+    CommandResult cmdResult = executeCommand(commandString);
+    getLogWriter().info("Command Result : \n" + commandResultToString(cmdResult));
+    assertEquals(Status.OK, cmdResult.getStatus());
+    assertFalse(cmdResult.failedToPersist());
+  }
+
+  private void createRegion(String regionName, RegionShortcut regionShortCut, String group) {
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.CREATE_REGION);
+    csb.addOption(CliStrings.CREATE_REGION__REGION, regionName);
+    csb.addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT, regionShortCut.name());
+    executeAndVerifyCommand(csb.getCommandString());
+  }
+
+  private void destroyRegion(String regionName) {
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.DESTROY_REGION);
+    csb.addOption(CliStrings.DESTROY_REGION__REGION, regionName);
+    executeAndVerifyCommand(csb.getCommandString());
+  }
+
+  private void stopServer(String serverName) {
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.STOP_SERVER);
+    csb.addOption(CliStrings.STOP_SERVER__MEMBER, serverName);
+    executeAndVerifyCommand(csb.getCommandString());
+  }
+
+  public void createAsyncEventQueue(String queueName) {
+    String queueCommandsJarName = "testEndToEndSC-QueueCommands.jar";
+    final File jarFile = new File(queueCommandsJarName);
+
+    try {
+      ClassBuilder classBuilder = new ClassBuilder();
+      byte[] jarBytes = classBuilder.createJarFromClassContent("com/qcdunit/QueueCommandsDUnitTestListener",
+          "package com.qcdunit;" +
+              "import java.util.List; import java.util.Properties;" +
+              "import com.gemstone.gemfire.internal.cache.xmlcache.Declarable2; import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;" +
+              "import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;" +
+              "public class QueueCommandsDUnitTestListener implements Declarable2, AsyncEventListener {" +
+              "Properties props;" +
+              "public boolean processEvents(List<AsyncEvent> events) { return true; }" +
+              "public void close() {}" +
+              "public void init(final Properties props) {this.props = props;}" +
+          "public Properties getConfig() {return this.props;}}");
+
+      FileUtils.writeByteArrayToFile(jarFile, jarBytes);
+      CommandStringBuilder csb = new CommandStringBuilder(CliStrings.DEPLOY);
+      csb.addOption(CliStrings.DEPLOY__JAR, queueCommandsJarName);
+      executeAndVerifyCommand(csb.getCommandString());
+
+      csb = new CommandStringBuilder(CliStrings.CREATE_ASYNC_EVENT_QUEUE);
+      csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ID, queueName);
+      csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__LISTENER, "com.qcdunit.QueueCommandsDUnitTestListener");
+      csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__BATCH_SIZE, "100");
+      csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__BATCHTIMEINTERVAL, "200");
+      csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__DISPATCHERTHREADS, "4");
+      csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ENABLEBATCHCONFLATION, "true");
+      csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__DISKSYNCHRONOUS, "true");
+      csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__MAXIMUM_QUEUE_MEMORY, "1000");
+      csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ORDERPOLICY, OrderPolicy.KEY.toString());
+      csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__PERSISTENT, "true");
+      csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__PARALLEL, "true");
+      
+      executeAndVerifyCommand(csb.getCommandString());
+
+    } catch (IOException e) {
+      e.printStackTrace();
+    } finally {
+      FileUtils.deleteQuietly(jarFile);
+    }
+  }
+  private void createDiskStore(String diskStoreName, 
+      String diskDirs, 
+      String autoCompact, 
+      String allowForceCompaction, 
+      String compactionThreshold, 
+      String duCritical, 
+      String duWarning,
+      String maxOplogSize,
+      String queueSize,
+      String timeInterval,
+      String writeBufferSize) {
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE);
+    csb.addOption(CliStrings.CREATE_DISK_STORE__NAME, diskStoreName);
+    csb.addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, diskDirs);
+    csb.addOptionWithValueCheck(CliStrings.CREATE_DISK_STORE__AUTO_COMPACT, autoCompact);
+    csb.addOptionWithValueCheck(CliStrings.CREATE_DISK_STORE__ALLOW_FORCE_COMPACTION, allowForceCompaction);
+    csb.addOptionWithValueCheck(CliStrings.CREATE_DISK_STORE__COMPACTION_THRESHOLD, compactionThreshold);
+    csb.addOptionWithValueCheck(CliStrings.CREATE_DISK_STORE__DISK_USAGE_CRITICAL_PCT, duCritical);
+    csb.addOptionWithValueCheck(CliStrings.CREATE_DISK_STORE__DISK_USAGE_WARNING_PCT, duWarning);
+    csb.addOptionWithValueCheck(CliStrings.CREATE_DISK_STORE__MAX_OPLOG_SIZE, maxOplogSize);
+    csb.addOptionWithValueCheck(CliStrings.CREATE_DISK_STORE__QUEUE_SIZE, queueSize);
+    csb.addOptionWithValueCheck(CliStrings.CREATE_DISK_STORE__TIME_INTERVAL, timeInterval);
+    csb.addOptionWithValueCheck(CliStrings.CREATE_DISK_STORE__WRITE_BUFFER_SIZE, writeBufferSize);
+    executeAndVerifyCommand(csb.getCommandString());
+  }
+  
+  private void destroyDiskStore(String diskStoreName, String group) {
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.DESTROY_DISK_STORE);
+    csb.addOption(CliStrings.DESTROY_DISK_STORE__NAME, diskStoreName);
+    csb.addOptionWithValueCheck(CliStrings.DESTROY_DISK_STORE__GROUP, group);
+    executeAndVerifyCommand(csb.toString());
+  }
+  public void createIndex(String indexName, String expression, String regionName, String group) {
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, expression);
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, regionName);
+    executeAndVerifyCommand(csb.getCommandString());
+  }
+
+  public void destoyIndex(String indexName, String regionName, String group) {
+    if (StringUtils.isBlank(indexName) && StringUtils.isBlank(regionName) && StringUtils.isBlank(group)) {
+      return;
+    }
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+    if (!StringUtils.isBlank(indexName)) {
+      csb.addOption(CliStrings.DESTROY_INDEX__NAME, indexName);
+    }
+
+    if (!StringUtils.isBlank(regionName)) {
+      csb.addOption(CliStrings.DESTROY_INDEX__REGION, regionName);
+    }
+
+    if (!StringUtils.isBlank(group)) {
+      csb.addOption(CliStrings.DESTROY_INDEX__GROUP, group);
+    }
+    executeAndVerifyCommand(csb.getCommandString());
+  }
+
+  public void createAndDeployJar(String jarName) {
+    File newDeployableJarFile = new File(jarName);
+    try {
+      this.classBuilder.writeJarFromName("ShareConfigClass", newDeployableJarFile);
+      CommandStringBuilder csb = new CommandStringBuilder(CliStrings.DEPLOY);
+      csb.addOption(CliStrings.DEPLOY__JAR, jarName);
+      executeAndVerifyCommand(csb.getCommandString());
+      jarFileNames.add(jarName);
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+
+  public void deleteSavedJarFiles() {
+    try {
+      FileUtil.deleteMatching(new File("."), "^" + JarDeployer.JAR_PREFIX + "Deploy1.*#\\d++$");
+      FileUtil.delete(new File("Deploy1.jar"));
+    } catch (IOException ioe) {
+      ioe.printStackTrace();
+    }
+  }
+
+  public Object[] setup() {
+    disconnectAllFromDS();
+    final int [] ports = AvailablePortHelper.getRandomAvailableTCPPorts(3);
+    final int locator1Port = ports[0];
+    final String locator1Name = "locator1-" + locator1Port;
+    VM locatorAndMgr = Host.getHost(0).getVM(3);
+
+    Object[] result = (Object[]) locatorAndMgr.invoke(new SerializableCallable() {
+      @Override
+      public Object call() {
+        int httpPort;
+        int jmxPort;
+        String jmxHost;
+
+        try {
+          jmxHost = InetAddress.getLocalHost().getHostName();
+        }
+        catch (UnknownHostException ignore) {
+          jmxHost = "localhost";
+        }
+
+        final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+
+        jmxPort = ports[0];
+        httpPort = ports[1];
+
+        final File locatorLogFile = new File("locator-" + locator1Port + ".log");
+
+        final Properties locatorProps = new Properties();
+        locatorProps.setProperty(DistributionConfig.NAME_NAME, locator1Name);
+        locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        locatorProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "config");
+        locatorProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+        locatorProps.setProperty(DistributionConfig.JMX_MANAGER_NAME, "true");
+        locatorProps.setProperty(DistributionConfig.JMX_MANAGER_START_NAME, "true");
+        locatorProps.setProperty(DistributionConfig.JMX_MANAGER_BIND_ADDRESS_NAME, String.valueOf(jmxHost));
+        locatorProps.setProperty(DistributionConfig.JMX_MANAGER_PORT_NAME, String.valueOf(jmxPort));
+        locatorProps.setProperty(DistributionConfig.HTTP_SERVICE_PORT_NAME, String.valueOf(httpPort));
+
+        try {
+          final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locator1Port, locatorLogFile, null,
+              locatorProps);
+          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+            @Override
+            public boolean done() {
+              return locator.isSharedConfigurationRunning();
+            }
+
+            @Override
+            public String description() {
+              return "Waiting for shared configuration to be started";
+            }
+          };
+          DistributedTestCase.waitForCriterion(wc, TIMEOUT, INTERVAL, true);
+        } catch (IOException ioex) {
+          fail("Unable to create a locator with a shared configuration");
+        }
+
+        final Object[] result = new Object[4];
+        result[0] = locator1Port;
+        result[1] = jmxHost;
+        result[2] = jmxPort;
+        result[3] = httpPort;
+        return result;
+      }
+    });
+
+    HeadlessGfsh gfsh = getDefaultShell();
+    String jmxHost = (String)result[1];
+    int jmxPort = (Integer)result[2];
+    int httpPort = (Integer)result[3];
+
+    shellConnect(jmxHost, jmxPort, httpPort, gfsh);
+    // Create a cache in VM 1
+    VM dataMember = Host.getHost(0).getVM(1);
+    dataMember.invoke(new SerializableCallable() {
+      @Override
+      public Object call() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locator1Port);
+        localProps.setProperty(DistributionConfig.NAME_NAME, "DataMember");
+        getSystem(localProps);
+        Cache cache = getCache();
+        assertNotNull(cache);
+        return CliUtil.getAllNormalMembers(cache);
+      }
+    });
+    return result;
+  }
+
+  private void shutdownAll() throws IOException {
+    VM locatorAndMgr = Host.getHost(0).getVM(3);
+    locatorAndMgr.invoke(new SerializableCallable() {
+      /**
+       * 
+       */
+      private static final long serialVersionUID = 1L;
+
+      @Override
+      public Object call() throws Exception {
+        GemFireCacheImpl cache = (GemFireCacheImpl)CacheFactory.getAnyInstance();
+        ShutdownAllRequest.send(cache.getDistributedSystem().getDistributionManager(), -1);
+        return null;
+      }
+    });
+
+    locatorAndMgr.invoke(SharedConfigurationDUnitTest.locatorCleanup);
+    //Clean up the directories
+    if (!serverNames.isEmpty()) {
+      for (String serverName : serverNames) {
+        final File serverDir = new File(serverName);
+        FileUtils.cleanDirectory(serverDir);
+        FileUtils.deleteDirectory(serverDir);
+      }
+    }
+    serverNames.clear();
+    serverNames = null;
+  }
+
+  private void verifyRegionCreateOnAllMembers(String regionName) {
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.DESCRIBE_REGION);
+    csb.addOption(CliStrings.DESCRIBE_REGION__NAME, regionName);
+    CommandResult cmdResult = executeCommand(csb.getCommandString());
+    String resultAsString = commandResultToString(cmdResult);
+
+    for (String serverName : serverNames) {
+      assertTrue(resultAsString.contains(serverName));
+    }
+  }     
+
+  private void verifyIndexCreationOnAllMembers(String indexName) {
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.LIST_INDEX);
+    CommandResult cmdResult = executeCommand(csb.getCommandString());
+    String resultAsString = commandResultToString(cmdResult);
+
+    for (String serverName : serverNames) {
+      assertTrue(resultAsString.contains(serverName));
+    }
+  }
+  
+  private void verifyAsyncEventQueueCreation() {
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.LIST_ASYNC_EVENT_QUEUES);
+    CommandResult cmdResult = executeCommand(csb.toString());
+    String resultAsString = commandResultToString(cmdResult);
+    
+    for (String serverName : serverNames) {
+      assertTrue(resultAsString.contains(serverName));
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfsh.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfsh.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfsh.java
new file mode 100644
index 0000000..9ca9809
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfsh.java
@@ -0,0 +1,376 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli;
+
+import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
+import com.gemstone.gemfire.management.internal.cli.shell.GfshConfig;
+import com.gemstone.gemfire.management.internal.cli.shell.jline.GfshUnsupportedTerminal;
+import edu.umd.cs.findbugs.annotations.SuppressWarnings;
+import jline.ConsoleReader;
+import org.springframework.shell.core.ExitShellRequest;
+import org.springframework.shell.event.ShellStatus.Status;
+
+import java.io.BufferedWriter;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileDescriptor;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.io.Writer;
+import java.util.Properties;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.logging.Level;
+
+
+/**
+ * This is a headless shell that can be used to submit arbitrary commands and retrieve their results. It is primarily
+ * used for command testing, but it can also be used to programmatically send commands that operate on GemFire
+ * distributed systems. TODO : Merge HeadlessGfsh and HeadlessGfshShell. TODO : Provide a constructor for optionally
+ * specifying a GfshConfig to supply logDirectory and logLevel.
+ *
+ * @author tushark
+ */
+@SuppressWarnings("rawtypes")
+public class HeadlessGfsh implements ResultHandler {
+
+  public static final String ERROR_RESULT = "_$_ERROR_RESULT";
+
+  private HeadlessGfshShell shell = null;
+  private LinkedBlockingQueue queue = new LinkedBlockingQueue<>();
+  private long timeout = 20;
+  public String outputString = null;
+
+  public HeadlessGfsh(String name, int timeout) throws ClassNotFoundException, IOException {
+    this(name, timeout, null);
+  }
+
+  public HeadlessGfsh(String name, int timeout, Properties envProps) throws ClassNotFoundException, IOException {
+    this.timeout = timeout;
+    System.setProperty("jline.terminal", GfshUnsupportedTerminal.class.getName());
+    this.shell = new HeadlessGfshShell(name, this);
+    this.shell.setEnvProperty(Gfsh.ENV_APP_RESULT_VIEWER, "non-basic");
+
+    if (envProps != null) {
+      for (String key : envProps.stringPropertyNames()) {
+        this.shell.setEnvProperty(key, envProps.getProperty(key));
+      }
+    }
+
+    // This allows us to avoid race conditions during startup - in particular an NPE on the ConsoleReader which is
+    // created in a separate thread during start()
+    CountDownLatch shellStarted = new CountDownLatch(1);
+    this.shell.addShellStatusListener((oldStatus, newStatus) -> {
+      if (newStatus.getStatus() == Status.STARTED) {
+        shellStarted.countDown();
+      }
+    });
+
+    this.shell.start();
+    this.setThreadLocalInstance();
+
+    try {
+      shellStarted.await();
+    } catch (InterruptedException e) {
+      e.printStackTrace(System.out);
+    }
+  }
+
+  public void setThreadLocalInstance() {
+    shell.setThreadLocalInstance();
+  }
+
+  //TODO : Have non-blocking method also where we move executeCommand call to separate thread-pool
+  public boolean executeCommand(String command) {
+    boolean status = false;
+    try {
+      outputString = null;
+      status = shell.executeCommand(command);
+    } catch (Exception e) {
+      outputString = e.getMessage();
+    }
+    return status;
+  }
+
+  int getCommandExecutionStatus() {
+    return shell.getCommandExecutionStatus();
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public void handleExecutionResult(Object result, String sysout) {
+    queue.add(result);
+    outputString = sysout;
+  }
+
+  public Object getResult() throws InterruptedException {
+    // Don't wait when some command has already called gfsh.stop()
+    if (shell.stopCalledThroughAPI) return null;
+    try {
+      Object result = queue.poll(timeout, TimeUnit.SECONDS);
+      queue.clear();
+      return result;
+    } catch (InterruptedException e) {
+      e.printStackTrace();
+      throw e;
+    }
+  }
+
+  public void clear() {
+    queue.clear();
+    outputString = null;
+  }
+
+  public void clearEvents() {
+    queue.clear();
+    outputString = null;
+  }
+
+  public void terminate() {
+    shell.terminate();
+  }
+
+  public boolean isConnectedAndReady() {
+    return shell.isConnectedAndReady();
+  }
+
+  public String getErrorString() {
+    return shell.errorString;
+  }
+
+  public boolean hasError() {
+    return shell.hasError();
+  }
+
+  public String getError() {
+    return shell.errorString;
+  }
+
+  public static class HeadlessGfshShell extends Gfsh {
+
+    private ResultHandler handler = null;
+    private final Lock lock = new ReentrantLock();
+    private final Condition endOfShell = lock.newCondition();
+    private ByteArrayOutputStream output = null;
+    private String errorString = null;
+    private boolean hasError = false;
+    boolean stopCalledThroughAPI = false;
+
+    protected HeadlessGfshShell(String testName, ResultHandler handler) throws ClassNotFoundException, IOException {
+      super(false, new String[]{}, new HeadlessGfshConfig(testName));
+      this.handler = handler;
+    }
+
+    public void setThreadLocalInstance() {
+      gfshThreadLocal.set(this);
+    }
+
+    protected void handleExecutionResult(Object result) {
+      if (!result.equals(ERROR_RESULT)) {
+        super.handleExecutionResult(result);
+        handler.handleExecutionResult(result, output.toString());
+        output.reset();
+      } else {
+        //signal waiting queue with error condition with empty output
+        output.reset();
+        handler.handleExecutionResult(result, output.toString());
+      }
+    }
+
+    int getCommandExecutionStatus() {
+      return getLastExecutionStatus();
+    }
+
+    public void terminate() {
+      closeShell();
+      stopPromptLoop();
+      stop();
+    }
+
+    public void stop() {
+      stopCalledThroughAPI = true;
+    }
+
+    private void stopPromptLoop() {
+      lock.lock();
+      try {
+        endOfShell.signalAll();
+      } finally {
+        lock.unlock();
+      }
+    }
+
+    public String getErrorString() {
+      return errorString;
+    }
+
+    public boolean hasError() {
+      return hasError;
+    }
+
+    /**
+     * We override this method so that the runner thread does not actually read anything; it simply waits on the
+     * Condition endOfShell, which is signalled when terminate is called. This achieves a clean shutdown of the runner thread.
+     */
+    @Override
+    public void promptLoop() {
+      lock.lock();
+      try {
+        while (true) {
+          try {
+            endOfShell.await();
+          } catch (InterruptedException e) {
+            //e.printStackTrace();
+          }
+          this.exitShellRequest = ExitShellRequest.NORMAL_EXIT;
+          setShellStatus(Status.SHUTTING_DOWN);
+          break;
+        }
+      } finally {
+        lock.unlock();
+      }
+    }
+
+    private static void setGfshOutErr(PrintStream outToUse) {
+      Gfsh.gfshout = outToUse;
+      Gfsh.gfsherr = outToUse;
+    }
+
+    /**
+     * Called when the shell logs a warning for an exception. Capture the message, set the error flag to true, and send
+     * ERROR_RESULT on the queue to signal the thread waiting for a CommandResult.
+     */
+    @Override
+    public void logWarning(String message, Throwable t) {
+      super.logWarning(message, t);
+      errorString = message;
+      hasError = true;
+      //signal waiting queue with error condition
+      handleExecutionResult(ERROR_RESULT);
+    }
+
+    /**
+     * Called when the shell logs a severe error for an exception. Capture the message, set the error flag to true, and
+     * send ERROR_RESULT on the queue to signal the thread waiting for a CommandResult.
+     */
+    @Override
+    public void logSevere(String message, Throwable t) {
+      super.logSevere(message, t);
+      errorString = message;
+      hasError = true;
+      //signal waiting queue with error condition
+      handleExecutionResult(ERROR_RESULT);
+    }
+
+    /**
+     * Setup console-reader to capture Shell output
+     */
+    @Override
+    protected ConsoleReader createConsoleReader() {
+      try {
+        output = new ByteArrayOutputStream(1024 * 10);
+        PrintStream sysout = new PrintStream(output);
+        Writer wrappedOut = new BufferedWriter(new OutputStreamWriter(sysout));
+        setGfshOutErr(sysout);
+        return new ConsoleReader(new FileInputStream(FileDescriptor.in), new PrintWriter(wrappedOut));
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    }
+  }
+
+
+  /**
+   * HeadlessGfshConfig for tests. Taken from TestableGfsh
+   */
+  static class HeadlessGfshConfig extends GfshConfig {
+    {
+      // set vm as a gfsh vm
+      CliUtil.isGfshVM = true;
+    }
+
+    private File parentDir;
+    private String fileNamePrefix;
+    private String name;
+    private String generatedHistoryFileName = null;
+
+    public HeadlessGfshConfig(String name) {
+      this.name = name;
+
+      if (isDUnitTest(this.name)) {
+        fileNamePrefix = this.name;
+      } else {
+        fileNamePrefix = "non-hydra-client";
+      }
+
+      parentDir = new File("gfsh_files");
+      parentDir.mkdirs();
+    }
+
+    private static boolean isDUnitTest(String name) {
+      boolean isDUnitTest = false;
+      if (name != null) {
+        String[] split = name.split("_");
+        if (split.length != 0 && split[0].endsWith("DUnitTest")) {
+          isDUnitTest = true;
+        }
+      }
+      return isDUnitTest;
+    }
+
+    @Override
+    public String getLogFilePath() {
+      return new File(parentDir, getFileNamePrefix() + "-gfsh.log").getAbsolutePath();
+    }
+
+    private String getFileNamePrefix() {
+      String timeStamp = new java.sql.Time(System.currentTimeMillis()).toString();
+      timeStamp = timeStamp.replace(':', '_');
+      return fileNamePrefix + "-" + timeStamp;
+    }
+
+    @Override
+    public String getHistoryFileName() {
+      if (generatedHistoryFileName == null) {
+        String fileName = new File(parentDir, (getFileNamePrefix() + "-gfsh.history")).getAbsolutePath();
+        generatedHistoryFileName = fileName;
+        return fileName;
+      } else {
+        return generatedHistoryFileName;
+      }
+    }
+
+    @Override
+    public boolean isTestConfig() {
+      return true;
+    }
+
+    @Override
+    public Level getLogLevel() {
+      // Keep log level fine for tests
+      return Level.FINE;
+    }
+  }
+
+}
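
Per the class comment at the top of HeadlessGfsh, the intended call sequence is: construct the shell, submit a command with executeCommand(), then poll the result with getResult(). A minimal hedged usage sketch follows; the port and command strings are illustrative, and the HeadlessGfshJUnitTest added below exercises the same flow against a real JMX manager:

  // Hedged sketch; assumes a JMX manager is already listening on the given port
  // and that checked exceptions are handled by the caller.
  HeadlessGfsh gfsh = new HeadlessGfsh("example-shell", 30);
  gfsh.executeCommand("connect --jmx-manager=localhost[1099]");
  Object connectResult = gfsh.getResult();   // blocks up to the configured timeout
  if (gfsh.isConnectedAndReady()) {
    gfsh.executeCommand("list members");
    Object members = gfsh.getResult();
  }
  gfsh.terminate();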

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfshJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfshJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfshJUnitTest.java
new file mode 100644
index 0000000..0807898
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfshJUnitTest.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli;
+
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.management.internal.MBeanJMXAdapter;
+import com.gemstone.gemfire.test.junit.categories.UnitTest;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.util.Properties;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * TODO : Add more tests for error-catch, different type of results etc
+ *
+ * @author tushark
+ */
+@Category(UnitTest.class)
+public class HeadlessGfshJUnitTest {
+
+  @SuppressWarnings({"unused", "deprecation"})
+  @Test
+  public void testHeadlessGfshTest() throws ClassNotFoundException, IOException, InterruptedException {
+    GemFireCacheImpl cache = null;
+    DistributedSystem ds = null;
+    Properties pr = new Properties();
+    pr.put("name", "testHeadlessGfshTest");
+    pr.put(DistributionConfig.JMX_MANAGER_NAME, "true");
+    pr.put(DistributionConfig.JMX_MANAGER_START_NAME, "true");
+    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    pr.put(DistributionConfig.JMX_MANAGER_PORT_NAME, String.valueOf(port));
+    pr.put(DistributionConfig.HTTP_SERVICE_PORT_NAME, "0");
+    pr.put(DistributionConfig.MCAST_PORT_NAME, "0");
+
+    ds = DistributedSystem.connect(pr);
+    cache = (GemFireCacheImpl) CacheFactory.create(ds);
+    ObjectName name = MBeanJMXAdapter.getDistributedSystemName();
+
+    HeadlessGfsh gfsh = new HeadlessGfsh("Test", 25);
+    for (int i = 0; i < 5; i++) {
+      gfsh.executeCommand("connect --jmx-manager=localhost[" + port + "]");
+      Object result = gfsh.getResult();
+      assertTrue(gfsh.isConnectedAndReady());
+      assertNotNull(result);
+      gfsh.clear();
+      gfsh.executeCommand("list members");
+      result = gfsh.getResult();
+      assertNotNull(result);
+      gfsh.executeCommand("disconnect");
+      gfsh.getResult();
+    }
+
+    long l1 = System.currentTimeMillis();
+    gfsh.executeCommand("exit");
+    long l2 = System.currentTimeMillis();
+    gfsh.getResult();
+    long l3 = System.currentTimeMillis();
+    System.out.println("L3-l2=" + (l3 - l2) + " Total time= " + (l3 - l1) / 1000);
+    gfsh.terminate();
+    cache.close();
+    ds.disconnect();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/ResultHandler.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/ResultHandler.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/ResultHandler.java
new file mode 100644
index 0000000..2b90b60
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/ResultHandler.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli;
+
+public interface ResultHandler {
+  
+  void handleExecutionResult(Object result, String sysout);
+
+}
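
ResultHandler is the callback through which HeadlessGfshShell hands each command result (plus the captured console output) back to a consumer; HeadlessGfsh itself implements it by pushing the result onto a blocking queue. A minimal hedged sketch of an alternative implementation that simply remembers the most recent result (illustrative only, assumed to live in the same package):

  // Hedged sketch: keeps only the latest result and its console output.
  public class LastResultHandler implements ResultHandler {
    private volatile Object lastResult;
    private volatile String lastOutput;

    @Override
    public void handleExecutionResult(Object result, String sysout) {
      this.lastResult = result;   // the command's result object
      this.lastOutput = sysout;   // whatever the shell wrote to its output buffer
    }

    public Object getLastResult() { return lastResult; }
    public String getLastOutput() { return lastOutput; }
  }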

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/TableBuilderJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/TableBuilderJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/TableBuilderJUnitTest.java
new file mode 100644
index 0000000..e5f1d86
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/TableBuilderJUnitTest.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli;
+
+import com.gemstone.gemfire.management.internal.cli.result.TableBuilder;
+import com.gemstone.gemfire.management.internal.cli.result.TableBuilder.Row;
+import com.gemstone.gemfire.management.internal.cli.result.TableBuilder.RowGroup;
+import com.gemstone.gemfire.management.internal.cli.result.TableBuilder.Table;
+import com.gemstone.gemfire.management.internal.cli.result.TableBuilderHelper;
+import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+import java.io.IOException;
+import java.util.Properties;
+import java.util.Random;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * TODO: fails when running integrationTest from gradle command-line or in Eclipse on Windows 7
+ * <p>
+ * com.gemstone.gemfire.management.internal.cli.TableBuilderJUnitTest > testBasicScrapping FAILED
+ * java.lang.AssertionError: Expected length < 100 is 101 at org.junit.Assert.fail(Assert.java:88) at
+ * com.gemstone.gemfire.management.internal.cli.TableBuilderJUnitTest.doTableBuilderTestUnit(TableBuilderJUnitTest.java:115)
+ * at com.gemstone.gemfire.management.internal.cli.TableBuilderJUnitTest.testBasicScrapping(TableBuilderJUnitTest.java:134)
+ * <p>
+ * com.gemstone.gemfire.management.internal.cli.TableBuilderJUnitTest > testManyColumns FAILED java.lang.AssertionError:
+ * Expected length < 100 is 101 at org.junit.Assert.fail(Assert.java:88) at com.gemstone.gemfire.management.internal.cli.TableBuilderJUnitTest.doTableBuilderTestUnit(TableBuilderJUnitTest.java:115)
+ * at com.gemstone.gemfire.management.internal.cli.TableBuilderJUnitTest.testManyColumns(TableBuilderJUnitTest.java:155)
+ *
+ * @author tushark
+ */
+@Category(IntegrationTest.class)
+public class TableBuilderJUnitTest {
+
+  @Rule
+  public TestName testName = new TestName();
+
+  private final Table createTable(int rows, int cols, int width, String separator) {
+    Table resultTable = TableBuilder.newTable();
+    resultTable.setTabularResult(true);
+    resultTable.setColumnSeparator(separator);
+
+    resultTable.newBlankRow();
+    resultTable.newRow().newLeftCol("Displaying all fields for member: ");
+    resultTable.newBlankRow();
+    RowGroup rowGroup = resultTable.newRowGroup();
+    Row row = rowGroup.newRow();
+    for (int colIndex = 0; colIndex < cols; colIndex++) {
+      row.newCenterCol("Field" + colIndex);
+    }
+
+    rowGroup.newRowSeparator('-', false);
+
+    int counter = rows;
+    for (int i = 0; i < counter; i++) {
+      row = rowGroup.newRow();
+      for (int k = 0; k < cols; k++) {
+        row.newLeftCol(getString(i, width / cols));
+      }
+    }
+    resultTable.newBlankRow();
+
+    return resultTable;
+  }
+
+  private Object getString(int i, int width) {
+    StringBuilder sb = new StringBuilder();
+    Random random = new Random();
+    int k = 0;
+    double d = random.nextDouble();
+    // roughly 90% of the time pick a length within the column width, otherwise pick a wider one
+    if (d <= 0.9) {
+      k = random.nextInt(width);
+    } else {
+      k = width / 2 + random.nextInt(width);
+    }
+    random.nextInt(10);
+    for (int j = 0; j < k; j++) {
+      sb.append(i);
+      if (sb.length() > k) break;
+    }
+    return sb.toString();
+  }
+
+  private HeadlessGfsh createShell(Properties props) throws ClassNotFoundException, IOException {
+    String shellId = getClass().getSimpleName() + "_" + testName.getMethodName();
+    HeadlessGfsh shell = new HeadlessGfsh(shellId, 30, props);
+    return shell;
+  }
+
+  private void doTableBuilderTestUnit(int rows, int cols, String sep, boolean shouldTrim,
+      boolean expectTooManyColEx) throws ClassNotFoundException, IOException {
+    int width = Gfsh.getCurrentInstance().getTerminalWidth();
+    Table table = createTable(rows, cols, width, sep);
+    String st = table.buildTable();
+    System.out.println(st);
+
+    String[] array = st.split("\n");
+
+    int line = 0;
+    for (String s : array) {
+      System.out.println("For line " + line++ + " length is " + s.length() + " isWider = " + (s.length() > width));
+
+      if (shouldTrim) {
+        if (s.length() > width) {
+          fail("Expected length < " + width + " is " + s.length());
+        }
+      } else {
+        if (s.length() > 50 && s.length() <= width) {
+          fail("Expected length <= " + width + " is " + s.length());
+        }
+      }
+
+    }
+  }
+
+  /**
+   * Test Variations tablewide separator true false
+   */
+  @Test
+  public void testBasicScraping() throws ClassNotFoundException, IOException {
+    Properties props = new Properties();
+    props.setProperty(Gfsh.ENV_APP_RESULT_VIEWER, Gfsh.DEFAULT_APP_RESULT_VIEWER);
+    createShell(props);
+    assertTrue(TableBuilderHelper.shouldTrimColumns());
+    doTableBuilderTestUnit(15, 4, "|", true, false);
+  }
+
+
+  @Test
+  public void testSeparatorWithMultipleChars() throws ClassNotFoundException, IOException {
+    Properties props = new Properties();
+    props.setProperty(Gfsh.ENV_APP_RESULT_VIEWER, Gfsh.DEFAULT_APP_RESULT_VIEWER);
+    createShell(props);
+    assertTrue(TableBuilderHelper.shouldTrimColumns());
+    doTableBuilderTestUnit(15, 4, " | ", true, false);
+  }
+
+  /**
+   * Multiple columns, up to 8.
+   */
+  @Test
+  @Ignore("Bug 52051")
+  public void testManyColumns() throws ClassNotFoundException, IOException {
+    createShell(null);
+    assertTrue(TableBuilderHelper.shouldTrimColumns());
+    doTableBuilderTestUnit(15, 6, "|", true, true);
+  }
+
+  /**
+   * Set the gfsh env property result_viewer so that column adjustment is disabled for an external reader.
+   */
+  @Test
+  public void testDisableColumnAdjustment() throws ClassNotFoundException, IOException {
+    createShell(null);
+    assertFalse(TableBuilderHelper.shouldTrimColumns());
+    doTableBuilderTestUnit(15, 12, "|", false, false);
+  }
+
+}
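
The test above drives TableBuilder through its fluent API: create a Table, add row groups and rows with left- or center-aligned columns, insert a row separator, and call buildTable() to render the string whose line widths are then asserted. A minimal hedged sketch of that call sequence in isolation (header and cell values are illustrative):

  // Hedged sketch of the TableBuilder calls exercised by the test above.
  Table table = TableBuilder.newTable();
  table.setTabularResult(true);
  table.setColumnSeparator("|");
  RowGroup group = table.newRowGroup();
  Row header = group.newRow();
  header.newCenterCol("Member");
  header.newCenterCol("Status");
  group.newRowSeparator('-', false);
  Row row = group.newRow();
  row.newLeftCol("server-1");
  row.newLeftCol("online");
  String rendered = table.buildTable();   // the string the width checks inspect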

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CliCommandTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CliCommandTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CliCommandTestBase.java
new file mode 100644
index 0000000..a0fb8f8
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CliCommandTestBase.java
@@ -0,0 +1,560 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.management.ManagementService;
+import com.gemstone.gemfire.management.internal.cli.CommandManager;
+import com.gemstone.gemfire.management.internal.cli.HeadlessGfsh;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.parser.CommandTarget;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import util.TestException;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Base class for all the CLI/gfsh command dunit tests.
+ *
+ * @author Tushar Khairnar
+ * @author Abhishek Chaudhari
+ * @author David Hoots
+ * @author John Blum
+ */
+public class CliCommandTestBase extends CacheTestCase {
+
+  private static final long serialVersionUID = 1L;
+
+  protected static final String USE_HTTP_SYSTEM_PROPERTY = "useHTTP";
+
+  private ManagementService managementService;
+
+  private transient HeadlessGfsh shell;
+
+  private boolean useHttpOnConnect = Boolean.getBoolean("useHTTP");
+
+  private int httpPort;
+  private int jmxPort;
+
+  private String jmxHost;
+
+  public CliCommandTestBase(String name) {
+    super(name);
+  }
+
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    destroyDefaultSetup();
+    super.tearDown2();
+  }
+
+  /**
+   * Create all of the components necessary for the default setup. The provided properties will be used when creating
+   * the default cache. This will create GFSH in the controller VM (VM[4]) (no cache) and the manager in VM[0] (with
+   * cache). When adding regions, functions, keys, whatever to your cache for tests, you'll need to use
+   * Host.getHost(0).getVM(0).invoke(new SerializableRunnable() { public void run() { ... } } in order to have this
+   * setup run in the same VM as the manager.
+   * <p>
+   *
+   * @param props the Properties used when creating the cache for this default setup.
+   * @return the default testable GemFire shell.
+   */
+  @SuppressWarnings("serial")
+  protected final HeadlessGfsh createDefaultSetup(final Properties props) {
+    Object[] result = (Object[]) Host.getHost(0).getVM(0).invoke(new SerializableCallable() {
+      public Object call() {
+        final Object[] result = new Object[3];
+        final Properties localProps = (props != null ? props : new Properties());
+
+        try {
+          jmxHost = InetAddress.getLocalHost().getHostName();
+        } catch (UnknownHostException ignore) {
+          jmxHost = "localhost";
+        }
+
+        if (!localProps.containsKey(DistributionConfig.NAME_NAME)) {
+          localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
+        }
+
+        final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+
+        jmxPort = ports[0];
+        httpPort = ports[1];
+
+        localProps.setProperty(DistributionConfig.JMX_MANAGER_NAME, "true");
+        localProps.setProperty(DistributionConfig.JMX_MANAGER_START_NAME, "true");
+        localProps.setProperty(DistributionConfig.JMX_MANAGER_BIND_ADDRESS_NAME, String.valueOf(jmxHost));
+        localProps.setProperty(DistributionConfig.JMX_MANAGER_PORT_NAME, String.valueOf(jmxPort));
+        localProps.setProperty(DistributionConfig.HTTP_SERVICE_PORT_NAME, String.valueOf(httpPort));
+
+        getSystem(localProps);
+        verifyManagementServiceStarted(getCache());
+
+        result[0] = jmxHost;
+        result[1] = jmxPort;
+        result[2] = httpPort;
+
+        return result;
+      }
+    });
+
+    this.jmxHost = (String) result[0];
+    this.jmxPort = (Integer) result[1];
+    this.httpPort = (Integer) result[2];
+
+    return defaultShellConnect();
+  }
+
+  protected boolean useHTTPByTest() {
+    return false;
+  }
+
+  /**
+   * Destroy all of the components created for the default setup.
+   */
+  @SuppressWarnings("serial")
+  protected final void destroyDefaultSetup() {
+    if (this.shell != null) {
+      executeCommand(shell, "exit");
+      this.shell.terminate();
+      this.shell = null;
+    }
+
+    disconnectAllFromDS();
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        verifyManagementServiceStopped();
+      }
+    });
+  }
+
+  /**
+   * Start the default management service using the provided Cache.
+   *
+   * @param cache Cache to use when creating the management service
+   */
+  private void verifyManagementServiceStarted(Cache cache) {
+    assert (cache != null);
+
+    this.managementService = ManagementService.getExistingManagementService(cache);
+    assertNotNull(this.managementService);
+    assertTrue(this.managementService.isManager());
+    assertTrue(checkIfCommandsAreLoadedOrNot());
+  }
+
+  public static boolean checkIfCommandsAreLoadedOrNot() {
+    CommandManager manager;
+    try {
+      manager = CommandManager.getInstance();
+      Map<String, CommandTarget> commands = manager.getCommands();
+      return !commands.isEmpty();
+    } catch (ClassNotFoundException | IOException e) {
+      throw new RuntimeException("Could not load commands", e);
+    }
+  }
+
+  /**
+   * Stop the default management service.
+   */
+  private void verifyManagementServiceStopped() {
+    if (this.managementService != null) {
+      assertFalse(this.managementService.isManager());
+      this.managementService = null;
+    }
+  }
+
+  /**
+   * Connect the default shell to the default JMX server.
+   *
+   * @return The default shell.
+   */
+  private HeadlessGfsh defaultShellConnect() {
+    HeadlessGfsh shell = getDefaultShell();
+    shellConnect(this.jmxHost, this.jmxPort, this.httpPort, shell);
+    return shell;
+  }
+
+  /**
+   * Connect a shell to the JMX server at the given host and port
+   *
+   * @param host    Host of the JMX server
+   * @param jmxPort Port of the JMX server
+   * @param shell   Shell to connect
+   */
+  protected void shellConnect(final String host, final int jmxPort, final int httpPort, HeadlessGfsh shell) {
+    assert (host != null);
+    assert (shell != null);
+
+    final CommandStringBuilder command = new CommandStringBuilder(CliStrings.CONNECT);
+    String endpoint;
+
+    if (useHttpOnConnect) {
+      endpoint = "http://" + host + ":" + httpPort + "/gemfire/v1";
+      command.addOption(CliStrings.CONNECT__USE_HTTP, Boolean.TRUE.toString());
+      command.addOption(CliStrings.CONNECT__URL, endpoint);
+    } else {
+      endpoint = host + "[" + jmxPort + "]";
+      command.addOption(CliStrings.CONNECT__JMX_MANAGER, endpoint);
+    }
+
+    CommandResult result = executeCommand(shell, command.toString());
+
+    if (!shell.isConnectedAndReady()) {
+      throw new TestException(
+          "Connect command failed to connect to manager " + endpoint + " result=" + commandResultToString(result));
+    }
+
+    info("Successfully connected to managing node using " + (useHttpOnConnect ? "HTTP" : "JMX"));
+    assertTrue(shell.isConnectedAndReady());
+  }
+
+  /**
+   * Get the default shell (will create one if it doesn't already exist).
+   *
+   * @return The default shell
+   */
+  protected synchronized final HeadlessGfsh getDefaultShell() {
+    if (this.shell == null) {
+      this.shell = createShell();
+    }
+
+    return this.shell;
+  }
+
+  /**
+   * Create a HeadlessGfsh object.
+   *
+   * @return The created shell.
+   */
+  protected HeadlessGfsh createShell() {
+    try {
+      Gfsh.SUPPORT_MUTLIPLESHELL = true;
+      String shellId = getClass().getSimpleName() + "_" + getName();
+      HeadlessGfsh shell = new HeadlessGfsh(shellId, 30);
+      //Added to avoid trimming of the columns
+      info("Started testable shell: " + shell);
+      return shell;
+    } catch (ClassNotFoundException | IOException e) {
+      throw new TestException(getStackTrace(e));
+    }
+  }
+
+  /**
+   * Execute a command using the default shell and clear the shell events before returning.
+   *
+   * @param command Command to execute
+   * @return The result of the command execution
+   */
+  protected CommandResult executeCommand(String command) {
+    assert (command != null);
+
+    return executeCommand(getDefaultShell(), command);
+  }
+
+  /**
+   * Execute a command in the provided shell and clear the shell events before returning.
+   *
+   * @param shell   Shell in which to execute the command.
+   * @param command Command to execute
+   * @return The result of the command execution
+   */
+  protected CommandResult executeCommand(HeadlessGfsh shell, String command) {
+    assert (shell != null);
+    assert (command != null);
+
+    CommandResult commandResult = executeCommandWithoutClear(shell, command);
+    shell.clearEvents();
+    return commandResult;
+  }
+
+  /**
+   * Execute a command using the default shell. Useful for getting additional information from the shell after the
+   * command has been executed (using getDefaultShell().???). Caller is responsible for calling
+   * getDefaultShell().clearEvents() when done.
+   *
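+   * For example (illustrative only; any gfsh command string works here):
+   * <pre>
+   * CommandResult result = executeCommandWithoutClear("list members");
+   * // ... inspect additional shell state via getDefaultShell() ...
+   * getDefaultShell().clearEvents();
+   * </pre>
+   *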
+   * @param command Command to execute
+   * @return The result of the command execution
+   */
+  @SuppressWarnings("unused")
+  protected CommandResult executeCommandWithoutClear(String command) {
+    assert (command != null);
+
+    return executeCommandWithoutClear(getDefaultShell(), command);
+  }
+
+  /**
+   * Execute a command in the provided shell. Useful for getting additional information from the shell after the command
+   * has been executed (using getDefaultShell().???). Caller is responsible for calling getDefaultShell().clearEvents()
+   * when done.
+   *
+   * @param shell   Shell in which to execute the command.
+   * @param command Command to execute
+   * @return The result of the command execution
+   */
+  protected CommandResult executeCommandWithoutClear(HeadlessGfsh shell, String command) {
+    assert (shell != null);
+    assert (command != null);
+
+    try {
+      info("Executing command " + command + " with command Mgr " + CommandManager.getInstance());
+    } catch (ClassNotFoundException | IOException e) {
+      throw new TestException(getStackTrace(e));
+    }
+
+    shell.executeCommand(command);
+    if (shell.hasError()) {
+      error("executeCommand completed with error : " + shell.getError());
+    }
+
+    CommandResult result = null;
+    try {
+      result = (CommandResult) shell.getResult();
+    } catch (InterruptedException ex) {
+      error("shell received InterruptedException");
+    }
+
+    if (result != null) {
+      result.resetToFirstLine();
+    }
+
+    return result;
+  }
+
+  /**
+   * Utility method for viewing the results of a command.
+   *
+   * @param commandResult Results to dump
+   * @param printStream   Stream to dump the results to
+   */
+  protected void printResult(final CommandResult commandResult, PrintStream printStream) {
+    assert (commandResult != null);
+    assert (printStream != null);
+
+    commandResult.resetToFirstLine();
+    printStream.print(commandResultToString(commandResult));
+  }
+
+  protected String commandResultToString(final CommandResult commandResult) {
+    assertNotNull(commandResult);
+
+    commandResult.resetToFirstLine();
+
+    StringBuilder buffer = new StringBuilder(commandResult.getHeader());
+
+    while (commandResult.hasNextLine()) {
+      buffer.append(commandResult.nextLine());
+    }
+
+    buffer.append(commandResult.getFooter());
+
+    return buffer.toString();
+  }
+
+  /**
+   * Utility method for finding the CommandResult object in the Map of CommandOutput objects.
+   *
+   * @param commandOutput CommandOutput Map to search
+   * @return The CommandResult object or null if not found.
+   */
+  protected CommandResult extractCommandResult(Map<String, Object> commandOutput) {
+    assert (commandOutput != null);
+
+    for (Object resultObject : commandOutput.values()) {
+      if (resultObject instanceof CommandResult) {
+        CommandResult result = (CommandResult) resultObject;
+        result.resetToFirstLine();
+        return result;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Utility method to determine how many times a string occurs in another string. Note that when looking for matches
+   * substrings of other matches will be counted as a match. For example, looking for "AA" in the string "AAAA" will
+   * result in a return value of 3.
+   *
+   * @param stringToSearch String to search
+   * @param stringToCount  String to look for and count
+   * @return The number of matches.
+   */
+  protected int countMatchesInString(final String stringToSearch, final String stringToCount) {
+    assert (stringToSearch != null);
+    assert (stringToCount != null);
+
+    int length = stringToSearch.length();
+    int count = 0;
+    for (int i = 0; i < length; i++) {
+      if (stringToSearch.substring(i).startsWith(stringToCount)) {
+        count++;
+      }
+    }
+    return count;
+  }
+
+  /**
+   * Determines if a string contains a trimmed line that matches the pattern. So, any single line whose leading and
+   * trailing spaces have been removed which contains a string that exactly matches the given pattern will be considered
+   * a match.
+   *
+   * @param stringToSearch String to search
+   * @param stringPattern  Pattern to search for
+   * @return True if a match is found, false otherwise
+   */
+  protected boolean stringContainsLine(final String stringToSearch, final String stringPattern) {
+    assert (stringToSearch != null);
+    assert (stringPattern != null);
+
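+    // MULTILINE lets ^ and $ match at line boundaries, so the pattern must match one whole line
+    // once that line's leading and trailing whitespace is ignored.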
+    Pattern pattern = Pattern.compile("^\\s*" + stringPattern + "\\s*$", Pattern.MULTILINE);
+    Matcher matcher = pattern.matcher(stringToSearch);
+    return matcher.find();
+  }
+
+  /**
+   * Counts the number of distinct lines in a String.
+   *
+   * @param stringToSearch  String to search for lines.
+   * @param countBlankLines Whether to count blank lines (true to count)
+   * @return The number of lines found.
+   */
+  protected int countLinesInString(final String stringToSearch, final boolean countBlankLines) {
+    assert (stringToSearch != null);
+
+    int length = stringToSearch.length();
+    int count = 0;
+    char character = 0;
+    boolean foundNonSpaceChar = false;
+
+    for (int i = 0; i < length; i++) {
+      character = stringToSearch.charAt(i);
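+      // Treat a "\r\n" pair as a single line terminator by skipping over the '\n'.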
+      if (character == '\r' && (i + 1) < length && stringToSearch.charAt(i + 1) == '\n') {
+        i++;
+      }
+      if (character == '\n' || character == '\r') {
+        if (countBlankLines) {
+          count++;
+        } else {
+          if (foundNonSpaceChar) {
+            count++;
+          }
+        }
+        foundNonSpaceChar = false;
+      } else if (character != ' ' && character != '\t') {
+        foundNonSpaceChar = true;
+      }
+    }
+
+    // Even if the last line isn't terminated, it still counts as a line
+    if (character != '\n' && character != '\r') {
+      count++;
+    }
+
+    return count;
+  }
+
+  /**
+   * Get a specific line from the string (using \n or \r as a line separator).
+   *
+   * @param stringToSearch String to get the line from
+   * @param lineNumber     Line number to get
+   * @return The line
+   */
+  protected String getLineFromString(final String stringToSearch, final int lineNumber) {
+    assert (stringToSearch != null);
+    assert (lineNumber > 0);
+
+    int length = stringToSearch.length();
+    int count = 0;
+    int startIndex = 0;
+    char character;
+    int endIndex = length;
+
+    for (int i = 0; i < length; i++) {
+      character = stringToSearch.charAt(i);
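+      // As in countLinesInString, a "\r\n" pair counts as a single line terminator.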
+      if (character == '\r' && (i + 1) < length && stringToSearch.charAt(i + 1) == '\n') {
+        i++;
+      }
+      if (character == '\n' || character == '\r') {
+        if (lineNumber == 1) {
+          endIndex = i;
+          break;
+        }
+        if (++count == lineNumber - 1) {
+          startIndex = i + 1;
+        } else if (count >= lineNumber) {
+          endIndex = i;
+          break;
+        }
+      }
+    }
+
+    return stringToSearch.substring(startIndex, endIndex);
+  }
+
+  protected static String getStackTrace(Throwable aThrowable) {
+    StringWriter sw = new StringWriter();
+    aThrowable.printStackTrace(new PrintWriter(sw, true));
+    return sw.toString();
+  }
+
+  protected void info(String string) {
+    getLogWriter().info(string);
+  }
+
+  protected void debug(String string) {
+    getLogWriter().fine(string);
+  }
+
+  protected void error(String string) {
+    getLogWriter().error(string);
+  }
+
+  protected void error(String string, Throwable e) {
+    getLogWriter().error(string, e);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ConfigCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ConfigCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ConfigCommandsDUnitTest.java
new file mode 100644
index 0000000..81536db
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ConfigCommandsDUnitTest.java
@@ -0,0 +1,497 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.server.CacheServer;
+import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.distributed.internal.InternalLocator;
+import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
+import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
+import com.gemstone.gemfire.internal.logging.LogWriterImpl;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.remote.CommandProcessor;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import dunit.VM;
+import org.apache.commons.io.FileUtils;
+
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.util.Collections;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * Dunit class for testing GemFire config commands : describe config, export config, alter runtime config
+ *
+ * @author David Hoots
+ * @author Sourabh Bansod
+ * @since 7.0
+ */
+public class ConfigCommandsDUnitTest extends CliCommandTestBase {
+  private static final long serialVersionUID = 1L;
+
+  File managerConfigFile = new File("Manager-cache.xml");
+  File managerPropsFile = new File("Manager-gf.properties");
+  File vm1ConfigFile = new File("VM1-cache.xml");
+  File vm1PropsFile = new File("VM1-gf.properties");
+  File vm2ConfigFile = new File("VM2-cache.xml");
+  File vm2PropsFile = new File("VM2-gf.properties");
+  File shellConfigFile = new File("Shell-cache.xml");
+  File shellPropsFile = new File("Shell-gf.properties");
+  File subDir = new File("ConfigCommandsDUnitTestSubDir");
+  File subManagerConfigFile = new File(subDir, managerConfigFile.getName());
+
+  public ConfigCommandsDUnitTest(String name) {
+    super(name);
+  }
+
+  public void tearDown2() throws Exception {
+    deleteTestFiles();
+    invokeInEveryVM(new SerializableRunnable() {
+
+      @Override
+      public void run() {
+        try {
+          deleteTestFiles();
+        } catch (IOException e) {
+          fail("error", e);
+        }
+      }
+    });
+    super.tearDown2();
+  }
+
+  public void testDescribeConfig() throws ClassNotFoundException, IOException {
+    createDefaultSetup(null);
+    final String controllerName = "Member2";
+
+    /***
+     * Create properties for the controller VM
+     */
+    final Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    localProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "info");
+    localProps.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
+    localProps.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
+    localProps.setProperty(DistributionConfig.NAME_NAME, controllerName);
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "G1");
+    getSystem(localProps);
+    Cache cache = getCache();
+    int ports[] = AvailablePortHelper.getRandomAvailableTCPPorts(1);
+    CacheServer cs = getCache().addCacheServer();
+    cs.setPort(ports[0]);
+    cs.setMaxThreads(10);
+    cs.setMaxConnections(9);
+    cs.start();
+
+    RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean();
+    List<String> jvmArgs = runtimeBean.getInputArguments();
+
+    getLogWriter().info("#SB Actual JVM Args : ");
+
+    for (String jvmArg : jvmArgs) {
+      getLogWriter().info("#SB JVM " + jvmArg);
+    }
+
+    InternalDistributedSystem system = (InternalDistributedSystem) cache.getDistributedSystem();
+    DistributionConfig config = system.getConfig();
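+    // Change archive-file-size-limit from its default so it still appears when defaults are hidden.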
+    config.setArchiveFileSizeLimit(1000);
+
+    String command = CliStrings.DESCRIBE_CONFIG + " --member=" + controllerName;
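+    // Also run the command through the CommandProcessor directly; only the gfsh result below is asserted.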
+    CommandProcessor cmdProcessor = new CommandProcessor();
+    cmdProcessor.createCommandStatement(command, Collections.EMPTY_MAP).process();
+
+    CommandResult cmdResult = executeCommand(command);
+
+    String resultStr = commandResultToString(cmdResult);
+    getLogWriter().info("#SB Hiding the defaults\n" + resultStr);
+
+    assertEquals(Status.OK, cmdResult.getStatus());
+    assertTrue(resultStr.contains("G1"));
+    assertTrue(resultStr.contains(controllerName));
+    assertTrue(resultStr.contains("archive-file-size-limit"));
+    assertFalse(resultStr.contains("copy-on-read"));
+
+    cmdResult = executeCommand(command + " --" + CliStrings.DESCRIBE_CONFIG__HIDE__DEFAULTS + "=false");
+    resultStr = commandResultToString(cmdResult);
+    getLogWriter().info("#SB No hiding of defaults\n" + resultStr);
+
+    assertEquals(Status.OK, cmdResult.getStatus());
+    assertTrue(resultStr.contains("is-server"));
+    assertTrue(resultStr.contains(controllerName));
+    assertTrue(resultStr.contains("copy-on-read"));
+
+    cs.stop();
+  }
+
+  @SuppressWarnings("serial")
+  public void testExportConfig() throws IOException {
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+    createDefaultSetup(localProps);
+
+    // Create a cache in another VM (VM1)
+    Host.getHost(0).getVM(1).invoke(new SerializableRunnable() {
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, "VM1");
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group2");
+        getSystem(localProps);
+        getCache();
+      }
+    });
+
+    // Create a cache in a 3rd VM (VM2)
+    Host.getHost(0).getVM(2).invoke(new SerializableRunnable() {
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, "VM2");
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group2");
+        getSystem(localProps);
+        getCache();
+      }
+    });
+
+    // Create a cache in the local VM
+    localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Shell");
+    getSystem(localProps);
+    Cache cache = getCache();
+
+    // Test export config for all members
+    deleteTestFiles();
+    CommandResult cmdResult = executeCommand("export config");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    assertTrue(this.managerConfigFile.exists());
+    assertTrue(this.managerPropsFile.exists());
+    assertTrue(this.vm1ConfigFile.exists());
+    assertTrue(this.vm1PropsFile.exists());
+    assertTrue(this.vm2ConfigFile.exists());
+    assertTrue(this.vm2PropsFile.exists());
+    assertTrue(this.shellConfigFile.exists());
+    assertTrue(this.shellPropsFile.exists());
+
+    // Test exporting member
+    deleteTestFiles();
+    cmdResult = executeCommand("export config --member=Manager");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    assertTrue(this.managerConfigFile.exists());
+    assertFalse(this.vm1ConfigFile.exists());
+    assertFalse(this.vm2ConfigFile.exists());
+    assertFalse(this.shellConfigFile.exists());
+
+    // Test exporting group
+    deleteTestFiles();
+    cmdResult = executeCommand("export config --group=Group2");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    assertFalse(this.managerConfigFile.exists());
+    assertTrue(this.vm1ConfigFile.exists());
+    assertTrue(this.vm2ConfigFile.exists());
+    assertFalse(this.shellConfigFile.exists());
+
+    // Test export to directory
+    deleteTestFiles();
+    cmdResult = executeCommand("export config --dir=" + subDir.getAbsolutePath());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    assertFalse(this.managerConfigFile.exists());
+    assertTrue(this.subManagerConfigFile.exists());
+
+    // Test the contents of the file
+    StringWriter stringWriter = new StringWriter();
+    PrintWriter printWriter = new PrintWriter(stringWriter);
+    CacheXmlGenerator.generate(cache, printWriter, false, false, false);
+    String configToMatch = stringWriter.toString();
+
+    deleteTestFiles();
+    cmdResult = executeCommand("export config --member=Shell");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    char[] fileContents = new char[configToMatch.length()];
+    try (FileReader reader = new FileReader(shellConfigFile)) {
+      reader.read(fileContents);
+    } catch (Exception ex) {
+      fail("Unable to read file contents for comparison", ex);
+    }
+
+    assertEquals(configToMatch, new String(fileContents));
+  }
+
+  public void testAlterRuntimeConfig() throws ClassNotFoundException, IOException {
+    final String controller = "controller";
+    createDefaultSetup(null);
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, controller);
+    localProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "error");
+    getSystem(localProps);
+    final GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
+    final DistributionConfig config = cache.getSystem().getConfig();
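+    // Alter several runtime attributes through gfsh, then verify the new values directly on this member's config.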
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.ALTER_RUNTIME_CONFIG);
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__MEMBER, controller);
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__LEVEL, "info");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__FILE__SIZE__LIMIT, "50");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__DISK__SPACE__LIMIT, "32");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__FILE__SIZE__LIMIT, "49");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLE__RATE, "2000");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__ARCHIVE__FILE, "stat.gfs");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLING__ENABLED, "true");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT, "10");
+    CommandResult cmdResult = executeCommand(csb.getCommandString());
+    String resultString = commandResultToString(cmdResult);
+    getLogWriter().info("Result\n");
+    getLogWriter().info(resultString);
+    assertEquals(Status.OK, cmdResult.getStatus());
+    assertEquals(LogWriterImpl.INFO_LEVEL, config.getLogLevel());
+    assertEquals(50, config.getLogFileSizeLimit());
+    assertEquals(49, config.getArchiveFileSizeLimit());
+    assertEquals(32, config.getArchiveDiskSpaceLimit());
+    assertEquals(2000, config.getStatisticSampleRate());
+    assertEquals("stat.gfs", config.getStatisticArchiveFile().getName());
+    assertTrue(config.getStatisticSamplingEnabled());
+    assertEquals(10, config.getLogDiskSpaceLimit());
+
+
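+    // Also exercise the CommandProcessor path with a bare "alter runtime"; its result is not asserted.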
+    CommandProcessor commandProcessor = new CommandProcessor();
+    commandProcessor.createCommandStatement("alter runtime", Collections.EMPTY_MAP).process();
+  }
+
+  public void testAlterRuntimeConfigRandom() {
+    final String member1 = "VM1";
+    final String controller = "controller";
+    createDefaultSetup(null);
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, controller);
+    localProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "error");
+    getSystem(localProps);
+    final GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
+    final DistributionConfig config = cache.getSystem().getConfig();
+
+    Host.getHost(0).getVM(1).invoke(new SerializableRunnable() {
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, member1);
+        getSystem(localProps);
+        Cache cache = getCache();
+      }
+    });
+
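+    // "alter runtime" with no attribute options should fail and report that a relevant option is required.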
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.ALTER_RUNTIME_CONFIG);
+    CommandResult cmdResult = executeCommand(csb.getCommandString());
+    String resultAsString = commandResultToString(cmdResult);
+    getLogWriter().info("#SB Result\n");
+    getLogWriter().info(resultAsString);
+    assertEquals(Status.ERROR, cmdResult.getStatus());
+    assertTrue(resultAsString.contains(CliStrings.ALTER_RUNTIME_CONFIG__RELEVANT__OPTION__MESSAGE));
+
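+    // A log-disk-space-limit outside the allowed range should also be rejected.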
+    csb = new CommandStringBuilder(CliStrings.ALTER_RUNTIME_CONFIG);
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT, "2000000000");
+    cmdResult = executeCommand(csb.getCommandString());
+    resultAsString = commandResultToString(cmdResult);
+    getLogWriter().info("#SB Result\n");
+    getLogWriter().info(resultAsString);
+    assertEquals(Status.ERROR, cmdResult.getStatus());
+
+  }
+
+  public void testAlterRuntimeConfigOnAllMembers() {
+    final String member1 = "VM1";
+    final String controller = "controller";
+    createDefaultSetup(null);
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, controller);
+    localProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "error");
+    getSystem(localProps);
+    final GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
+    final DistributionConfig config = cache.getSystem().getConfig();
+
+    Host.getHost(0).getVM(1).invoke(new SerializableRunnable() {
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, member1);
+        getSystem(localProps);
+        Cache cache = getCache();
+      }
+    });
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.ALTER_RUNTIME_CONFIG);
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__LEVEL, "info");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__FILE__SIZE__LIMIT, "50");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__DISK__SPACE__LIMIT, "32");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__FILE__SIZE__LIMIT, "49");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLE__RATE, "2000");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__ARCHIVE__FILE, "stat.gfs");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLING__ENABLED, "true");
+    csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT, "10");
+    CommandResult cmdResult = executeCommand(csb.getCommandString());
+    String resultString = commandResultToString(cmdResult);
+    getLogWriter().info("#SB Result\n");
+    getLogWriter().info(resultString);
+    assertEquals(Status.OK, cmdResult.getStatus());
+    assertEquals(LogWriterImpl.INFO_LEVEL, config.getLogLevel());
+    assertEquals(50, config.getLogFileSizeLimit());
+    assertEquals(49, config.getArchiveFileSizeLimit());
+    assertEquals(32, config.getArchiveDiskSpaceLimit());
+    assertEquals(2000, config.getStatisticSampleRate());
+    assertEquals("stat.gfs", config.getStatisticArchiveFile().getName());
+    assertTrue(config.getStatisticSamplingEnabled());
+    assertEquals(10, config.getLogDiskSpaceLimit());
+
+    // Validate the changes in the vm1
+    Host.getHost(0).getVM(1).invoke(new SerializableRunnable() {
+      public void run() {
+        GemFireCacheImpl cacheVM1 = (GemFireCacheImpl) getCache();
+        final DistributionConfig configVM1 = cacheVM1.getSystem().getConfig();
+        assertEquals(LogWriterImpl.INFO_LEVEL, configVM1.getLogLevel());
+        assertEquals(50, configVM1.getLogFileSizeLimit());
+        assertEquals(49, configVM1.getArchiveFileSizeLimit());
+        assertEquals(32, configVM1.getArchiveDiskSpaceLimit());
+        assertEquals(2000, configVM1.getStatisticSampleRate());
+        assertEquals("stat.gfs", configVM1.getStatisticArchiveFile().getName());
+        assertTrue(configVM1.getStatisticSamplingEnabled());
+        assertEquals(10, configVM1.getLogDiskSpaceLimit());
+      }
+    });
+  }
+
+  /**
+   * Asserts that altering the runtime config correctly updates the shared configuration.
+   * <p>
+   * Disabled: this test frequently fails during unit test runs. See ticket #52204
+   */
+  public void disabledtestAlterUpdatesSharedConfig() {
+    disconnectAllFromDS();
+
+    final String groupName = "testAlterRuntimeConfigSharedConfigGroup";
+
+    // Start the Locator and wait for shared configuration to be available
+    final int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+
+        final File locatorLogFile = new File("locator-" + locatorPort + ".log");
+        final Properties locatorProps = new Properties();
+        locatorProps.setProperty(DistributionConfig.NAME_NAME, "Locator");
+        locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        locatorProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+        try {
+          final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
+              locatorProps);
+
+          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+            @Override
+            public boolean done() {
+              return locator.isSharedConfigurationRunning();
+            }
+
+            @Override
+            public String description() {
+              return "Waiting for shared configuration to be started";
+            }
+          };
+          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+        } catch (IOException ioex) {
+          fail("Unable to create a locator with a shared configuration");
+        }
+      }
+    });
+
+    // Start the default manager
+    Properties managerProps = new Properties();
+    managerProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    managerProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+    createDefaultSetup(managerProps);
+
+    // Create a cache in VM 1
+    VM vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        //Make sure no previous shared config is screwing up this test.
+        FileUtil.delete(new File("ConfigDiskDir_Locator"));
+        FileUtil.delete(new File("cluster_config"));
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "error");
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        getSystem(localProps);
+
+        assertNotNull(getCache());
+        assertEquals("error", system.getConfig().getAttribute(DistributionConfig.LOG_LEVEL_NAME));
+        return null;
+      }
+    });
+
+    // Test altering the runtime config
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.ALTER_RUNTIME_CONFIG);
+    commandStringBuilder.addOption(CliStrings.ALTER_RUNTIME_CONFIG__GROUP, groupName);
+    commandStringBuilder.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__LEVEL, "fine");
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Make sure the shared config was updated
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        Properties gemfireProperties;
+        try {
+          gemfireProperties = sharedConfig.getConfiguration(groupName).getGemfireProperties();
+          assertEquals("fine", gemfireProperties.get(DistributionConfig.LOG_LEVEL_NAME));
+        } catch (Exception e) {
+          fail("Error occurred in cluster configuration service", e);
+        }
+      }
+    });
+  }
+
+  private final void deleteTestFiles() throws IOException {
+    this.managerConfigFile.delete();
+    this.managerPropsFile.delete();
+    this.vm1ConfigFile.delete();
+    this.vm1PropsFile.delete();
+    this.vm2ConfigFile.delete();
+    this.vm2PropsFile.delete();
+    this.shellConfigFile.delete();
+    this.shellPropsFile.delete();
+
+    FileUtils.deleteDirectory(this.subDir);
+  }
+}


[45/50] [abbrv] incubator-geode git commit: Closes #51 *move SameHashDifferentTrace to test code*

Posted by kl...@apache.org.
Closes #51 *move SameHashDifferentTrace to test code*


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/24c170af
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/24c170af
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/24c170af

Branch: refs/heads/feature/GEODE-291
Commit: 24c170afb4dba6f745dd24ab30de99b24474a3b5
Parents: 11c62f2
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Thu Dec 10 14:24:27 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Thu Dec 10 14:24:27 2015 -0800

----------------------------------------------------------------------

----------------------------------------------------------------------



[47/50] [abbrv] incubator-geode git commit: added test for region multicast enabled

Posted by kl...@apache.org.
added test for region multicast enabled

updated multicast test


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/81c472f7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/81c472f7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/81c472f7

Branch: refs/heads/feature/GEODE-291
Commit: 81c472f714967da158a1039e5efda4fc478b56b4
Parents: 1c42379
Author: Hitesh Khamesra <hi...@yahoo.com>
Authored: Wed Dec 9 11:15:34 2015 -0800
Committer: Hitesh Khamesra <hi...@yahoo.com>
Committed: Thu Dec 10 15:31:12 2015 -0800

----------------------------------------------------------------------
 .../gemfire/distributed/internal/DMStats.java   |   1 +
 .../distributed/internal/DistributionStats.java |   3 +
 .../internal/LonerDistributionManager.java      |   2 +
 .../DistributedMulticastRegionDUnitTest.java    | 197 +++++++++++++++++++
 4 files changed, 203 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/81c472f7/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DMStats.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DMStats.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DMStats.java
index e7b155a..e79a40b 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DMStats.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DMStats.java
@@ -205,6 +205,7 @@ public interface DMStats {
    * returns the current value of the mcastWrites statistic
    */
   public int getMcastWrites();
+  public int getMcastReads();
 
   public long startSerialization();
   public void endSerialization(long start, int bytes);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/81c472f7/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionStats.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionStats.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionStats.java
index 39b4986..804b507 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionStats.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionStats.java
@@ -1094,6 +1094,9 @@ public class DistributionStats implements DMStats {
   public int getMcastWrites() {
     return stats.getInt(mcastWritesId);
   }
+  public int getMcastReads() {
+    return stats.getInt(mcastReadsId);
+  }
   public void incMcastReadBytes(int amount) {
     stats.incInt(mcastReadsId, 1);
     stats.incLong(mcastReadBytesId, amount);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/81c472f7/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/LonerDistributionManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/LonerDistributionManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/LonerDistributionManager.java
index 1fc9f5b..60158d1 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/LonerDistributionManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/LonerDistributionManager.java
@@ -451,6 +451,8 @@ public class LonerDistributionManager implements DM {
     @Override
     public int getMcastWrites() { return 0; }
     @Override
+    public int getMcastReads() { return 0; }
+    @Override
     public void incUcastReadBytes(int amount) {}
     @Override
     public void incMcastReadBytes(int amount) {}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/81c472f7/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedMulticastRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedMulticastRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedMulticastRegionDUnitTest.java
new file mode 100755
index 0000000..f8296cc
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedMulticastRegionDUnitTest.java
@@ -0,0 +1,197 @@
+package com.gemstone.gemfire.cache30;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.CacheException;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalLocator;
+import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+public class DistributedMulticastRegionDUnitTest extends CacheTestCase {
+
+  static int locatorVM = 3;
+  static String mcastport = "42786";
+  static String mcastttl = "0";
+  
+  private int locatorPort;
+
+  public DistributedMulticastRegionDUnitTest(String name) {
+    super(name);
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    clean();
+    super.setUp();    
+  }
+  
+  @Override
+  public void tearDown2() throws Exception {
+    clean();
+    super.tearDown2();
+  }
+  
+  private void clean(){
+    SerializableRunnable cleanVM =
+        new CacheSerializableRunnable("clean VM") {
+            public void run2() throws CacheException {
+              disconnectFromDS();
+            }
+        };
+    invokeInEveryVM(cleanVM);    
+  }
+  
+  public void testMulticastEnabled() {
+    final String name = "mcastRegion";
+    SerializableRunnable create =
+      new CacheSerializableRunnable("Create Region") {
+          public void run2() throws CacheException {
+            createRegion(name, getRegionAttributes());
+          }
+        };
+
+    // 1. start locator with mcast port
+    locatorPort = startLocator();
+    Host host = Host.getHost(0);
+    final VM vm0 = host.getVM(0);
+    final VM vm1 = host.getVM(1);
+    // 2. create the multicast-enabled region in two VMs
+    vm0.invoke(create);
+    vm1.invoke(create);
+    
+    SerializableRunnable validateMulticastBeforeRegionOps =
+        new CacheSerializableRunnable("validateMulticast before region ops") {
+            public void run2() throws CacheException {
+              validateMulticastOpsBeforeRegionOps();
+            }
+        };
+      
+    vm0.invoke(validateMulticastBeforeRegionOps);
+    vm1.invoke(validateMulticastBeforeRegionOps);
+    
+    SerializableRunnable doPuts =
+      new CacheSerializableRunnable("do put") {
+          public void run2() throws CacheException {
+            final Region region =
+                getRootRegion().getSubregion(name);
+            for(int i =0 ; i < 5; i++) {
+              region.put(i, i);
+            }
+          }
+      };
+      
+    vm0.invoke(doPuts);
+    
+    SerializableRunnable validateMulticastAfterRegionOps =
+      new CacheSerializableRunnable("validateMulticast after region ops") {
+          public void run2() throws CacheException {
+            validateMulticastOpsAfterRegionOps();
+          }
+      };
+    
+      vm0.invoke(validateMulticastAfterRegionOps);
+      vm1.invoke(validateMulticastAfterRegionOps);
+   
+      closeLocator();      
+  }
+  
+  protected RegionAttributes getRegionAttributes() {
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(Scope.DISTRIBUTED_ACK);
+    factory.setDataPolicy(DataPolicy.PRELOADED);
+    factory.setEarlyAck(false);
+    factory.setConcurrencyChecksEnabled(false);
+    factory.setMulticastEnabled(true);
+    return factory.create();
+  }
+  
+  public Properties getDistributedSystemProperties() {
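+    // Every member uses the same mcast port/TTL, with statistic sampling on so mcast reads/writes can be checked.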
+    Properties p = new Properties();
+    p.put(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
+    p.put(DistributionConfig.STATISTIC_ARCHIVE_FILE_NAME, "multicast");
+    p.put(DistributionConfig.MCAST_PORT_NAME, mcastport);
+    p.put(DistributionConfig.MCAST_TTL_NAME, mcastttl);
+    p.put(DistributionConfig.LOCATORS_NAME, "localhost[" + locatorPort +"]");
+    p.put(DistributionConfig.LOG_LEVEL_NAME, "info");
+    return p;
+  } 
+  
+  private void validateMulticastOpsAfterRegionOps() {
+    int writes = getGemfireCache().getDistributionManager().getStats().getMcastWrites();
+    int reads = getGemfireCache().getDistributionManager().getStats().getMcastReads();
+    assertTrue("Should have multicast writes or reads. Writes=  " + writes +  " ,read= " + reads, 
+        writes > 0 || reads > 0);
+  }
+  
+  private void validateMulticastOpsBeforeRegionOps() {
+    int writes = getGemfireCache().getDistributionManager().getStats().getMcastWrites();
+    int reads = getGemfireCache().getDistributionManager().getStats().getMcastReads();
+    int total = writes + reads;
+    assertTrue("Should not have any multicast writes or reads before region ops. Writes=  " + writes +  " ,read= " + reads, 
+        total == 0);
+  }
+  
+  private int startLocator() {
+    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(3);
+    final int locatorPort = ports[0];
+
+    VM locator1Vm = Host.getHost(0).getVM(locatorVM);
+    locator1Vm.invoke(new SerializableCallable() {
+      @Override
+      public Object call() {
+        final File locatorLogFile = new File(testName + "-locator-" + locatorPort + ".log");
+        final Properties locatorProps = new Properties();
+        locatorProps.setProperty(DistributionConfig.NAME_NAME, "LocatorWithMcast");
+        locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, mcastport);
+        locatorProps.setProperty(DistributionConfig.MCAST_TTL_NAME, mcastttl);
+        locatorProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "info");
+        //locatorProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+        try {
+          final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, null, null,
+              locatorProps);
+          System.out.println("test Locator started " + locatorPort);
+        } catch (IOException ioex) {
+          fail("Unable to start a locator with multicast enabled");
+        }
+  
+        return null;
+      }
+    });
+    return locatorPort;
+  }
+  
+  private void closeLocator() {
+    VM locator1Vm = Host.getHost(0).getVM(locatorVM);
+    SerializableRunnable locatorCleanup = new SerializableRunnable() {
+      @Override
+      public void run() {
+        System.out.println("test Locator closing " + locatorPort);
+        InternalLocator locator = InternalLocator.getLocator();
+        if (locator != null) {
+          locator.stop();
+          System.out.println("test Locator closed " + locatorPort);
+        }
+      }
+    };
+    locator1Vm.invoke(locatorCleanup);
+  }
+  
+}


[02/50] [abbrv] incubator-geode git commit: Merge commit 'c41f98c' into feature/GEODE-53

Posted by kl...@apache.org.
Merge commit 'c41f98c' into feature/GEODE-53


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/31e85e1b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/31e85e1b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/31e85e1b

Branch: refs/heads/feature/GEODE-291
Commit: 31e85e1b71c10d742bd00ba0ada7c6d503ac3330
Parents: b66c479 c41f98c
Author: William Markito <wm...@pivotal.io>
Authored: Wed Nov 25 11:57:47 2015 -0800
Committer: William Markito <wm...@pivotal.io>
Committed: Wed Nov 25 11:57:47 2015 -0800

----------------------------------------------------------------------
 gemfire-site/content/community/index.html       | 190 ++++++++++++++-----
 gemfire-site/content/index.html                 |  27 +--
 gemfire-site/content/releases/index.html        |  12 +-
 .../website/content/community/index.html        | 178 +++++++++++++----
 gemfire-site/website/content/index.html         |  49 +++--
 .../website/content/static/github-btn.html      |   2 -
 gemfire-site/website/layouts/footer.html        |   2 +-
 gemfire-site/website/layouts/header.html        |  10 +-
 8 files changed, 338 insertions(+), 132 deletions(-)
----------------------------------------------------------------------



[13/50] [abbrv] incubator-geode git commit: GEODE-563: Moving gfsh tests from closed

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
new file mode 100644
index 0000000..1a64faf
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
@@ -0,0 +1,2087 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.PartitionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.query.QueryInvalidException;
+import com.gemstone.gemfire.cache.query.data.Portfolio;
+import com.gemstone.gemfire.cache.query.internal.CompiledValue;
+import com.gemstone.gemfire.cache.query.internal.QCompiler;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.lang.StringUtils;
+import com.gemstone.gemfire.management.DistributedRegionMXBean;
+import com.gemstone.gemfire.management.ManagementService;
+import com.gemstone.gemfire.management.ManagerMXBean;
+import com.gemstone.gemfire.management.MemberMXBean;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.CliUtil;
+import com.gemstone.gemfire.management.internal.cli.HeadlessGfsh;
+import com.gemstone.gemfire.management.internal.cli.domain.DataCommandRequest;
+import com.gemstone.gemfire.management.internal.cli.dto.Car;
+import com.gemstone.gemfire.management.internal.cli.dto.Key1;
+import com.gemstone.gemfire.management.internal.cli.dto.ObjectWithCharAttr;
+import com.gemstone.gemfire.management.internal.cli.dto.Value1;
+import com.gemstone.gemfire.management.internal.cli.dto.Value2;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.json.GfJsonArray;
+import com.gemstone.gemfire.management.internal.cli.json.GfJsonException;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData;
+import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData.SectionResultData;
+import com.gemstone.gemfire.management.internal.cli.result.ResultData;
+import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import dunit.VM;
+import hydra.GsRandom;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Random;
+import java.util.Set;
+
+/**
+ * Dunit class for testing gemfire data commands : get, put, remove, locate entry, select, rebalance
+ *
+ * @author ajayp
+ * @author tushark
+ */
+
+@SuppressWarnings("serial")
+public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
+
+  private static final long serialVersionUID = 1L;
+  private static final String REGION_NAME = "FunctionCommandsReplicatedRegion";
+  private static final String REBALANCE_REGION_NAME = "GemfireDataCommandsDUnitTestRegion";
+  private static final String REBALANCE_REGION2_NAME = "GemfireDataCommandsDUnitTestRegion2";
+  private static final String DATA_REGION_NAME = "GemfireDataCommandsTestRegion";
+  private static final String DATA_REGION_NAME_VM1 = "GemfireDataCommandsTestRegion_Vm1";
+  private static final String DATA_REGION_NAME_VM2 = "GemfireDataCommandsTestRegion_Vm2";
+  private static final String DATA_REGION_NAME_PATH = "/GemfireDataCommandsTestRegion";
+  private static final String DATA_REGION_NAME_VM1_PATH = "/GemfireDataCommandsTestRegion_Vm1";
+  private static final String DATA_REGION_NAME_VM2_PATH = "/GemfireDataCommandsTestRegion_Vm2";
+
+  private static final String DATA_PAR_REGION_NAME = "GemfireDataCommandsTestParRegion";
+  private static final String DATA_PAR_REGION_NAME_VM1 = "GemfireDataCommandsTestParRegion_Vm1";
+  private static final String DATA_PAR_REGION_NAME_VM2 = "GemfireDataCommandsTestParRegion_Vm2";
+  private static final String DATA_PAR_REGION_NAME_PATH = "/GemfireDataCommandsTestParRegion";
+  private static final String DATA_PAR_REGION_NAME_VM1_PATH = "/GemfireDataCommandsTestParRegion_Vm1";
+  private static final String DATA_PAR_REGION_NAME_VM2_PATH = "/GemfireDataCommandsTestParRegion_Vm2";
+
+  private static final String DATA_REGION_NAME_CHILD_1 = "ChildRegionRegion1";
+  private static final String DATA_REGION_NAME_CHILD_1_PATH = "/GemfireDataCommandsTestRegion/ChildRegionRegion1";
+  private static final String DATA_REGION_NAME_CHILD_1_2 = "ChildRegionRegion12";
+  private static final String DATA_REGION_NAME_CHILD_1_2_PATH = "/GemfireDataCommandsTestRegion/ChildRegionRegion1/ChildRegionRegion12";
+
+
+  private static final String keyTemplate = "('id':'?','name':'name?')";
+  private static final String valueTemplate = "('stateName':'State?','population':?1,'capitalCity':'capital?','areaInSqKm':?2)";
+  private static final String carTemplate = "\"('attributes':?map,'make':'?make','model':'?model','colors':?list,'attributeSet':?set)\"";
+
+  final static int COUNT = 5;
+
+  public GemfireDataCommandsDUnitTest(String name) {
+    super(name);
+  }
+
+  public static String getMemberId() {
+    Cache cache = new GemfireDataCommandsDUnitTest("test").getCache();
+    return cache.getDistributedSystem().getDistributedMember().getId();
+  }
+
+
+  void setupForGetPutRemoveLocateEntry(String testName) {
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.NAME_NAME, testName + "Manager");
+    HeadlessGfsh gfsh = createDefaultSetup(props);
+    assertNotNull(gfsh);
+    assertEquals(true, gfsh.isConnectedAndReady());
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        Cache cache = getCache();
+        RegionFactory regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
+        Region dataRegion = regionFactory.create(DATA_REGION_NAME);
+        assertNotNull(dataRegion);
+        getLogWriter().info("Created Region " + dataRegion);
+
+        dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1, dataRegion.getAttributes());
+        assertNotNull(dataRegion);
+        getLogWriter().info("Created Region " + dataRegion);
+
+        dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1_2, dataRegion.getAttributes());
+        assertNotNull(dataRegion);
+        getLogWriter().info("Created Region " + dataRegion);
+
+        dataRegion = regionFactory.create(DATA_REGION_NAME_VM1);
+        assertNotNull(dataRegion);
+        getLogWriter().info("Created Region " + dataRegion);
+
+        PartitionAttributes partitionAttrs = new PartitionAttributesFactory().setRedundantCopies(2).create();
+        RegionFactory<Object, Object> partitionRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        partitionRegionFactory.setPartitionAttributes(partitionAttrs);
+        Region dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME);
+        assertNotNull(dataParRegion);
+        getLogWriter().info("Created Region " + dataParRegion);
+        dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME_VM1);
+        assertNotNull(dataParRegion);
+        getLogWriter().info("Created Region " + dataParRegion);
+
+      }
+    });
+
+    vm2.invoke(new SerializableRunnable() {
+      public void run() {
+        Cache cache = getCache();
+        RegionFactory regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
+        Region dataRegion = regionFactory.create(DATA_REGION_NAME);
+        assertNotNull(dataRegion);
+        getLogWriter().info("Created Region " + dataRegion);
+
+        dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1, dataRegion.getAttributes());
+        assertNotNull(dataRegion);
+        getLogWriter().info("Created Region " + dataRegion);
+
+        dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1_2, dataRegion.getAttributes());
+        assertNotNull(dataRegion);
+        getLogWriter().info("Created Region " + dataRegion);
+
+        dataRegion = regionFactory.create(DATA_REGION_NAME_VM2);
+        assertNotNull(dataRegion);
+        getLogWriter().info("Created Region " + dataRegion);
+
+
+        PartitionAttributes partitionAttrs = new PartitionAttributesFactory().setRedundantCopies(2).create();
+        RegionFactory<Object, Object> partitionRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        partitionRegionFactory.setPartitionAttributes(partitionAttrs);
+        Region dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME);
+        assertNotNull(dataParRegion);
+        getLogWriter().info("Created Region " + dataParRegion);
+        dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME_VM2);
+        assertNotNull(dataParRegion);
+        getLogWriter().info("Created Region " + dataParRegion);
+
+      }
+    });
+
+    final String vm1MemberId = (String) vm1.invoke(GemfireDataCommandsDUnitTest.class, "getMemberId");
+    final String vm2MemberId = (String) vm2.invoke(GemfireDataCommandsDUnitTest.class, "getMemberId");
+    getLogWriter().info("Vm1 ID : " + vm1MemberId);
+    getLogWriter().info("Vm2 ID : " + vm2MemberId);
+
+    final VM manager = Host.getHost(0).getVM(0);
+
+    SerializableRunnable checkRegionMBeans = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        final ManagementService service = ManagementService.getManagementService(cache);
+
+        final WaitCriterion waitForManagerMBean = new WaitCriterion() {
+          @Override
+          public boolean done() {
+            ManagerMXBean bean1 = service.getManagerMXBean();
+            DistributedRegionMXBean bean2 = service.getDistributedRegionMXBean(DATA_REGION_NAME_PATH);
+            if (bean1 == null) {
+              getLogWriter().info("Still probing for ManagerMBean");
+              return false;
+            } else {
+              getLogWriter().info("Still probing for DistributedRegionMXBean=" + bean2);
+              return (bean2 != null);
+            }
+          }
+
+          @Override
+          public String description() {
+            return "Probing for ManagerMBean";
+          }
+        };
+
+        DistributedTestCase.waitForCriterion(waitForManagerMBean, 30000, 2000, true);
+
+        assertNotNull(service.getMemberMXBean());
+        assertNotNull(service.getManagerMXBean());
+        DistributedRegionMXBean bean = service.getDistributedRegionMXBean(DATA_REGION_NAME_PATH);
+        assertNotNull(bean);
+
+        WaitCriterion waitForRegionMBeans = new WaitCriterion() {
+          @Override
+          public boolean done() {
+
+            DistributedRegionMXBean beans[] = new DistributedRegionMXBean[6];
+            beans[0] = service.getDistributedRegionMXBean(DATA_REGION_NAME_PATH);
+            beans[1] = service.getDistributedRegionMXBean(DATA_REGION_NAME_VM1_PATH);
+            beans[2] = service.getDistributedRegionMXBean(DATA_REGION_NAME_VM2_PATH);
+            beans[3] = service.getDistributedRegionMXBean(DATA_PAR_REGION_NAME_PATH);
+            beans[4] = service.getDistributedRegionMXBean(DATA_PAR_REGION_NAME_VM1_PATH);
+            beans[5] = service.getDistributedRegionMXBean(DATA_PAR_REGION_NAME_VM2_PATH);
+            //SubRegion Bug : Proxy creation has some issues.
+            //beans[6] = service.getDistributedRegionMXBean(DATA_REGION_NAME_CHILD_1_PATH);
+            //beans[7] = service.getDistributedRegionMXBean(DATA_REGION_NAME_CHILD_1_2_PATH);
+            boolean flag = true;
+            for (DistributedRegionMXBean b : beans) {
+              if (b == null) {
+                flag = false;
+                break;
+              }
+            }
+
+            if (!flag) {
+              getLogWriter().info(
+                  "Still probing for regionMbeans " + DATA_REGION_NAME_PATH + "=" + beans[0] + " " + DATA_REGION_NAME_VM1_PATH + "=" + beans[1] + " " + DATA_REGION_NAME_VM2_PATH + "=" + beans[2] + " " + DATA_PAR_REGION_NAME_PATH + "=" + beans[3] + " " + DATA_PAR_REGION_NAME_VM1_PATH + "=" + beans[4] + " " + DATA_PAR_REGION_NAME_VM2_PATH + "=" + beans[5] + " "
+                  //+ DATA_REGION_NAME_CHILD_1_PATH
+                  // +"="+ beans[6]  + " " + DATA_REGION_NAME_CHILD_1_2_PATH
+                  // +"=" + beans[7]
+              );
+              return false;
+            } else {
+              getLogWriter().info(
+                  "Probing complete for regionMbeans " + DATA_REGION_NAME_PATH + "=" + beans[0] + " " + DATA_REGION_NAME_VM1_PATH + "=" + beans[1] + " " + DATA_REGION_NAME_VM2_PATH + "=" + beans[2] + " " + DATA_PAR_REGION_NAME_PATH + "=" + beans[3] + " " + DATA_PAR_REGION_NAME_VM1_PATH + "=" + beans[4] + " " + DATA_PAR_REGION_NAME_VM2_PATH + "=" + beans[5] + " "
+                  //+ DATA_REGION_NAME_CHILD_1_PATH
+                  // +"="+ beans[6]  + " " + DATA_REGION_NAME_CHILD_1_2_PATH
+                  // +"=" + beans[7]
+              );
+              //TODO: the commented-out member count aggregation check below currently fails and needs a fix
+              //if(bean1.getMemberCount()==2 && bean1.getMemberCount()==1 && bean1.getMemberCount()==1)              
+              return true;
+              //else{
+              //   getLogWriter().info("Still probing for regionMbeans for aggregation  bean1=" + bean1.getMemberCount() + " bean2="+ bean2.getMemberCount() + " bean3" + bean3.getMemberCount());
+              //   return false;
+              // }
+            }
+          }
+
+          @Override
+          public String description() {
+            return "Probing for regionMbeans";
+          }
+        };
+
+        DistributedTestCase.waitForCriterion(waitForRegionMBeans, 30000, 2000, true);
+
+        String regions[] = {DATA_REGION_NAME_PATH, DATA_REGION_NAME_VM1_PATH, DATA_REGION_NAME_VM2_PATH, DATA_PAR_REGION_NAME_PATH, DATA_PAR_REGION_NAME_VM1_PATH, DATA_PAR_REGION_NAME_VM2_PATH, /*DATA_REGION_NAME_CHILD_1_PATH, DATA_REGION_NAME_CHILD_1_2_PATH*/};
+
+        for (String region : regions) {
+          bean = service.getDistributedRegionMXBean(region);
+          assertNotNull(bean);
+          String[] membersName = bean.getMembers();
+          getLogWriter().info(
+              "Members Array for region " + region + " : " + StringUtils.objectToString(membersName, true, 10));
+          if (bean.getMemberCount() < 1) fail(
+              "Even after waiting, the mbean reports that the number of members hosting region " + region + " is less than one");
+          //assertEquals(1, membersName.length); //exists in one members vm1
+        }
+      }
+    };
+    manager.invoke(checkRegionMBeans);
+  }
+
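+  // Reuses the get/put/locate setup and then seeds the regions from vm1 and vm2 with
+  // Portfolio, Value1 and Value1WithValue2 entries so that the select tests have data.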
+  void setupForSelect() {
+    setupForGetPutRemoveLocateEntry("setupForSelect");
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+    //To avoid pagination issues and Gfsh waiting for user input
+    executeCommand("set variable --name=APP_FETCH_SIZE --value=" + COUNT);
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        Cache cache = CacheFactory.getAnyInstance();
+        String regions[] = {DATA_PAR_REGION_NAME_PATH, DATA_PAR_REGION_NAME_VM1_PATH, DATA_REGION_NAME_CHILD_1_PATH, DATA_REGION_NAME_CHILD_1_2_PATH};
+        for (String r : regions) {
+          Region dataRegion = cache.getRegion(r);
+          for (int j = 0; j < 10; j++) {
+            dataRegion.put(new Integer(j), new Portfolio(j));
+          }
+        }
+        Region dataRegion = cache.getRegion(DATA_REGION_NAME_PATH);
+        for (int j = 0; j < 10; j++) {
+          dataRegion.put(new Integer(j), new Value1(j));
+        }
+
+        dataRegion = cache.getRegion(DATA_REGION_NAME_VM1_PATH);
+        for (int j = 0; j < 10; j++) {
+          dataRegion.put(new Integer(j), new Value1WithValue2(j));
+        }
+      }
+    });
+
+    vm2.invoke(new SerializableRunnable() {
+      public void run() {
+        Cache cache = CacheFactory.getAnyInstance();
+        String regions[] = {DATA_REGION_NAME_VM2_PATH, DATA_PAR_REGION_NAME_VM2_PATH};
+        for (String r : regions) {
+          Region dataRegion = cache.getRegion(r);
+          for (int j = 0; j < 10; j++) {
+            dataRegion.put(new Integer(j), new Portfolio(j));
+          }
+        }
+      }
+    });
+  }
+
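+  // Substitutes the given region paths into the ?1, ?2, ... placeholders of the query
+  // template, compiles it, and asserts how many members DataCommands associates with the
+  // regions referenced by the query; expectedMembers == -1 means no hosting member is expected.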
+  private void doQueryRegionsAssociatedMembers(String queryTemplate, int expectedMembers, boolean returnAll,
+      String... regions) {
+    Cache cache = CacheFactory.getAnyInstance();
+
+    String query = queryTemplate;
+    int i = 1;
+    for (String r : regions) {
+      query = query.replace("?" + i, r);
+      i++;
+    }
+    getLogWriter().info("Checking members for query : " + query);
+    QCompiler compiler = new QCompiler();
+    Set<String> regionsInQuery = null;
+    try {
+      CompiledValue compiledQuery = compiler.compileQuery(query);
+      Set regionSet = new HashSet();
+      compiledQuery.getRegionsInQuery(regionSet, null);//GFSH ENV VARIABLES
+      regionsInQuery = Collections.unmodifiableSet(regionSet);
+      getLogWriter().info("Region in query : " + regionsInQuery);
+      if (regionsInQuery.size() > 0) {
+        Set<DistributedMember> members = DataCommands.getQueryRegionsAssociatedMembers(regionsInQuery, cache,
+            returnAll);
+        getLogWriter().info("Members for Region in query : " + members);
+        if (expectedMembers != -1) {
+          assertNotNull(members);
+          assertEquals(expectedMembers, members.size());
+        } else assertEquals(0, members.size());
+      } else {
+        assertEquals(-1, expectedMembers);//Regions do not exist at all
+      }
+    } catch (QueryInvalidException qe) {
+      fail("Invalid Query", qe);
+    }
+  }
+
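+  // Exercises member resolution for two-region queries: regions hosted on disjoint members,
+  // regions sharing members, and combinations that include non-existent region paths.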
+  public void doTestGetRegionAssociatedMembersForSelect() {
+    final VM manager = Host.getHost(0).getVM(0);
+    final String queryTemplate1 = "select * from ?1, ?2 ";
+    //final String queryTemplate2 = "select * from ?1, ?2, ?3";    
+    manager.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        doQueryRegionsAssociatedMembers(queryTemplate1, 0, true,
+            new String[]{DATA_REGION_NAME_VM1_PATH, DATA_REGION_NAME_VM2_PATH});
+        doQueryRegionsAssociatedMembers(queryTemplate1, 2, true,
+            new String[]{DATA_REGION_NAME_PATH, DATA_REGION_NAME_CHILD_1_PATH});
+        doQueryRegionsAssociatedMembers(queryTemplate1, 1, false,
+            new String[]{DATA_REGION_NAME_PATH, DATA_REGION_NAME_CHILD_1_PATH});
+        doQueryRegionsAssociatedMembers(queryTemplate1, 1, true,
+            new String[]{DATA_REGION_NAME_VM1_PATH, DATA_REGION_NAME_PATH});
+        doQueryRegionsAssociatedMembers(queryTemplate1, 1, false,
+            new String[]{DATA_REGION_NAME_VM1_PATH, DATA_REGION_NAME_PATH});
+        doQueryRegionsAssociatedMembers(queryTemplate1, 1, true,
+            new String[]{DATA_REGION_NAME_VM2_PATH, DATA_REGION_NAME_PATH});
+        doQueryRegionsAssociatedMembers(queryTemplate1, 1, false,
+            new String[]{DATA_REGION_NAME_VM2_PATH, DATA_REGION_NAME_PATH});
+        doQueryRegionsAssociatedMembers(queryTemplate1, 0, true,
+            new String[]{DATA_PAR_REGION_NAME_VM2_PATH, DATA_PAR_REGION_NAME_VM1_PATH});
+        doQueryRegionsAssociatedMembers(queryTemplate1, 0, false,
+            new String[]{DATA_PAR_REGION_NAME_VM2_PATH, DATA_PAR_REGION_NAME_VM1_PATH});
+        doQueryRegionsAssociatedMembers(queryTemplate1, -1, true,
+            new String[]{DATA_PAR_REGION_NAME_VM2_PATH, "/jfgkdfjgkd"}); //one wrong region
+        doQueryRegionsAssociatedMembers(queryTemplate1, -1, false,
+            new String[]{DATA_PAR_REGION_NAME_VM2_PATH, "/jfgkdfjgkd"}); //one wrong region
+        doQueryRegionsAssociatedMembers(queryTemplate1, -1, true,
+            new String[]{"/dhgfdhgf", "/dhgddhd"}); // both regions wrong
+        doQueryRegionsAssociatedMembers(queryTemplate1, -1, false,
+            new String[]{"/dhgfdhgf", "/dhgddhd"}); // both regions wrong
+      }
+    });
+  }
+
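+  // Runs a projection query against the partitioned region through gfsh and validates the
+  // returned row count and column headers.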
+  public void doTestSelectProjection() {
+    Random random = new Random(System.nanoTime());
+    int randomInteger = random.nextInt(COUNT);
+    String query = "query --query=\"select ID , status , createTime , pk, floatMinValue from " + DATA_PAR_REGION_NAME_PATH + " where ID <= " + randomInteger + "\" --interactive=false";
+    CommandResult cmdResult = executeCommand(query);
+    printCommandOutput(cmdResult);
+    validateSelectResult(cmdResult, true, (randomInteger + 1),
+        new String[]{"ID", "status", "createTime", "pk", "floatMinValue"});
+  }
+
+  public void doTestSelectProjectionProcessCommand() {
+    final VM manager = Host.getHost(0).getVM(0);
+    manager.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Random random = new Random(System.nanoTime());
+        int randomInteger = random.nextInt(COUNT);
+        String query = "query --query=\"select ID , status , createTime , pk, floatMinValue from " + DATA_PAR_REGION_NAME_PATH + " where ID <= " + randomInteger + "\" --interactive=false";
+        ManagementService service = ManagementService.getExistingManagementService(getCache());
+        MemberMXBean member = service.getMemberMXBean();
+        String cmdResult = member.processCommand(query);
+        assertNotNull(cmdResult);
+        getLogWriter().info("Text Command Output : " + cmdResult);
+      }
+    });
+  }
+
+  public void doTestSelectProjectionWithNestedField() {
+    Random random = new Random(System.nanoTime());
+    int randomInteger = random.nextInt(COUNT);
+    String query = "query --query=\"select employeeId, name, department, value2 from " + DATA_REGION_NAME_VM1_PATH + " where employeeId <= " + randomInteger + "\" --interactive=false";
+    CommandResult cmdResult = executeCommand(query);
+    printCommandOutput(cmdResult);
+    String expectedCols[] = {"employeeId", "name", "department", "value2"};
+    validateSelectResult(cmdResult, true, (randomInteger + 1), expectedCols);
+
+    // Test with collections
+    query = "query --query=\"select ID , status , createTime , pk, floatMinValue, collectionHolderMap from " + DATA_PAR_REGION_NAME_PATH + " where ID <= " + randomInteger + "\" --interactive=false";
+    cmdResult = executeCommand(query);
+    printCommandOutput(cmdResult);
+    expectedCols = new String[]{"ID", "status", "createTime", "pk", "floatMinValue", "collectionHolderMap"};
+    validateSelectResult(cmdResult, true, (randomInteger + 1), expectedCols);
+  }
+
+  public void doTestSelectBeansAsResult() {
+    Random random = new Random(System.nanoTime());
+    int randomInteger = random.nextInt(COUNT);
+    String query = "query --query=\"select * from " + DATA_REGION_NAME_PATH + " where employeeId <= " + randomInteger + "\" --interactive=false";
+    CommandResult cmdResult = executeCommand(query);
+    printCommandOutput(cmdResult);
+    String expectedCols[] = {"name", "lastName", "department", "age", "employeeId"};
+    validateSelectResult(cmdResult, true, (randomInteger + 1), expectedCols);
+  }
+
+  public void doTestSelectBeansWithNestedFieldAsResult() {
+    Random random = new Random(System.nanoTime());
+    int randomInteger = random.nextInt(COUNT);
+    String query = "query --query=\"select employeeId, name, department, value2 from " + DATA_REGION_NAME_VM1_PATH + " where employeeId <= " + randomInteger + "\" --interactive=false";
+    CommandResult cmdResult = executeCommand(query);
+    printCommandOutput(cmdResult);
+    String expectedCols[] = {"employeeId", "name", "department", "value2"};
+    validateSelectResult(cmdResult, true, (randomInteger + 1), expectedCols);
+  }
+
+  public void doTestSelectWithGfshEnvVariables() {
+    Random random = new Random(System.nanoTime());
+    int randomInteger = random.nextInt(COUNT);
+    String query = "query --query=\"select ID , status , createTime , pk, floatMinValue from ${DATA_REGION} where ID <= ${PORTFOLIO_ID}" + " and status='${STATUS}'" + "\" --interactive=false";
+    executeCommand("set variable --name=DATA_REGION --value=" + DATA_REGION_NAME_PATH);
+    executeCommand("set variable --name=PORTFOLIO_ID --value=" + randomInteger);
+    executeCommand("set variable --name=STATUS --value=" + (new GsRandom().nextBoolean() ? "active" : "inactive"));
+    CommandResult cmdResult = executeCommand(query);
+    printCommandOutput(cmdResult);
+    validateSelectResult(cmdResult, true, -1, null);
+    ExpectedException ex = addExpectedException(QueryInvalidException.class.getSimpleName(), Host.getHost(0).getVM(0));
+    try {
+      query = "query --query=\"select ID , status , createTime , pk, floatMinValue from ${DATA_REGION2} where ID <= ${PORTFOLIO_ID2}" + " and status='${STATUS2}'" + "\" --interactive=false";
+      cmdResult = executeCommand(query);
+      printCommandOutput(cmdResult);
+      validateSelectResult(cmdResult, false, 0, null);
+    } finally {
+      ex.remove();
+    }
+  }
+
+  public void doTestBug48013() {
+    String query = "query --query=\"SELECT e FROM " + DATA_REGION_NAME_PATH + ".entries e\" --interactive=false";
+    CommandResult cmdResult = executeCommand(query);
+    printCommandOutput(cmdResult);
+    validateResult(cmdResult, true);
+  }
+
+
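+  // Single JUnit entry point that runs all of the select scenarios above against one shared setup.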
+  public void testSelectCommand() {
+    setupForSelect();
+    doTestGetRegionAssociatedMembersForSelect();
+    doTestSelectWithGfshEnvVariables();
+    doTestSelectProjection();
+    doTestBug48013();
+    doTestSelectProjectionProcessCommand();
+    doTestSelectProjectionWithNestedField();
+    doTestSelectBeansAsResult();
+    doTestSelectBeansWithNestedFieldAsResult();
+  }
+
+
+  public void testPrimitivesWithDataCommands() {
+    setupForGetPutRemoveLocateEntry("testPrimitives");
+    Byte byteKey = Byte.parseByte("41");
+    Byte byteValue = Byte.parseByte("31");
+    Short shortKey = Short.parseShort("123");
+    Short shortValue = Short.parseShort("121");
+    Integer integerKey = Integer.parseInt("123456");
+    Integer integerValue = Integer.parseInt("12345678");
+    Float floatKey = Float.valueOf("12432.2325");
+    Float floatValue = Float.valueOf("111111.1111");
+    Double doubleKey = Double.valueOf("12432.235425");
+    Double doubleValue = Double.valueOf("111111.111111");
+
+    getLogWriter().info("Testing Byte Wrappers");
+    testGetPutLocateEntryFromShellAndGemfire(byteKey, byteValue, Byte.class, true, true);
+    getLogWriter().info("Testing Short Wrappers");
+    testGetPutLocateEntryFromShellAndGemfire(shortKey, shortValue, Short.class, true, true);
+    getLogWriter().info("Testing Integer Wrappers");
+    testGetPutLocateEntryFromShellAndGemfire(integerKey, integerValue, Integer.class, true, true);
+    getLogWriter().info("Testing Float Wrappers");
+    testGetPutLocateEntryFromShellAndGemfire(floatKey, floatValue, Float.class, true, true);
+    getLogWriter().info("Testing Double Wrappers");
+    testGetPutLocateEntryFromShellAndGemfire(doubleKey, doubleValue, Double.class, true, true);
+  }
+
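+  // Round-trips a key/value pair between gfsh and the cache API: put from the shell and verify
+  // in the member, then put in the member and get/locate/remove from the shell, asserting
+  // success or failure according to expResult.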
+  private void testGetPutLocateEntryFromShellAndGemfire(final Object key, final Object value, Class klass,
+      boolean addRegionPath, boolean expResult) {
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+
+    SerializableRunnable putTask = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        region.clear();
+        region.put(key, value);
+      }
+    };
+
+    SerializableRunnable getTask = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        assertEquals(true, region.containsKey(key));
+        assertEquals(value, region.get(key));
+      }
+    };
+
+    SerializableRunnable removeTask = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        assertEquals(true, region.containsKey(key));
+        region.remove(key);
+      }
+    };
+
+
+    SerializableRunnable clearTask = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        region.clear();
+      }
+    };
+
+    String getCommand = "get --key=" + key + " --key-class=" + klass.getCanonicalName() + " --value-class=" + klass.getCanonicalName();
+    if (addRegionPath) getCommand += " --region=" + DATA_REGION_NAME_PATH;
+
+    String locateEntryCommand = "locate entry --key=" + key + " --key-class=" + klass.getCanonicalName() + " --value-class=" + klass.getCanonicalName();
+    if (addRegionPath) locateEntryCommand += " --region=" + DATA_REGION_NAME_PATH;
+
+    String removeCommand = "remove --key=" + key + " --key-class=" + klass.getCanonicalName();
+    if (addRegionPath) removeCommand += " --region=" + DATA_REGION_NAME_PATH;
+    String putCommand = "put --key=" + key + " --key-class=" + klass.getCanonicalName() + " --value=" + value + " --value-class=" + klass.getCanonicalName();
+    if (addRegionPath) putCommand += " --region=" + DATA_REGION_NAME_PATH;
+
+    if (expResult) {
+      //Do put from shell, check with gemfire get, then do gemfire remove
+      CommandResult cmdResult = executeCommand(putCommand);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, true);
+      vm1.invoke(getTask);
+      vm1.invoke(removeTask);
+
+      vm1.invoke(clearTask);
+
+      //Do put from gemfire, check from shell, then do gemfire remove
+      vm1.invoke(putTask);
+      cmdResult = executeCommand(getCommand);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, true);
+      cmdResult = executeCommand(locateEntryCommand);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, true);
+      vm1.invoke(removeTask);
+
+      vm1.invoke(clearTask);
+
+      //Do put from shell, check from gemfire, remove from shell, then get from shell and expect false
+      cmdResult = executeCommand(putCommand);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, true);
+      vm1.invoke(getTask);
+      cmdResult = executeCommand(removeCommand);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, true);
+      cmdResult = executeCommand(getCommand);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, false);
+      cmdResult = executeCommand(locateEntryCommand);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, false);
+    } else {
+      //Do put from shell, check with gemfire get, then do gemfire remove
+      CommandResult cmdResult = executeCommand(putCommand);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, false);
+      vm1.invoke(clearTask);
+
+      //Do put from gemfire, check from shell, then do gemfire remove
+      vm1.invoke(putTask);
+      cmdResult = executeCommand(getCommand);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, false);
+      cmdResult = executeCommand(locateEntryCommand);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, false);
+      vm1.invoke(removeTask);
+      vm1.invoke(clearTask);
+
+      //Do put from shell, check from gemfire, remove from shell, then get from shell and expect false
+      cmdResult = executeCommand(putCommand);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, false);
+    }
+  }
+
+  public void testSimplePutCommand() {
+    final String keyPrefix = "testKey";
+    final String valuePrefix = "testValue";
+
+    setupForGetPutRemoveLocateEntry("tesSimplePut");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+    for (int i = 0; i < COUNT; i++) {
+      String command = "put";
+      String key = keyPrefix + i;
+      String value = valuePrefix + i;
+      command = command + " " + "--key=" + key + " --value=" + value + " --region=" + DATA_REGION_NAME_PATH;
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      validateResult(cmdResult, true);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    }
+
+    //Bug : 51587 : GFSH command failing when ; is present in either key or value in put operation
+    String command = "put";
+    String key = keyPrefix + "\\;" + COUNT;
+    String value = valuePrefix + "\\;" + COUNT;
+    command = command + " " + "--key=" + key + " --value=" + value + " --region=" + DATA_REGION_NAME_PATH;
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+    validateResult(cmdResult, true);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    SerializableRunnable checkPutKeys = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        for (int i = 0; i < COUNT; i++) {
+          String key = keyPrefix + i;
+          assertEquals(true, region.containsKey(key));
+        }
+        //Validation for Bug 51587
+        String key = keyPrefix + "\\;" + COUNT;
+        assertEquals(true, region.containsKey(key));
+      }
+    };
+
+    vm1.invoke(checkPutKeys);
+    vm2.invoke(checkPutKeys);
+  }
+
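+  // The validate* helpers below unwrap the CompositeResultData returned by the data commands
+  // and assert the "Result", "Locations Found", "Rows" and column header fields against the
+  // expected values.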
+  private void validateResult(CommandResult cmdResult, boolean expected) {
+    if (ResultData.TYPE_COMPOSITE.equals(cmdResult.getType())) {
+      CompositeResultData rd = (CompositeResultData) cmdResult.getResultData();
+      SectionResultData section = rd.retrieveSectionByIndex(0);
+      boolean result = (Boolean) section.retrieveObject("Result");
+      assertEquals(expected, result);
+    } else fail("Expected CompositeResult Returned Result Type " + cmdResult.getType());
+  }
+
+  private void validateLocationsResult(CommandResult cmdResult, int expected) {
+    if (ResultData.TYPE_COMPOSITE.equals(cmdResult.getType())) {
+      CompositeResultData rd = (CompositeResultData) cmdResult.getResultData();
+      SectionResultData section = rd.retrieveSectionByIndex(0);
+      int result = (Integer) section.retrieveObject("Locations Found");
+      assertEquals(expected, result);
+    } else fail("Expected CompositeResult Returned Result Type " + cmdResult.getType());
+  }
+
+  private void validateJSONGetResult(CommandResult cmdResult, String[] expectedCols) {
+    CompositeResultData rd = (CompositeResultData) cmdResult.getResultData();
+    SectionResultData section = rd.retrieveSectionByIndex(0);
+    TabularResultData table = section.retrieveTableByIndex(0);
+    GfJsonArray array = table.getHeaders();
+    assertEquals(expectedCols.length, array.size());
+    try {
+      for (String col : expectedCols) {
+        boolean found = false;
+        getLogWriter().info("Validating column " + col);
+        for (int i = 0; i < array.size(); i++) {
+          String header = (String) array.get(i);
+          if (col.equals(header)) found = true;
+        }
+        assertEquals(true, found);
+      }
+    } catch (GfJsonException e) {
+      fail("Error accessing table data", e);
+    }
+  }
+
+  private void validateJSONGetResultValues(CommandResult cmdResult, String[] expectedCols) {
+    CompositeResultData rd = (CompositeResultData) cmdResult.getResultData();
+    SectionResultData section = rd.retrieveSectionByIndex(0);
+    TabularResultData table = section.retrieveTableByIndex(0);
+    GfJsonArray array = table.getHeaders();
+    assertEquals(expectedCols.length, array.size());
+    try {
+      for (String col : expectedCols) {
+        boolean found = false;
+        getLogWriter().info("Validating column " + col);
+        for (int i = 0; i < array.size(); i++) {
+          String header = (String) array.get(i);
+          if (col.equals(header)) found = true;
+        }
+        assertEquals(true, found);
+
+        List<String> values = table.retrieveAllValues(col);
+        for (String value : values) {
+          assertNotSame("null", value);
+        }
+
+      }
+    } catch (GfJsonException e) {
+      fail("Error accessing table data", e);
+    }
+  }
+
+  private void validateSelectResult(CommandResult cmdResult, boolean expectedFlag, int expectedRows, String[] cols) {
+    if (ResultData.TYPE_COMPOSITE.equals(cmdResult.getType())) {
+      CompositeResultData rd = (CompositeResultData) cmdResult.getResultData();
+      SectionResultData section = rd.retrieveSectionByIndex(0);
+      boolean result = (Boolean) section.retrieveObject("Result");
+      assertEquals(expectedFlag, result);
+      if (expectedFlag && expectedRows != -1) {
+        int rowsReturned = (Integer) section.retrieveObject("Rows");
+        assertEquals(expectedRows, rowsReturned);
+        if (rowsReturned > 0 && cols != null) {
+          try {
+            TabularResultData table = section.retrieveTableByIndex(0);
+            GfJsonArray array = table.getHeaders();
+            assertEquals(cols.length, array.size());
+            for (String col : cols) {
+              boolean found = false;
+              getLogWriter().info("Validating column " + col);
+              for (int i = 0; i < array.size(); i++) {
+                String header = (String) array.get(i);
+                if (col.equals(header)) found = true;
+              }
+              assertEquals(true, found);
+            }
+          } catch (GfJsonException e) {
+            fail("Error accessing table data", e);
+          }
+        }
+      }
+    } else fail("Expected CompositeResult Returned Result Type " + cmdResult.getType());
+  }
+
+
+  public void testSimplePutIfAbsentCommand() {
+    final String keyPrefix = "testKey";
+    final String valuePrefix = "testValue";
+
+    setupForGetPutRemoveLocateEntry("testSimplePutIfAbsent");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+
+    SerializableRunnable putKeys = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        region.clear();
+        for (int i = 0; i < COUNT; i++) {
+          String key = keyPrefix + i;
+          String value = valuePrefix + i;
+          region.put(key, value);
+        }
+        assertEquals(COUNT, region.size());
+      }
+    };
+    vm1.invoke(putKeys);
+
+    for (int i = 0; i < COUNT; i++) {
+      String command = "put";
+      String key = keyPrefix + i;
+      String value = valuePrefix + i + i;
+      command = command + " " + "--key=" + key + " --value=" + value + " --region=" + DATA_REGION_NAME_PATH + " --skip-if-exists=true";
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+    }
+
+    SerializableRunnable checkPutIfAbsentKeys = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        for (int i = 0; i < COUNT; i++) {
+          String key = keyPrefix + i;
+          String notExpectedvalue = valuePrefix + i + i;
+          String expectedvalue = valuePrefix + i;
+          String value = (String) region.get(key);
+          assertNotNull(value);
+          assertEquals(expectedvalue, value);
+          if (value.equals(notExpectedvalue)) fail("Value is overridden even though skip-if-exists was true");
+        }
+      }
+    };
+
+    vm1.invoke(checkPutIfAbsentKeys);
+    vm2.invoke(checkPutIfAbsentKeys);
+
+  }
+
+  public void testSimpleRemoveCommand() {
+    final String keyPrefix = "testKey";
+    final String valuePrefix = "testValue";
+
+    setupForGetPutRemoveLocateEntry("testSimpleRemove");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+
+    SerializableRunnable putKeys = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        region.clear();
+        for (int i = 0; i < COUNT; i++) {
+          String key = keyPrefix + i;
+          String value = valuePrefix + i;
+          region.put(key, value);
+        }
+        assertEquals(COUNT, region.size());
+      }
+    };
+    vm1.invoke(putKeys);
+
+    for (int i = 0; i < COUNT; i++) {
+      String command = "remove";
+      String key = keyPrefix + i;
+      String value = valuePrefix + i;
+      command = command + " " + "--key=" + key + " --region=" + DATA_REGION_NAME_PATH;
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+    }
+
+    SerializableRunnable checkRemoveKeys = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        for (int i = 0; i < COUNT; i++) {
+          String key = keyPrefix + i;
+          assertEquals(false, region.containsKey(key));
+        }
+        assertEquals(0, region.size());
+      }
+    };
+
+    vm1.invoke(checkRemoveKeys);
+    vm2.invoke(checkRemoveKeys);
+  }
+
+  public void testSimpleGetLocateEntryCommand() {
+    final String keyPrefix = "testKey";
+    final String valuePrefix = "testValue";
+
+    setupForGetPutRemoveLocateEntry("testSimpleGetLocateEntry");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+    SerializableRunnable putKeys = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        region.clear();
+        for (int i = 0; i < COUNT; i++) {
+          String key = keyPrefix + i;
+          String value = valuePrefix + i;
+          region.put(key, value);
+        }
+      }
+    };
+
+    vm1.invoke(putKeys);
+    for (int i = 0; i < COUNT; i++) {
+      String command = "get";
+      String key = keyPrefix + i;
+      String value = valuePrefix + i;
+      command = command + " " + "--key=" + key + " --region=" + DATA_REGION_NAME_PATH;
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+
+      command = "locate entry";
+      command = command + " " + "--key=" + key + " --region=" + DATA_REGION_NAME_PATH;
+      cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+
+    }
+
+  }
+
+  public void testRecursiveLocateEntryCommand() {
+    final String keyPrefix = "testKey";
+    final String valuePrefix = "testValue";
+
+    setupForGetPutRemoveLocateEntry("testRecursiveLocateEntry");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+    SerializableRunnable putKeys = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        Region region2 = cache.getRegion(DATA_REGION_NAME_CHILD_1_PATH);
+        Region region3 = cache.getRegion(DATA_REGION_NAME_CHILD_1_2_PATH);
+        assertNotNull(region);
+        region.clear();
+        for (int i = 0; i < COUNT; i++) {
+          String key = keyPrefix + i;
+          String value = valuePrefix + i;
+          region.put(key, value);
+          region2.put(key, value);
+          region3.put(key, value);
+        }
+      }
+    };
+
+    vm1.invoke(putKeys);
+    for (int i = 0; i < COUNT; i++) {
+      String key = keyPrefix + i;
+      String command = "locate entry";
+      command = command + " " + "--key=" + key + " --region=" + DATA_REGION_NAME_PATH + " --recursive=true";
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+      validateLocationsResult(cmdResult, 6); //3 Regions X 2 members = 6
+    }
+
+  }
+
+  public void testGetLocateEntryFromRegionOnDifferentVM() {
+    final String keyPrefix = "testKey";
+    final String valuePrefix = "testValue";
+
+    setupForGetPutRemoveLocateEntry("testGetLocateEntryFromRegionOnDifferentVM");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+    SerializableRunnable putKeys1 = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_VM1_PATH);
+        Region parRegion = cache.getRegion(DATA_PAR_REGION_NAME_VM1_PATH);
+        assertNotNull(region);
+        region.clear();
+        for (int i = 0; i < COUNT; i++) {
+          if (i % 2 == 0) {
+            String key = keyPrefix + i;
+            String value = valuePrefix + i;
+            region.put(key, value);
+            parRegion.put(key, value);
+          }
+        }
+      }
+    };
+
+    SerializableRunnable putKeys2 = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_VM2_PATH);
+        Region parRegion = cache.getRegion(DATA_PAR_REGION_NAME_VM2_PATH);
+        assertNotNull(region);
+        region.clear();
+        for (int i = 0; i < COUNT; i++) {
+          if (i % 2 != 0) {
+            String key = keyPrefix + i;
+            String value = valuePrefix + i;
+            region.put(key, value);
+            parRegion.put(key, value);
+          }
+        }
+      }
+    };
+
+    vm1.invoke(putKeys1);
+    vm2.invoke(putKeys2);
+    for (int i = 0; i < COUNT; i++) {
+      String command = "get";
+      String key = keyPrefix + i;
+      String value = valuePrefix + i;
+      if (i % 2 == 0) command = command + " " + "--key=" + key + " --region=" + DATA_REGION_NAME_VM1_PATH;
+      else if (i % 2 == 1) command = command + " " + "--key=" + key + " --region=" + DATA_REGION_NAME_VM2_PATH;
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+
+      command = "locate entry";
+      if (i % 2 == 0) command = command + " " + "--key=" + key + " --region=" + DATA_REGION_NAME_VM1_PATH;
+      else if (i % 2 == 1) command = command + " " + "--key=" + key + " --region=" + DATA_REGION_NAME_VM2_PATH;
+      cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+
+
+      command = "locate entry";
+      if (i % 2 == 0) command = command + " " + "--key=" + key + " --region=" + DATA_PAR_REGION_NAME_VM1_PATH;
+      else if (i % 2 == 1) command = command + " " + "--key=" + key + " --region=" + DATA_PAR_REGION_NAME_VM2_PATH;
+      cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+      validateLocationsResult(cmdResult, 1); //1 region hosted on a single member (redundancy not satisfied) = 1 location
+    }
+  }
+
+  public void testGetLocateEntryLocationsForPR() {
+    final String keyPrefix = "testKey";
+    final String valuePrefix = "testValue";
+
+    setupForGetPutRemoveLocateEntry("testGetLocateEntryLocationsForPR");
+    final VM vm1 = Host.getHost(0).getVM(1);
+
+    SerializableRunnable putKeys1 = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_PAR_REGION_NAME_PATH);
+        assertNotNull(region);
+        for (int i = 0; i < COUNT; i++) {
+          String key = keyPrefix + i;
+          String value = valuePrefix + i;
+          region.put(key, value);
+        }
+      }
+    };
+
+    vm1.invoke(putKeys1);
+
+    for (int i = 0; i < COUNT; i++) {
+      String key = keyPrefix + i;
+      String command = "locate entry";
+      command = command + " " + "--key=" + key + " --region=" + DATA_PAR_REGION_NAME_PATH;
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+      validateLocationsResult(cmdResult, 2); //2 Members
+    }
+  }
+
+  public void testPutFromRegionOnDifferentVM() {
+    final String keyPrefix = "testKey";
+    final String valuePrefix = "testValue";
+
+    setupForGetPutRemoveLocateEntry("testPutFromRegionOnDifferentVM");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+    for (int i = 0; i < COUNT; i++) {
+      String command = "put";
+      String key = keyPrefix + i;
+      String value = valuePrefix + i;
+      if (i % 2 == 0)
+        command = command + " " + "--key=" + key + " --value=" + value + " --region=" + DATA_REGION_NAME_VM1_PATH;
+      else command = command + " " + "--key=" + key + " --value=" + value + " --region=" + DATA_REGION_NAME_VM2_PATH;
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+    }
+
+    SerializableRunnable checkPutKeysInVM1 = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_VM1_PATH);
+        assertNotNull(region);
+        for (int i = 0; i < COUNT; i++) {
+          if (i % 2 == 0) {
+            String key = keyPrefix + i;
+            assertEquals(true, region.containsKey(key));
+          }
+        }
+      }
+    };
+
+    SerializableRunnable checkPutKeysInVM2 = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_VM2_PATH);
+        assertNotNull(region);
+        for (int i = 0; i < COUNT; i++) {
+          if (i % 2 != 0) {
+            String key = keyPrefix + i;
+            assertEquals(true, region.containsKey(key));
+          }
+        }
+      }
+    };
+
+    vm1.invoke(checkPutKeysInVM1);
+    vm2.invoke(checkPutKeysInVM2);
+  }
+
+  public void testGetLocateEntryJsonKeys() {
+    final String keyPrefix = "testKey";
+
+    setupForGetPutRemoveLocateEntry("testGetLocateEntryJsonKeys");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+    SerializableRunnable putKeys = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        region.clear();
+        for (int i = 0; i < COUNT; i++) {
+          String keyString = keyPrefix + i;
+          Key1 key = new Key1();
+          key.setId(keyString);
+          key.setName("name" + keyString);
+          Value2 value2 = new Value2();
+          value2.setStateName("State" + keyString);
+          value2.setCapitalCity("capital" + keyString);
+          value2.setPopulation(i * 100);
+          value2.setAreaInSqKm(i * 100.4365);
+          region.put(key, value2);
+        }
+
+        //Added for Bug #51175
+        List<String> colors = new ArrayList<String>();
+        colors.add("White");
+        colors.add("Red");
+        colors.add("Blue");
+        Map<String, String> attrMap = new HashMap<String, String>();
+        attrMap.put("power", "90hp");
+        attrMap.put("turningRadius", "4mtr");
+        attrMap.put("engineCapacity", "1248cc");
+        attrMap.put("turboGeometry", "VGT");
+
+        Set<String> attrSet = new HashSet<String>();
+        attrSet.add("power");
+        attrSet.add("turningRadius");
+
+        for (int i = COUNT; i < COUNT + 5; i++) {
+          String keyString = keyPrefix + i;
+          Key1 key = new Key1();
+          key.setId(keyString);
+          key.setName("name" + keyString);
+          Car car = new Car();
+          car.setMake("Make" + keyString);
+          car.setModel("Model" + keyString);
+          car.setColors(colors);
+          car.setAttributes(attrMap);
+          car.setAttributeSet(attrSet);
+          region.put(key, car);
+        }
+      }
+    };
+
+    String expectedCols[] = {"id", "name", "stateName", "capitalCity", "population", "areaInSqKm"};
+    vm1.invoke(putKeys);
+    for (int i = 0; i < COUNT; i++) {
+      String command = "get";
+      String keyString = keyPrefix + i;
+      String population = "" + i * 100;
+      String area = "" + i * (100.4365);
+      String keyJson = keyTemplate.replaceAll("\\?", keyString);
+      String valueJson = valueTemplate.replaceAll("\\?1", population);
+      valueJson = valueJson.replaceAll("\\?2", area);
+      valueJson = valueJson.replaceAll("\\?", keyString);
+      getLogWriter().info("Getting key with json key : " + keyJson);
+      command = command + " " + "--key=" + keyJson + " --region=" + DATA_REGION_NAME_PATH + " --key-class=" + Key1.class.getCanonicalName();
+      command = command + " --value-class=" + Value2.class.getCanonicalName();
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+      validateJSONGetResult(cmdResult, expectedCols);
+
+      command = "locate entry";
+      command = command + " " + "--key=" + keyJson + " --region=" + DATA_REGION_NAME_PATH + " --key-class=" + Key1.class.getCanonicalName();
+      command = command + " --value-class=" + Value2.class.getCanonicalName();
+      cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+    }
+
+    //Added for Bug #51175
+    expectedCols = new String[]{"id", "name", "attributes", "make", "model", "colors", "attributeSet"};
+    for (int i = COUNT; i < COUNT + 5; i++) {
+      String command = "get";
+      String keyString = keyPrefix + i;
+      String population = "" + i * 100;
+      String area = "" + i * (100.4365);
+      String keyJson = keyTemplate.replaceAll("\\?", keyString);
+      String valueJson = valueTemplate.replaceAll("\\?1", population);
+      valueJson = valueJson.replaceAll("\\?2", area);
+      valueJson = valueJson.replaceAll("\\?", keyString);
+      getLogWriter().info("Getting key with json key : " + keyJson);
+      command = command + " " + "--key=" + keyJson + " --region=" + DATA_REGION_NAME_PATH + " --key-class=" + Key1.class.getCanonicalName();
+      command = command + " --value-class=" + Value2.class.getCanonicalName();
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+      //validateJSONGetResult(cmdResult, expectedCols);
+      validateJSONGetResultValues(cmdResult, expectedCols);
+
+      command = "locate entry";
+      command = command + " " + "--key=" + keyJson + " --region=" + DATA_REGION_NAME_PATH + " --key-class=" + Key1.class.getCanonicalName();
+      command = command + " --value-class=" + Value2.class.getCanonicalName();
+      cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+    }
+  }
+
+  public void testPutJsonKeys() {
+    final String keyPrefix = "testKey";
+
+    setupForGetPutRemoveLocateEntry("testPutJsonKeys");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+    for (int i = 0; i < COUNT; i++) {
+      String command = "put";
+      String keyString = keyPrefix + i;
+      String population = "" + i * 100;
+      String area = "" + i * (100.4365);
+      String keyJson = keyTemplate.replaceAll("\\?", keyString);
+      String valueJson = valueTemplate.replaceAll("\\?1", population);
+      valueJson = valueJson.replaceAll("\\?2", area);
+      valueJson = valueJson.replaceAll("\\?", keyString);
+      getLogWriter().info("Putting key with json key : " + keyJson);
+      getLogWriter().info("Putting key with json valye : " + valueJson);
+      command = command + " " + "--key=" + keyJson + " --value=" + valueJson + " --region=" + DATA_REGION_NAME_PATH;
+      command = command + " --key-class=" + Key1.class.getCanonicalName() + " --value-class=" + Value2.class.getCanonicalName();
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+    }
+
+    //Bug #51175
+    for (int i = COUNT; i < COUNT + 5; i++) {
+      String command = "put";
+      String keyString = keyPrefix + i;
+      String id = "" + i * 100;
+      String make = "" + i * (100.4365);
+      String model = "" + i * (100.4365);
+      String list = "['red','white','blue']";
+      String set = "['red','white','blue']";
+      String map = "{'power':'90hp'}";
+      String keyJson = keyTemplate.replaceAll("\\?", keyString);
+
+      String valueJson = carTemplate.replaceAll("\\?make", make);
+      valueJson = valueJson.replaceAll("\\?model", model);
+      valueJson = valueJson.replaceAll("\\?list", list);
+      valueJson = valueJson.replaceAll("\\?set", set);
+      valueJson = valueJson.replaceAll("\\?map", map);
+
+      getLogWriter().info("Putting key with json key : " + keyJson);
+      getLogWriter().info("Putting key with json valye : " + valueJson);
+      command = command + " " + "--key=" + keyJson + " --value=" + valueJson + " --region=" + DATA_REGION_NAME_PATH;
+      command = command + " --key-class=" + Key1.class.getCanonicalName() + " --value-class=" + Car.class.getCanonicalName();
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+    }
+
+    SerializableRunnable checkPutKeys = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        for (int i = 0; i < COUNT + 5; i++) {
+          String keyString = keyPrefix + i;
+          Key1 key = new Key1();
+          key.setId(keyString);
+          key.setName("name" + keyString);
+          assertEquals(true, region.containsKey(key));
+
+          //Bug #51175
+          if (i >= COUNT) {
+            Car car = (Car) region.get(key);
+            assertNotNull(car.getAttributes());
+            assertNotNull(car.getAttributeSet());
+            assertNotNull(car.getColors());
+          }
+
+        }
+      }
+    };
+
+    vm1.invoke(checkPutKeys);
+    vm2.invoke(checkPutKeys);
+
+    doBugCheck50449();
+  }
+
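+  // Regression check for bug 50449: put and get with a JSON key whose class has a char
+  // attribute (ObjectWithCharAttr) must succeed, and a non-existent key must report a miss.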
+  public void doBugCheck50449() {
+    String command = "put --key-class=" + ObjectWithCharAttr.class.getCanonicalName() + " --value=456 --key=\"('name':'hesdfdsfy2','t':456, 'c':'d')\"" + " --region=" + DATA_REGION_NAME_PATH;
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    validateResult(cmdResult, true);
+
+    command = "put --key-class=" + ObjectWithCharAttr.class.getCanonicalName() + " --value=123 --key=\"('name':'hesdfdsfy2','t':123, 'c':'d')\"" + " --region=" + DATA_REGION_NAME_PATH;
+    cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    validateResult(cmdResult, true);
+
+    command = "get --key-class=" + ObjectWithCharAttr.class.getCanonicalName() + " --key=\"('name':'','t':123, 'c':'d')\"" + " --region=" + DATA_REGION_NAME_PATH;
+    cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    validateResult(cmdResult, true);
+
+    command = "get --key-class=" + ObjectWithCharAttr.class.getCanonicalName() + " --key=\"('name':'','t':456, 'c':'d')\"" + " --region=" + DATA_REGION_NAME_PATH;
+    cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    validateResult(cmdResult, true);
+
+    // check wrong key
+    command = "get --key-class=" + ObjectWithCharAttr.class.getCanonicalName() + " --key=\"('name':'','t':999, 'c':'d')\"" + " --region=" + DATA_REGION_NAME_PATH;
+    cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    validateResult(cmdResult, false);
+  }
+
+  public void testRemoveJsonCommand() {
+    final String keyPrefix = "testKey";
+
+    setupForGetPutRemoveLocateEntry("testRemoveJsonCommand");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+
+    SerializableRunnable putKeys = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        region.clear();
+        for (int i = 0; i < COUNT; i++) {
+          String keyString = keyPrefix + i;
+          Key1 key = new Key1();
+          key.setId(keyString);
+          key.setName("name" + keyString);
+          Value2 value2 = new Value2();
+          value2.setStateName("State" + keyString);
+          value2.setCapitalCity("capital" + keyString);
+          value2.setPopulation(i * 100);
+          value2.setAreaInSqKm(i * 100.4365);
+          region.put(key, value2);
+        }
+      }
+    };
+
+    vm1.invoke(putKeys);
+
+    for (int i = 0; i < COUNT; i++) {
+      String command = "remove";
+      String keyString = keyPrefix + i;
+      String keyJson = keyTemplate.replaceAll("\\?", keyString);
+      getLogWriter().info("Removing key with json key : " + keyJson);
+      command = command + " " + "--key=" + keyJson + " --region=" + DATA_REGION_NAME_PATH + " --key-class=" + Key1.class.getCanonicalName();
+      CommandResult cmdResult = executeCommand(command);
+      printCommandOutput(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      validateResult(cmdResult, true);
+    }
+
+    SerializableRunnable checkRemoveKeys = new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(DATA_REGION_NAME_PATH);
+        assertNotNull(region);
+        for (int i = 0; i < COUNT; i++) {
+          String keyString = keyPrefix + i;
+          Key1 key = new Key1();
+          key.setId(keyString);
+          key.setName("name" + keyString);
+          assertEquals(false, region.containsKey(key));
+        }
+        assertEquals(0, region.size());
+      }
+    };
+
+    vm1.invoke(checkRemoveKeys);
+    vm2.invoke(checkRemoveKeys);
+  }
+
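+  // Region factory helpers used by the import/export test below.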
+  private Region<?, ?> createParReg(String regionName, Cache cache) {
+    RegionFactory regionFactory = cache.createRegionFactory();
+    regionFactory.setDataPolicy(DataPolicy.PARTITION);
+    return regionFactory.create(regionName);
+  }
+
+  private Region<?, ?> createReplicatedRegion(String regionName, Cache cache) {
+    RegionFactory regionFactory = cache.createRegionFactory();
+    regionFactory.setDataPolicy(DataPolicy.REPLICATE);
+    return regionFactory.create(regionName);
+  }
+
+  public void testImportExportData() throws InterruptedException, IOException {
+    final String regionName = "Region1";
+    final String exportFileName = "export.gfd";
+    final VM manager = Host.getHost(0).getVM(0);
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final File exportFile = new File(exportFileName);
+    final String filePath = exportFile.getCanonicalPath();
+
+    try {
+      if (!exportFile.exists()) {
+        exportFile.createNewFile();
+      }
+      exportFile.deleteOnExit();
+
+      createDefaultSetup(null);
+
+      manager.invoke(new SerializableCallable() {
+        public Object call() {
+          Region region = createParReg(regionName, getCache());
+          return region.put("Manager", "ASD");
+        }
+      });
+
+      vm1.invoke(new SerializableCallable() {
+        @Override
+        public Object call() throws Exception {
+          Region region = createParReg(regionName, getCache());
+          return region.put("VM1", "QWE");
+        }
+      });
+
+      CommandStringBuilder csb = new CommandStringBuilder(CliStrings.EXPORT_DATA);
+      csb.addOption(CliStrings.EXPORT_DATA__REGION, regionName);
+      csb.addOption(CliStrings.EXPORT_DATA__MEMBER, "Manager");
+      csb.addOption(CliStrings.EXPORT_DATA__FILE, filePath);
+      String commandString = csb.toString();
+
+      CommandResult cmdResult = executeCommand(commandString);
+      String resultAsString = commandResultToString(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      getLogWriter().info("Command Output");
+      getLogWriter().info(resultAsString);
+
+      vm1.invoke(new SerializableRunnable() {
+        public void run() {
+          Region region = getCache().getRegion(regionName);
+          region.destroy("Manager");
+          region.destroy("VM1");
+        }
+      });
+
+      /**
+       * Import the data 
+       */
+
+      csb = new CommandStringBuilder(CliStrings.IMPORT_DATA);
+      csb.addOption(CliStrings.IMPORT_DATA__REGION, regionName);
+      csb.addOption(CliStrings.IMPORT_DATA__FILE, filePath);
+      csb.addOption(CliStrings.IMPORT_DATA__MEMBER, "Manager");
+
+      commandString = csb.toString();
+      cmdResult = executeCommand(commandString);
+      resultAsString = commandResultToString(cmdResult);
+
+      getLogWriter().info("Result of import data");
+      getLogWriter().info(resultAsString);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+      /**
+       *  Validate the region entries after import
+       *  They must match the entries before export
+       */
+
+      manager.invoke(new SerializableRunnable() {
+        public void run() {
+          Region region = getCache().getRegion(regionName);
+          assertEquals(region.get("Manager"), "ASD");
+          assertEquals(region.get("VM1"), "QWE");
+        }
+      });
+
+      //Test for bad input
+      csb = new CommandStringBuilder(CliStrings.EXPORT_DATA);
+      csb.addOption(CliStrings.EXPORT_DATA__REGION, "FDSERW");
+      csb.addOption(CliStrings.EXPORT_DATA__FILE, filePath);
+      csb.addOption(CliStrings.EXPORT_DATA__MEMBER, "Manager");
+      commandString = csb.getCommandString();
+
+      cmdResult = executeCommand(commandString);
+      resultAsString = commandResultToString(cmdResult);
+      getLogWriter().info("Result of import data with wrong region name");
+      getLogWriter().info(resultAsString);
+      assertEquals(Result.Status.ERROR, cmdResult.getStatus());
+
+      csb = new CommandStringBuilder(CliStrings.IMPORT_DATA);
+      csb.addOption(CliStrings.IMPORT_DATA__REGION, regionName);
+      csb.addOption(CliStrings.IMPORT_DATA__FILE, "#WEQW");
+      csb.addOption(CliStrings.IMPORT_DATA__MEMBER, "Manager");
+      commandString = csb.getCommandString();
+
+      cmdResult = executeCommand(commandString);
+      resultAsString = commandResultToString(cmdResult);
+      getLogWriter().info("Result of import data with wrong file");
+      getLogWriter().info(resultAsString);
+      assertEquals(Result.Status.ERROR, cmdResult.getStatus());
+
+    } finally {
+      exportFile.delete();
+    }
+  }
+
+  void setupWith2Regions() {
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+    createDefaultSetup(null);
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create(REBALANCE_REGION_NAME);
+        for (int i = 0; i < 10; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+        region = dataRegionFactory.create(REBALANCE_REGION2_NAME);
+        for (int i = 0; i < 50; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+
+    vm2.invoke(new SerializableRunnable() {
+      public void run() {
+
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create(REBALANCE_REGION_NAME);
+        for (int i = 0; i < 150; i++) {
+          region.put("key" + (i + 400), "value" + (i + 400));
+        }
+        region = dataRegionFactory.create(REBALANCE_REGION2_NAME);
+        for (int i = 0; i < 100; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+  }
+
+  SerializableRunnable checkRegionMBeans = new SerializableRunnable() {
+    @Override
+    public void run() {
+      final WaitCriterion waitForManagerMBean = new WaitCriterion() {
+        @Override
+        public boolean done() {
+          final ManagementService service = ManagementService.getManagementService(getCache());
+          final DistributedRegionMXBean bean = service.getDistributedRegionMXBean(
+              Region.SEPARATOR + REBALANCE_REGION_NAME);
+          if (bean == null) {
+            getLogWriter().info("Still probing for checkRegionMBeans ManagerMBean");
+            return false;
+          } else {
+            // verify that bean is proper before executing tests
+            return bean.getMembers() != null && bean.getMembers().length > 1 && bean.getMemberCount() > 0
+                && service.getDistributedSystemMXBean().listRegions().length >= 2;
+          }
+        }
+
+        @Override
+        public String description() {
+          return "Probing for testRebalanceCommandForSimulateWithNoMember ManagerMBean";
+        }
+      };
+      DistributedTestCase.waitForCriterion(waitForManagerMBean, 2 * 60 * 1000, 2000, true);
+      DistributedRegionMXBean bean = ManagementService.getManagementService(getCache()).getDistributedRegionMXBean(
+          "/" + REBALANCE_REGION_NAME);
+      assertNotNull(bean);
+    }
+  };
+
+  public void testRebalanceCommandForTimeOut() {
+    setupTestRebalanceForEntireDS();
+
+    //check if DistributedRegionMXBean is available so that command will not fail
+    final VM manager = Host.getHost(0).getVM(0);
+    manager.invoke(checkRegionMBeans);
+    getLogWriter().info("testRebalanceCommandForTimeOut verified Mbean and executin command");
+    String command = "rebalance --time-out=1";
+    CommandResult cmdResult = executeCommand(command);
+    getLogWriter().info("testRebalanceCommandForTimeOut just after executing " + cmdResult);
+    if (cmdResult != null) {
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testRebalanceCommandForTimeOut stringResult : " + stringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testRebalanceCommandForTimeOut failed as did not get CommandResult");
+    }
+  }
+
+  public void testRebalanceCommandForTimeOutForRegion() {
+    setupTestRebalanceForEntireDS();
+
+    //check if DistributedRegionMXBean is available so that command will not fail
+    final VM manager = Host.getHost(0).getVM(0);
+    manager.invoke(checkRegionMBeans);
+
+    getLogWriter().info("testRebalanceCommandForTimeOutForRegion verified Mbean and executin command");
+
+    String command = "rebalance --time-out=1 --include-region=" + "/" + REBALANCE_REGION_NAME;
+    CommandResult cmdResult = executeCommand(command);
+
+    getLogWriter().info("testRebalanceCommandForTimeOutForRegion just after executing " + cmdResult);
+
+    if (cmdResult != null) {
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testRebalanceCommandForTimeOutForRegion stringResult : " + stringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testRebalanceCommandForTimeOut failed as did not get CommandResult");
+    }
+  }
+
+  public void testRebalanceCommandForSimulate() {
+    setupTestRebalanceForEntireDS();
+
+    //check if DistributedRegionMXBean is available so that command will not fail
+    final VM manager = Host.getHost(0).getVM(0);
+    manager.invoke(checkRegionMBeans);
+
+    getLogWriter().info("testRebalanceCommandForSimulate verified Mbean and executin command");
+    String command = "rebalance --simulate=true --include-region=" + "/" + REBALANCE_REGION_NAME;
+    CommandResult cmdResult = executeCommand(command);
+    getLogWriter().info("testRebalanceCommandForSimulate just after executing " + cmdResult);
+    if (cmdResult != null) {
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testRebalanceCommandForSimulate stringResult : " + stringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testRebalanceCommandForSimulate failed as did not get CommandResult");
+    }
+  }
+
+  public void testRebalanceCommandForSimulateWithNoMember() {
+    setupTestRebalanceForEntireDS();
+
+    //check if DistributedRegionMXBean is available so that command will not fail
+    final VM manager = Host.getHost(0).getVM(0);
+    manager.invoke(checkRegionMBeans);
+
+    getLogWriter().info("testRebalanceCommandForSimulateWithNoMember verified Mbean and executin command");
+
+    String command = "rebalance --simulate=true";
+    CommandResult cmdResult = executeCommand(command);
+
+    getLogWriter().info("testRebalanceCommandForSimulateWithNoMember just after executing " + cmdResult);
+
+    if (cmdResult != null) {
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testRebalanceCommandForSimulateWithNoMember stringResult : " + stringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testRebalanceCommandForSimulateWithNoMember failed as did not get CommandResult");
+    }
+  }
+
+  public void testRebalanceForIncludeRegionFunction() {
+    // setup();
+    setupWith2Regions();
+
+    //check if DistributedRegionMXBean is available so that command will not fail
+    final VM manager = Host.getHost(0).getVM(0);
+    manager.invoke(checkRegionMBeans);
+    getLogWriter().info("testRebalanceForIncludeRegionFunction verified Mbean and executin command");
+    String command = "rebalance --include-region=" + "/" + REBALANCE_REGION_NAME + ",/" + REBALANCE_REGION2_NAME;
+    CommandResult cmdResult = executeCommand(command);
+    getLogWriter().info("testRebalanceForIncludeRegionFunction just after executing " + cmdResult);
+    if (cmdResult != null) {
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testRebalanceForIncludeRegionFunction stringResult : " + stringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testRebalanceForIncludeRegionFunction failed as did not get CommandResult");
+    }
+  }
+
+  public void testSimulateForEntireDS() {
+    setupTestRebalanceForEntireDS();
+    //check if DistributedRegionMXBean is available so that command will not fail
+    final VM manager = Host.getHost(0).getVM(0);
+    manager.invoke(checkRegionMBeans);
+
+    getLogWriter().info("testSimulateForEntireDS verified Mbean and executin command");
+
+    String command = "rebalance --simulate=true";
+
+    CommandResult cmdResult = executeCommand(command);
+
+    getLogWriter().info("testSimulateForEntireDS just after executing " + cmdResult);
+
+    if (cmdResult != null) {
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testSimulateForEntireDS stringResult : " + stringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testRebalanceForIncludeRegionFunction failed as did not get CommandResult");
+    }
+  }
+
+  public void testRebalanceForEntireDS() {
+    setupTestRebalanceForEntireDS();
+    //check if DistributedRegionMXBean is available so that command will not fail
+    final VM manager = Host.getHost(0).getVM(0);
+    manager.invoke(checkRegionMBeans);
+    getLogWriter().info("testRebalanceForEntireDS verified Mbean and executin command");
+    String command = "rebalance";
+    CommandResult cmdResult = executeCommand(command);
+    getLogWriter().info("testRebalanceForEntireDS just after executing " + cmdResult);
+    if (cmdResult != null) {
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testRebalanceForEntireDS stringResult : " + stringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testRebalanceForIncludeRegionFunction failed as did not get CommandResult");
+    }
+  }
+
+  void setupTestRebalanceForEntireDS() {
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+    createDefaultSetup(null);
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create(REBALANCE_REGION_NAME);
+        for (int i = 0; i < 10; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+        region = dataRegionFactory.create(REBALANCE_REGION_NAME + "Another1");
+        for (int i = 0; i < 100; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+
+    vm2.invoke(new SerializableRunnable() {
+      public void run() {
+
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create(REBALANCE_REGION_NAME);
+        for (int i = 0; i < 100; i++) {
+          region.put("key" + (i + 400), "value" + (i + 400));
+        }
+        region = dataRegionFactory.create(REBALANCE_REGION_NAME + "Another2");
+        for (int i = 0; i < 10; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+  }
+
+  private static void printCommandOutput(CommandResult cmdResult) {
+    assertNotNull(cmdResult);
+    getLogWriter().info("Command Output : ");
+    StringBuilder sb = new StringBuilder();
+    cmdResult.resetToFirstLine();
+    while (cmdResult.hasNextLine()) {
+      sb.append(cmdResult.nextLine()).append(DataCommandRequest.NEW_LINE);
+    }
+    getLogWriter().info(sb.toString());
+    getLogWriter().info("");
+  }
+
+  public static class Value1WithValue2 extends Value1 {
+    private Value2 value2 = null;
+
+    public Value1WithValue2(int i) {
+      super(i);
+      value2 = new Value2(i);
+    }
+
+    public Value2 getValue2() {
+      return value2;
+    }
+
+    public void setValue2(Value2 value2) {
+      this.value2 = value2;
+    }
+  }
+
+  public void testRebalanceForExcludeRegionFunction() {
+    setupWith2Regions();
+
+    //check if DistributedRegionMXBean is available so that command will not fail
+    final VM manager = Host.getHost(0).getVM(0);
+    manager.invoke(checkRegionMBeans);
+
+    getLogWriter().info("testRebalanceForExcludeRegionFunction verified Mbean and executing command");
+
+    String command = "rebalance --exclude-region=" + "/" + REBALANCE_REGION2_NAME;
+    getLogWriter().info("testRebalanceForExcludeRegionFunction command : " + command);
+    CommandResult cmdResult = executeCommand(command);
+    getLogWriter().info("testRebalanceForExcludeRegionFunction just after executing " + cmdResult);
+    if (cmdResult != null) {
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testRebalanceForExcludeRegionFunction stringResult : " + stringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testRebalanceForIncludeRegionFunction failed as did not get CommandResult");
+    }
+  }
+
+  public void waitForListClientMbean(final String regionName) {
+
+    final VM manager = Host.getHost(0).getVM(0);
+
+    manager.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        final ManagementService service = ManagementService.getManagementService(cache);
+
+        final WaitCriterion waitForManagerMBean = new WaitCriterion() {
+          @Override
+          public boolean done() {
+            ManagerMXBean bean1 = service.getManagerMXBean();
+            DistributedRegionMXBean bean2 = service.getDistributedRegionMXBean(regionName);
+            if (bean1 == null) {
+              getLogWriter().info("waitForListClientMbean Still probing for ManagerMBean");
+              return false;
+            } else {
+              getLogWriter().info("waitForListClientMbean Still probing for DistributedRegionMXBean=" + bean2);
+              if (bean2 == null) {
+                bean2 = service.getDistributedRegionMXBean(Region.SEPARATOR + regionName);
+              }
+              if (bean2 == null) {
+                getLogWriter().info(
+                    "waitForListClientMbean Still probing for DistributedRegionMXBean with separator = " + bean2);
+                return false;
+              } else {
+                getLogWriter().info(
+                    "waitForListClientMbean Still probing for DistributedRegionMXBean with separator Not null  " + bean2.getMembers().length);
+                return bean2.getMembers().length > 1;
+              }
+            }
+          }
+
+          @Override
+          public String description() {
+            return "waitForListClientMbean Probing for ManagerMBean";
+          }
+        };
+
+        DistributedTestCase.waitForCriterion(waitForManagerMBean, 30000, 2000, true);
+        DistributedRegionMXBean bean = service.getDistributedRegionMXBean(regionName);
+        if (bean == null) {
+          bean = service.getDistributedRegionMXBean(Region.SEPARATOR + regionName);
+        }
+        assertNotNull(bean);
+      }
+    });
+
+  }
+
+  public void testRegionsViaMbeanAndFunctions() {
+
+    setupForGetPutRemoveLocateEntry("tesSimplePut");
+    waitForListClientMbean(DATA_REGION_NAME_PATH);
+    final VM manager = Host.getHost(0).getVM(0);
+
+    String memSizeFromMbean = (String) manager.invoke(new SerializableCallable() {
+      public Object call() {
+        Cache cache = GemFireCacheImpl.getInstance();
+        DistributedRegionMXBean bean = ManagementService.getManagementService(cache).getDistributedRegionMXBean(
+            DATA_REGION_NAME_PATH);
+
+        if (bean == null) { // try with slash ahead
+          bean = ManagementService.getManagementService(cache).getDistributedRegionMXBean(
+              Region.SEPARATOR + DATA_REGION_NAME_PATH);
+        }
+
+        if (bean == null) {
+          return null;
+        }
+
+        String[] membersName = bean.getMembers();
+        return "" + membersName.length;
+      }
+    });
+
+    getLogWriter().info("testRegionsViaMbeanAndFunctions memSizeFromMbean= " + memSizeFromMbean);
+
+    String memSizeFromFunctionCall = (String) manager.invoke(new SerializableCallable() {
+      public Object call() {
+        Cache cache = GemFireCacheImpl.getInstance();
+        return "" + CliUtil.getMembersForeRegionViaFunction(cache, DATA_REGION_NAME_PATH).size();
+      }
+    });
+
+    getLogWriter().info("testRegionsViaMbeanAndFunctions memSizeFromFunctionCall= " + memSizeFromFunctionCall);
+    assertTrue(memSizeFromFunctionCall.equals(memSizeFromMbean));
+  }
+
+  public void testRegionsViaMbeanAndFunctionsForPartRgn() {
+    setupWith2Regions();
+    waitForListClientMbean(REBALANCE_REGION_NAME);
+    final VM manager = Host.getHost(0).getVM(0);
+
+    String memSizeFromMbean = (String) manager.invoke(new SerializableCallable() {
+      public Object call() {
+        Cache cache = GemFireCacheImpl.getInstance();
+        DistributedRegionMXBean bean = ManagementService.getManagementService(cache).getDistributedRegionMXBean(
+            REBALANCE_REGION_NAME);
+
+        if (bean == null) {
+          bean = ManagementService.getManagementService(cache).getDistributedRegionMXBean(
+              Region.SEPARATOR + REBALANCE_REGION_NAME);
+        }
+
+        if (bean == null) {
+          return null;
+        }
+
+        String[] membersName = bean.getMembers();
+        return "" + membersName.length;
+      }
+    });
+
+    getLogWriter().info("testRegionsViaMbeanAndFunctionsForPartRgn memSizeFromMbean= " + memSizeFromMbean);
+
+    String memSizeFromFunctionCall = (String) manager.invoke(new SerializableCallable() {
+      public Object call() {
+        Cache cache = GemFireCacheImpl.getInstance();
+        return "" + CliUtil.getMembersForeRegionViaFunction(cache, REBALANCE_REGION_NAME).size();
+      }
+    });
+
+    getLogWriter().info(
+        "testRegionsViaMbeanAndFunctionsForPartRgn memSizeFromFunctionCall= " + memSizeFromFunctionCall);
+    assertTrue(memSizeFromFunctionCall.equals(memSizeFromMbean));
+  }
+
+  public void tearDown2() throws Exception {
+    super.tearDown2();
+  }
+}
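
The tests above exercise gfsh's export data, import data and rebalance commands purely through command strings built in Java. As an illustrative sketch only (the region name, member name and file path below are placeholders, and the option spellings are inferred from the CliStrings constants used in the tests rather than taken from authoritative documentation), an equivalent interactive gfsh session might look like:

    gfsh> export data --region=/Region1 --file=/tmp/export.gfd --member=Manager
    gfsh> import data --region=/Region1 --file=/tmp/export.gfd --member=Manager
    gfsh> rebalance --include-region=/Region1 --simulate=true
    gfsh> rebalance --time-out=1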



[43/50] [abbrv] incubator-geode git commit: Revision 68dfcab10d68b1babb2035bc4c87c93acf52077c closed #48

Posted by kl...@apache.org.
Revision 68dfcab10d68b1babb2035bc4c87c93acf52077c closed #48


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/34eb0fe4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/34eb0fe4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/34eb0fe4

Branch: refs/heads/feature/GEODE-291
Commit: 34eb0fe441b0759895240f605f5e33c67a657ead
Parents: aa27c6a
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Thu Dec 10 11:42:19 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Thu Dec 10 11:42:19 2015 -0800

----------------------------------------------------------------------

----------------------------------------------------------------------



[10/50] [abbrv] incubator-geode git commit: GEODE-563: Moving gfsh tests from closed

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/QueueCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/QueueCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/QueueCommandsDUnitTest.java
new file mode 100644
index 0000000..54aed63
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/QueueCommandsDUnitTest.java
@@ -0,0 +1,385 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueue;
+import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalLocator;
+import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
+import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.internal.ClassBuilder;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * A distributed test suite of test cases for testing the queue commands that are part of Gfsh.
+ *
+ * @author David Hoots
+ * @since 8.0
+ */
+public class QueueCommandsDUnitTest extends CliCommandTestBase {
+  private static final long serialVersionUID = 1L;
+
+  final List<String> filesToBeDeleted = new CopyOnWriteArrayList<String>();
+
+  public QueueCommandsDUnitTest(final String testName) {
+    super(testName);
+  }
+
+  public void testAsyncEventQueue() throws IOException {
+    final String queue1Name = "testAsyncEventQueue1";
+    final String queue2Name = "testAsyncEventQueue2";
+    final String diskStoreName = "testAsyncEventQueueDiskStore";
+
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group0");
+    createDefaultSetup(localProps);
+
+    CommandResult cmdResult = executeCommand(CliStrings.LIST_ASYNC_EVENT_QUEUES);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertTrue(commandResultToString(cmdResult).contains("No Async Event Queues Found"));
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final String vm1Name = "VM" + vm1.getPid();
+    final File diskStoreDir = new File(new File(".").getAbsolutePath(), diskStoreName);
+    this.filesToBeDeleted.add(diskStoreDir.getAbsolutePath());
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        diskStoreDir.mkdirs();
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm1Name);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+        getSystem(localProps);
+        getCache();
+      }
+    });
+
+    final VM vm2 = Host.getHost(0).getVM(2);
+    final String vm2Name = "VM" + vm2.getPid();
+    vm2.invoke(new SerializableRunnable() {
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm2Name);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group2");
+        getSystem(localProps);
+        getCache();
+      }
+    });
+
+    // Deploy a JAR file with an AsyncEventListener/GatewayEventFilter/GatewayEventSubstitutionFilter
+    // that can be instantiated on each server
+    final File jarFile = new File(new File(".").getAbsolutePath(), "QueueCommandsDUnit.jar");
+    QueueCommandsDUnitTest.this.filesToBeDeleted.add(jarFile.getAbsolutePath());
+
+    ClassBuilder classBuilder = new ClassBuilder();
+    byte[] jarBytes = classBuilder.createJarFromClassContent("com/qcdunit/QueueCommandsDUnitTestHelper",
+        "package com.qcdunit;" +
+            "import java.util.List; import java.util.Properties;" +
+            "import com.gemstone.gemfire.internal.cache.xmlcache.Declarable2; import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;" +
+            "import com.gemstone.gemfire.cache.wan.GatewayEventFilter; import com.gemstone.gemfire.cache.wan.GatewayEventSubstitutionFilter;" +
+            "import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener; import com.gemstone.gemfire.cache.wan.GatewayQueueEvent;" +
+            "import com.gemstone.gemfire.cache.EntryEvent;" +
+            "public class QueueCommandsDUnitTestHelper implements Declarable2, GatewayEventFilter, GatewayEventSubstitutionFilter, AsyncEventListener {" +
+            "Properties props;" +
+            "public boolean processEvents(List<AsyncEvent> events) { return true; }" +
+            "public void afterAcknowledgement(GatewayQueueEvent event) {}" +
+            "public boolean beforeEnqueue(GatewayQueueEvent event) { return true; }" +
+            "public boolean beforeTransmit(GatewayQueueEvent event) { return true; }" +
+            "public Object getSubstituteValue(EntryEvent event) { return null; }" +
+            "public void close() {}" +
+            "public void init(final Properties props) {this.props = props;}" +
+            "public Properties getConfig() {return this.props;}}");
+    writeJarBytesToFile(jarFile, jarBytes);
+
+    cmdResult = executeCommand("deploy --jar=QueueCommandsDUnit.jar");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE);
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__NAME, diskStoreName);
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__GROUP, "Group1");
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, diskStoreDir.getAbsolutePath());
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    String stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*Success"));
+
+    commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_ASYNC_EVENT_QUEUE);
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ID, queue1Name);
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__GROUP, "Group1");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__BATCH_SIZE, "514");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__PERSISTENT, "true");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__DISK_STORE, diskStoreName);
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__MAXIMUM_QUEUE_MEMORY, "213");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__BATCHTIMEINTERVAL, "946");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__PARALLEL, "true");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ENABLEBATCHCONFLATION, "true");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__DISPATCHERTHREADS, "2");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ORDERPOLICY, "PARTITION");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__GATEWAYEVENTFILTER,
+        "com.qcdunit.QueueCommandsDUnitTestHelper");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__SUBSTITUTION_FILTER,
+        "com.qcdunit.QueueCommandsDUnitTestHelper");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__DISKSYNCHRONOUS, "false");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__LISTENER,
+        "com.qcdunit.QueueCommandsDUnitTestHelper");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__LISTENER_PARAM_AND_VALUE, "param1");
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__LISTENER_PARAM_AND_VALUE, "param2#value2");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*Success"));
+
+    // Verify that the queue was created on the correct member
+    cmdResult = executeCommand(CliStrings.LIST_ASYNC_EVENT_QUEUES);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult,
+        vm1Name + " .*" + queue1Name + " .*514 .*true .*" + diskStoreName + " .*213 .*" + " .*com.qcdunit.QueueCommandsDUnitTestHelper" + ".*"));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*param2=value2.*"));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*param1=[^\\w].*"));
+    assertFalse(stringContainsLine(stringResult, vm2Name + ".*" + queue1Name + ".*"));
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        Cache cache = getCache();
+        AsyncEventQueue queue = cache.getAsyncEventQueue(queue1Name);
+        assertEquals(queue.getBatchSize(), 514);
+        assertEquals(queue.isPersistent(), true);
+        assertEquals(queue.getDiskStoreName(), diskStoreName);
+        assertEquals(queue.getMaximumQueueMemory(), 213);
+        assertEquals(queue.getBatchTimeInterval(), 946);
+        assertEquals(queue.isParallel(), true);
+        assertEquals(queue.isBatchConflationEnabled(), true);
+        assertEquals(queue.getDispatcherThreads(), 2);
+        assertEquals(queue.getOrderPolicy().toString(), "PARTITION");
+        assertEquals(queue.getGatewayEventFilters().size(), 1);
+        assertEquals(queue.getGatewayEventFilters().get(0).getClass().getName(),
+            "com.qcdunit.QueueCommandsDUnitTestHelper");
+        assertEquals(queue.getGatewayEventSubstitutionFilter().getClass().getName(),
+            "com.qcdunit.QueueCommandsDUnitTestHelper");
+        assertEquals(queue.isDiskSynchronous(), false);
+        assertEquals(queue.getAsyncEventListener().getClass().getName(), "com.qcdunit.QueueCommandsDUnitTestHelper");
+      }
+    });
+
+    commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_ASYNC_EVENT_QUEUE);
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ID, queue2Name);
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__LISTENER,
+        "com.qcdunit.QueueCommandsDUnitTestHelper");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(5, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, "Manager.*Success"));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*Success"));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*Success"));
+
+    // Verify that the queue was created on the correct members
+    cmdResult = executeCommand(CliStrings.LIST_ASYNC_EVENT_QUEUES);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(6, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult,
+        "Manager .*" + queue2Name + " .*100 .*false .*null .*100 .*" + " .*com.qcdunit.QueueCommandsDUnitTestHelper"));
+    assertTrue(stringContainsLine(stringResult,
+        vm1Name + " .*" + queue1Name + " .*514 .*true .*" + diskStoreName + " .*213 .*" + " .*com.qcdunit.QueueCommandsDUnitTestHelper" + ".*"));
+    assertTrue(stringContainsLine(stringResult,
+        vm1Name + " .*" + queue2Name + " .*100 .*false .*null .*100 .*" + " .*com.qcdunit.QueueCommandsDUnitTestHelper"));
+    assertTrue(stringContainsLine(stringResult,
+        vm2Name + " .*" + queue2Name + " .*100 .*false .*null .*100 .*" + " .*com.qcdunit.QueueCommandsDUnitTestHelper"));
+  }
+
+  /**
+   * Asserts that creating async event queues correctly updates the shared configuration.
+   */
+  public void testCreateUpdatesSharedConfig() throws IOException {
+    disconnectAllFromDS();
+
+    final String queueName = "testAsyncEventQueueQueue";
+    final String groupName = "testAsyncEventQueueSharedConfigGroup";
+
+    // Start the Locator and wait for shared configuration to be available
+    final int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+
+        final File locatorLogFile = new File("locator-" + locatorPort + ".log");
+        final Properties locatorProps = new Properties();
+        locatorProps.setProperty(DistributionConfig.NAME_NAME, "Locator");
+        locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        locatorProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "fine");
+        locatorProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+        try {
+          final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
+              locatorProps);
+
+          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+            @Override
+            public boolean done() {
+              return locator.isSharedConfigurationRunning();
+            }
+
+            @Override
+            public String description() {
+              return "Waiting for shared configuration to be started";
+            }
+          };
+          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+        } catch (IOException ioex) {
+          fail("Unable to create a locator with a shared configuration");
+        }
+      }
+    });
+
+    // Start the default manager
+    Properties managerProps = new Properties();
+    managerProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    managerProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+    createDefaultSetup(managerProps);
+
+    // Create a cache in VM 1
+    VM vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        getSystem(localProps);
+        assertNotNull(getCache());
+      }
+    });
+
+    // Deploy a JAR file with an AsyncEventListener that can be instantiated on each server
+    final File jarFile = new File(new File(".").getAbsolutePath(), "QueueCommandsDUnit.jar");
+    QueueCommandsDUnitTest.this.filesToBeDeleted.add(jarFile.getAbsolutePath());
+
+    ClassBuilder classBuilder = new ClassBuilder();
+    byte[] jarBytes = classBuilder.createJarFromClassContent("com/qcdunit/QueueCommandsDUnitTestListener",
+        "package com.qcdunit;" +
+            "import java.util.List; import java.util.Properties;" +
+            "import com.gemstone.gemfire.internal.cache.xmlcache.Declarable2; import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;" +
+            "import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;" +
+            "public class QueueCommandsDUnitTestListener implements Declarable2, AsyncEventListener {" +
+            "Properties props;" +
+            "public boolean processEvents(List<AsyncEvent> events) { return true; }" +
+            "public void close() {}" +
+            "public void init(final Properties props) {this.props = props;}" +
+            "public Properties getConfig() {return this.props;}}");
+    writeJarBytesToFile(jarFile, jarBytes);
+
+    CommandResult cmdResult = executeCommand("deploy --jar=QueueCommandsDUnit.jar");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Test creating the queue
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_ASYNC_EVENT_QUEUE);
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ID, queueName);
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__GROUP, groupName);
+    commandStringBuilder.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__LISTENER,
+        "com.qcdunit.QueueCommandsDUnitTestListener");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Make sure the queue exists in the shared config
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        String xmlFromConfig;
+        try {
+          xmlFromConfig = sharedConfig.getConfiguration(groupName).getCacheXmlContent();
+          assertTrue(xmlFromConfig.contains(queueName));
+        } catch (Exception e) {
+          fail("Error occurred in cluster configuration service", e);
+        }
+      }
+    });
+
+    //Close cache in the vm1 and restart it to get the shared configuration
+    vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        assertNotNull(cache);
+        cache.close();
+
+        assertTrue(cache.isClosed());
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        localProps.setProperty(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "true");
+        getSystem(localProps);
+        cache = getCache();
+        assertNotNull(cache);
+        AsyncEventQueue aeq = cache.getAsyncEventQueue(queueName);
+
+        assertNotNull(aeq);
+      }
+    });
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    for (String path : this.filesToBeDeleted) {
+      try {
+        final File fileToDelete = new File(path);
+        FileUtil.delete(fileToDelete);
+        if (path.endsWith(".jar")) {
+          executeCommand("undeploy --jar=" + fileToDelete.getName());
+        }
+      } catch (IOException e) {
+        getLogWriter().error("Unable to delete file", e);
+      }
+    }
+    this.filesToBeDeleted.clear();
+    super.tearDown2();
+  }
+
+  private void writeJarBytesToFile(File jarFile, byte[] jarBytes) throws IOException {
+    final OutputStream outStream = new FileOutputStream(jarFile);
+    outStream.write(jarBytes);
+    outStream.close();
+  }
+}
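
The queue assertions above are likewise driven by gfsh command strings. A minimal illustrative sequence for the same commands (the disk-store directory is a placeholder, the other names are reused from the test, and the option names are inferred from the CliStrings constants rather than authoritative syntax) might be:

    gfsh> create disk-store --name=testAsyncEventQueueDiskStore --dir=/tmp/aeqDiskStore
    gfsh> create async-event-queue --id=testAsyncEventQueue1 --group=Group1 --batch-size=514 --persistent=true --disk-store=testAsyncEventQueueDiskStore --listener=com.qcdunit.QueueCommandsDUnitTestHelper
    gfsh> list async-event-queues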

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/SharedConfigurationCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/SharedConfigurationCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/SharedConfigurationCommandsDUnitTest.java
new file mode 100644
index 0000000..adf5b5f
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/SharedConfigurationCommandsDUnitTest.java
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalLocator;
+import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.ClassBuilder;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.CliUtil;
+import com.gemstone.gemfire.management.internal.cli.HeadlessGfsh;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import com.gemstone.gemfire.management.internal.configuration.SharedConfigurationDUnitTest;
+import com.gemstone.gemfire.management.internal.configuration.domain.Configuration;
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import dunit.VM;
+import org.apache.commons.io.FileUtils;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Properties;
+import java.util.Set;
+
+/***
+ * DUnit test to test export and import of shared configuration.
+ *
+ * @author bansods
+ */
+public class SharedConfigurationCommandsDUnitTest extends CliCommandTestBase {
+
+  private static final long serialVersionUID = 1L;
+  private static final int TIMEOUT = 10000;
+  private static final int INTERVAL = 500;
+
+  public SharedConfigurationCommandsDUnitTest(String name) {
+    super(name);
+  }
+
+  File newDeployableJarFile = new File("DeployCommandsDUnit1.jar");
+  private transient ClassBuilder classBuilder = new ClassBuilder();
+
+  @SuppressWarnings("unchecked")
+  public void testExportImportSharedConfiguration() {
+    disconnectAllFromDS();
+
+    final String region1Name = "r1";
+    final String region2Name = "r2";
+    final String groupName = "testRegionSharedConfigGroup";
+    final String sharedConfigZipFileName = "sharedConfig.zip";
+    final String deployedJarName = "DeployCommandsDUnit1.jar";
+    final String logLevel = "info";
+    final String startArchiveFileName = "stats.gfs";
+    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(3);
+
+    // TODO Sourabh - the code below is similar to CliCommandTestBase.createDefaultSetup(..); we may want to consider
+    // refactoring this and combine the duplicate code blocks using either the Template Method and/or Strategy design
+    // patterns.  We can talk about this.
+    // Start the Locator and wait for shared configuration to be available
+    final int locator1Port = ports[0];
+    final String locator1Name = "locator1-" + locator1Port;
+    VM locatorAndMgr = Host.getHost(0).getVM(3);
+    Object[] result = (Object[]) locatorAndMgr.invoke(new SerializableCallable() {
+      @Override
+      public Object call() {
+        int httpPort;
+        int jmxPort;
+        String jmxHost;
+
+        try {
+          jmxHost = InetAddress.getLocalHost().getHostName();
+        } catch (UnknownHostException ignore) {
+          jmxHost = "localhost";
+        }
+
+        final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+
+        jmxPort = ports[0];
+        httpPort = ports[1];
+
+        final File locatorLogFile = new File("locator-" + locator1Port + ".log");
+
+        final Properties locatorProps = new Properties();
+        locatorProps.setProperty(DistributionConfig.NAME_NAME, locator1Name);
+        locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        locatorProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "config");
+        locatorProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+        locatorProps.setProperty(DistributionConfig.JMX_MANAGER_NAME, "true");
+        locatorProps.setProperty(DistributionConfig.JMX_MANAGER_START_NAME, "true");
+        locatorProps.setProperty(DistributionConfig.JMX_MANAGER_BIND_ADDRESS_NAME, String.valueOf(jmxHost));
+        locatorProps.setProperty(DistributionConfig.JMX_MANAGER_PORT_NAME, String.valueOf(jmxPort));
+        locatorProps.setProperty(DistributionConfig.HTTP_SERVICE_PORT_NAME, String.valueOf(httpPort));
+
+        try {
+          final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locator1Port, locatorLogFile,
+              null, locatorProps);
+          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+            @Override
+            public boolean done() {
+              return locator.isSharedConfigurationRunning();
+            }
+
+            @Override
+            public String description() {
+              return "Waiting for shared configuration to be started";
+            }
+          };
+          DistributedTestCase.waitForCriterion(wc, TIMEOUT, INTERVAL, true);
+        } catch (IOException ioex) {
+          fail("Unable to create a locator with a shared configuration");
+        }
+
+        final Object[] result = new Object[4];
+        result[0] = jmxHost;
+        result[1] = jmxPort;
+        result[2] = httpPort;
+        result[3] = CliUtil.getAllNormalMembers(CacheFactory.getAnyInstance());
+
+        return result;
+      }
+    });
+
+    HeadlessGfsh gfsh = getDefaultShell();
+    String jmxHost = (String) result[0];
+    int jmxPort = (Integer) result[1];
+    int httpPort = (Integer) result[2];
+    Set<DistributedMember> normalMembers1 = (Set<DistributedMember>) result[3];
+
+    shellConnect(jmxHost, jmxPort, httpPort, gfsh);
+    // Create a cache in VM 1
+    VM dataMember = Host.getHost(0).getVM(1);
+    normalMembers1 = (Set<DistributedMember>) dataMember.invoke(new SerializableCallable() {
+      @Override
+      public Object call() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locator1Port);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        localProps.setProperty(DistributionConfig.NAME_NAME, "DataMember");
+        getSystem(localProps);
+        Cache cache = getCache();
+        assertNotNull(cache);
+        return CliUtil.getAllNormalMembers(cache);
+      }
+    });
+    // Create a JAR file
+    try {
+      this.classBuilder.writeJarFromName("DeployCommandsDUnitA", this.newDeployableJarFile);
+    } catch (IOException e) {
+      // TODO Auto-generated catch block
+      e.printStackTrace();
+    }
+
+    // Deploy the JAR
+    CommandResult cmdResult = executeCommand("deploy --jar=" + deployedJarName);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    //Create the region1 on the group
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_REGION);
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGION, region1Name);
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT, "REPLICATE");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__STATISTICSENABLED, "true");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__GROUP, groupName);
+
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_REGION);
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGION, region2Name);
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT, "PARTITION");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__STATISTICSENABLED, "true");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    //Alter runtime configuration 
+    commandStringBuilder = new CommandStringBuilder(CliStrings.ALTER_RUNTIME_CONFIG);
+    commandStringBuilder.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__LEVEL, logLevel);
+    commandStringBuilder.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__FILE__SIZE__LIMIT, "50");
+    commandStringBuilder.addOption(CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__DISK__SPACE__LIMIT, "32");
+    commandStringBuilder.addOption(CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__FILE__SIZE__LIMIT, "49");
+    commandStringBuilder.addOption(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLE__RATE, "120");
+    commandStringBuilder.addOption(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__ARCHIVE__FILE, startArchiveFileName);
+    commandStringBuilder.addOption(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLING__ENABLED, "true");
+    commandStringBuilder.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT, "10");
+    cmdResult = executeCommand(commandStringBuilder.getCommandString());
+    String resultString = commandResultToString(cmdResult);
+
+    getLogWriter().info("#SB Result\n");
+    getLogWriter().info(resultString);
+    assertEquals(true, cmdResult.getStatus().equals(Status.OK));
+
+    commandStringBuilder = new CommandStringBuilder(CliStrings.STATUS_SHARED_CONFIG);
+    cmdResult = executeCommand(commandStringBuilder.getCommandString());
+    resultString = commandResultToString(cmdResult);
+    getLogWriter().info("#SB Result\n");
+    getLogWriter().info(resultString);
+    assertEquals(Status.OK, cmdResult.getStatus());
+
+    commandStringBuilder = new CommandStringBuilder(CliStrings.EXPORT_SHARED_CONFIG);
+    commandStringBuilder.addOption(CliStrings.EXPORT_SHARED_CONFIG__FILE, sharedConfigZipFileName);
+    cmdResult = executeCommand(commandStringBuilder.getCommandString());
+    resultString = commandResultToString(cmdResult);
+    getLogWriter().info("#SB Result\n");
+    getLogWriter().info(resultString);
+    assertEquals(Status.OK, cmdResult.getStatus());
+
+    //Import into a running system should fail
+    commandStringBuilder = new CommandStringBuilder(CliStrings.IMPORT_SHARED_CONFIG);
+    commandStringBuilder.addOption(CliStrings.IMPORT_SHARED_CONFIG__ZIP, sharedConfigZipFileName);
+    cmdResult = executeCommand(commandStringBuilder.getCommandString());
+    assertEquals(Status.ERROR, cmdResult.getStatus());
+
+    //Stop the data members and remove the shared configuration in the locator.
+    dataMember.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Cache cache = getCache();
+        cache.close();
+        assertTrue(cache.isClosed());
+        disconnectFromDS();
+        return null;
+      }
+    });
+
+    //Clear shared configuration in this locator to test the import shared configuration
+    locatorAndMgr.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        InternalLocator locator = InternalLocator.getLocator();
+        SharedConfiguration sc = locator.getSharedConfiguration();
+        assertNotNull(sc);
+        sc.clearSharedConfiguration();
+        return null;
+      }
+    });
+
+    //Now import the shared configuration into the cleared locator;
+    //with the previous configuration removed, the import should succeed.
+    commandStringBuilder = new CommandStringBuilder(CliStrings.IMPORT_SHARED_CONFIG);
+    commandStringBuilder.addOption(CliStrings.IMPORT_SHARED_CONFIG__ZIP, sharedConfigZipFileName);
+    cmdResult = executeCommand(commandStringBuilder.getCommandString());
+    assertEquals(Status.OK, cmdResult.getStatus());
+
+    //Start a new locator and test that it has all the imported shared configuration artifacts
+    VM newLocator = Host.getHost(0).getVM(2);
+    final int locator2Port = ports[1];
+    final String locator2Name = "Locator2-" + locator2Port;
+
+    newLocator.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        final File locatorLogFile = new File("locator-" + locator2Port + ".log");
+        final Properties locatorProps = new Properties();
+        locatorProps.setProperty(DistributionConfig.NAME_NAME, locator2Name);
+        locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        locatorProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "fine");
+        locatorProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+        locatorProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locator1Port);
+
+        try {
+          final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locator2Port, locatorLogFile,
+              null, locatorProps);
+
+          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+            @Override
+            public boolean done() {
+              return locator.isSharedConfigurationRunning();
+            }
+
+            @Override
+            public String description() {
+              return "Waiting for shared configuration to be started";
+            }
+          };
+          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+
+          SharedConfiguration sc = locator.getSharedConfiguration();
+          assertNotNull(sc);
+          Configuration groupConfig = sc.getConfiguration(groupName);
+          assertNotNull(groupConfig);
+          assertTrue(groupConfig.getCacheXmlContent().contains(region1Name));
+
+          Configuration clusterConfig = sc.getConfiguration(SharedConfiguration.CLUSTER_CONFIG);
+          assertNotNull(clusterConfig);
+          assertTrue(clusterConfig.getCacheXmlContent().contains(region2Name));
+          assertTrue(clusterConfig.getJarNames().contains(deployedJarName));
+          assertTrue(
+              clusterConfig.getGemfireProperties().getProperty(DistributionConfig.LOG_LEVEL_NAME).equals(logLevel));
+          assertTrue(
+              clusterConfig.getGemfireProperties().getProperty(DistributionConfig.STATISTIC_ARCHIVE_FILE_NAME).equals(
+                  startArchiveFileName));
+        } catch (IOException ioex) {
+          fail("Unable to create a locator with a shared configuration");
+        } catch (Exception e) {
+          fail("Error occurred in cluster configuration service", e);
+        }
+      }
+    });
+
+    //Clean up
+    File sharedConfigZipFile = new File(sharedConfigZipFileName);
+    FileUtils.deleteQuietly(sharedConfigZipFile);
+    FileUtils.deleteQuietly(newDeployableJarFile);
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    super.tearDown2();
+    for (int i = 0; i < 4; i++) {
+      Host.getHost(0).getVM(i).invoke(SharedConfigurationDUnitTest.locatorCleanup);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShellCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShellCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShellCommandsDUnitTest.java
new file mode 100644
index 0000000..0fc6be4
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShellCommandsDUnitTest.java
@@ -0,0 +1,365 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.distributed.AbstractLauncher.Status;
+import com.gemstone.gemfire.distributed.LocatorLauncher;
+import com.gemstone.gemfire.distributed.LocatorLauncher.LocatorState;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.lang.StringUtils;
+import com.gemstone.gemfire.internal.util.IOUtils;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.domain.DataCommandRequest;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import org.junit.Before;
+
+import java.io.File;
+import java.util.concurrent.TimeUnit;
+
+public class ShellCommandsDUnitTest extends CliCommandTestBase {
+
+  private static final long serialVersionUID = 1L;
+
+  public ShellCommandsDUnitTest(String name) {
+    super(name);
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    getDefaultShell();
+  }
+
+  protected CommandResult connectToLocator(final int locatorPort) {
+    return executeCommand(new CommandStringBuilder(CliStrings.CONNECT).addOption(CliStrings.CONNECT__LOCATOR,
+        "localhost[" + locatorPort + "]").toString());
+  }
+
+  public void testConnectToLocatorBecomesManager() {
+    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+
+    final int jmxManagerPort = ports[0];
+    final int locatorPort = ports[1];
+
+    System.setProperty("gemfire.jmx-manager-port", String.valueOf(jmxManagerPort));
+    System.setProperty("gemfire.jmx-manager-http-port", "0");
+
+    assertEquals(String.valueOf(jmxManagerPort), System.getProperty("gemfire.jmx-manager-port"));
+    assertEquals("0", System.getProperty("gemfire.jmx-manager-http-port"));
+
+    final String pathname = (getClass().getSimpleName() + "_" + getTestName());
+    final File workingDirectory = new File(pathname);
+
+    workingDirectory.mkdir();
+
+    assertTrue(workingDirectory.isDirectory());
+
+    final LocatorLauncher locatorLauncher = new LocatorLauncher.Builder().setBindAddress(null).setForce(
+        true).setMemberName(pathname).setPort(locatorPort).setWorkingDirectory(
+        IOUtils.tryGetCanonicalPathElseGetAbsolutePath(workingDirectory)).build();
+
+    assertNotNull(locatorLauncher);
+    assertEquals(locatorPort, locatorLauncher.getPort().intValue());
+
+    try {
+      // fix for bug 46729
+      locatorLauncher.start();
+
+      final LocatorState locatorState = locatorLauncher.waitOnStatusResponse(60, 10, TimeUnit.SECONDS);
+
+      assertNotNull(locatorState);
+      assertEquals(Status.ONLINE, locatorState.getStatus());
+
+      final Result result = connectToLocator(locatorPort);
+
+      assertNotNull(result);
+      assertEquals(Result.Status.OK, result.getStatus());
+    } finally {
+      assertEquals(Status.STOPPED, locatorLauncher.stop().getStatus());
+      assertEquals(Status.NOT_RESPONDING, locatorLauncher.status().getStatus());
+    }
+  }
+
+  public void testEchoWithVariableAtEnd() {
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testEcho command gfshInstance is null");
+    }
+    getLogWriter().info("Gfsh " + gfshInstance);
+
+    gfshInstance.setEnvProperty("TESTSYS", "SYS_VALUE");
+    printAllEnvs(gfshInstance);
+
+    String command = "echo --string=\"Hello World! This is ${TESTSYS}\"";
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      String stringResult = commandResultToString(cmdResult);
+      assertEquals("Hello World! This is SYS_VALUE", StringUtils.trim(stringResult));
+    } else {
+      fail("testEchoWithVariableAtEnd failed");
+    }
+  }
+
+  public void testEchoWithNoVariable() {
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testEcho command gfshInstance is null");
+    }
+
+    gfshInstance.setEnvProperty("TESTSYS", "SYS_VALUE");
+    printAllEnvs(gfshInstance);
+
+    String command = "echo --string=\"Hello World! This is Pivotal\"";
+
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      String stringResult = commandResultToString(cmdResult);
+      assertTrue(stringResult.contains("Hello World! This is Pivotal"));
+    } else {
+      fail("testEchoWithNoVariable failed");
+    }
+  }
+
+  public void testEchoWithVariableAtStart() {
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testEcho command gfshInstance is null");
+    }
+
+    gfshInstance.setEnvProperty("TESTSYS", "SYS_VALUE");
+    printAllEnvs(gfshInstance);
+
+    String command = "echo --string=\"${TESTSYS} Hello World! This is Pivotal\"";
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      String stringResult = commandResultToString(cmdResult);
+      assertTrue(stringResult.contains("SYS_VALUE Hello World! This is Pivotal"));
+    } else {
+      fail("testEchoWithVariableAtStart failed");
+    }
+  }
+
+  public void testEchoWithMultipleVariables() {
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testEcho command gfshInstance is null");
+    }
+
+    gfshInstance.setEnvProperty("TESTSYS", "SYS_VALUE");
+    printAllEnvs(gfshInstance);
+
+    String command = "echo --string=\"${TESTSYS} Hello World! This is Pivotal ${TESTSYS}\"";
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      String stringResult = commandResultToString(cmdResult);
+      assertTrue(stringResult.contains("SYS_VALUE Hello World! This is Pivotal SYS_VALUE"));
+    } else {
+      fail("testEchoWithMultipleVariables failed");
+    }
+  }
+
+  public void testEchoAllPropertyVariables() {
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testEcho command gfshInstance is null");
+    }
+
+    String command = "echo --string=\"$*\"";
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testEchoAllPropertyVariables failed");
+    }
+  }
+
+  public void testEchoForSingleVariable() {
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testEcho command gfshInstance is null");
+    }
+
+    gfshInstance.setEnvProperty("TESTSYS", "SYS_VALUE");
+    printAllEnvs(gfshInstance);
+
+    String command = "echo --string=${TESTSYS}";
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+
+
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      String stringResult = commandResultToString(cmdResult);
+      assertTrue(stringResult.contains("SYS_VALUE"));
+    } else {
+      fail("testEchoForSingleVariable failed");
+    }
+  }
+
+  public void testEchoForSingleVariable2() {
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testEcho command gfshInstance is null");
+    }
+
+    gfshInstance.setEnvProperty("TESTSYS", "SYS_VALUE");
+    printAllEnvs(gfshInstance);
+
+    String command = "echo --string=\"${TESTSYS} ${TESTSYS}\"";
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      String stringResult = commandResultToString(cmdResult);
+      assertTrue(stringResult.contains("SYS_VALUE"));
+    } else {
+      fail("testEchoForSingleVariable2 failed");
+    }
+  }
+
+  public void testDebug() {
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testDebug command gfshInstance is null");
+    }
+
+    gfshInstance.setDebug(false);
+    String command = "debug --state=ON";
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testDebug failed");
+    }
+    assertTrue(gfshInstance.getDebug());
+
+  }
+
+  public void testHistory() {
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testHistory command gfshInstance is null");
+    }
+
+    gfshInstance.setDebug(false);
+    String command = "history";
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testHistory failed");
+    }
+  }
+
+  public void testHistoryWithFileName() {
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testHistory command gfshInstance is null");
+    }
+
+    String historyFileName = gfshInstance.getGfshConfig().getHistoryFileName();
+    File historyFile = new File(historyFileName);
+    String fileName = historyFile.getParent();
+    fileName = fileName + File.separator + getClass().getSimpleName() + "_" + getName() + "-exported.history";
+
+    String command = "history --file=" + fileName;
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+    } else {
+      fail("testHistoryWithFileName failed");
+    }
+  }
+
+  public void testClearHistory() {
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testClearHistory command gfshInstance is null");
+    }
+
+    gfshInstance.setDebug(false);
+    String command = "history --clear";
+    CommandResult cmdResult = executeCommand(command);
+    printCommandOutput(cmdResult);
+
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      getLogWriter().info("testClearHistory cmdResult=" + commandResultToString(cmdResult));
+      String resultString = commandResultToString(cmdResult);
+      getLogWriter().info("testClearHistory resultString=" + resultString);
+      assertTrue(resultString.contains(CliStrings.HISTORY__MSG__CLEARED_HISTORY));
+      assertTrue(gfshInstance.getGfshHistory().getHistoryList().size() <= 1);
+    } else {
+      fail("testClearHistory failed");
+    }
+  }
+
+  private static void printCommandOutput(CommandResult cmdResult) {
+    assertNotNull(cmdResult);
+    getLogWriter().info("Command Output : ");
+    StringBuilder sb = new StringBuilder();
+    cmdResult.resetToFirstLine();
+    while (cmdResult.hasNextLine()) {
+      sb.append(cmdResult.nextLine()).append(DataCommandRequest.NEW_LINE);
+    }
+    getLogWriter().info(sb.toString());
+    getLogWriter().info("");
+  }
+
+  private void printAllEnvs(Gfsh gfsh) {
+    getLogWriter().info("printAllEnvs : " + StringUtils.objectToString(gfsh.getEnv(), false, 0));
+    /*
+    getLogWriter().info("Gfsh printAllEnvs : " + HydraUtil.ObjectToString(getDefaultShell().getEnv()));    
+    getLogWriter().info("gfsh " + gfsh + " default shell " + getDefaultShell());*/
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowDeadlockDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowDeadlockDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowDeadlockDUnitTest.java
new file mode 100644
index 0000000..2d67129
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowDeadlockDUnitTest.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.execute.Function;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.FunctionService;
+import com.gemstone.gemfire.cache.execute.ResultCollector;
+import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.distributed.DistributedLockService;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.deadlock.GemFireDeadlockDetector;
+import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.CliUtil;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.remote.CommandProcessor;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * This DUnit test uses the same code as GemFireDeadlockDetectorDUnitTest and uses the command processor for executing the
+ * "show deadlock" command
+ */
+public class ShowDeadlockDUnitTest extends CacheTestCase {
+
+  /**
+   *
+   */
+  private static final long serialVersionUID = 1L;
+  private static final Set<Thread> stuckThreads = Collections.synchronizedSet(new HashSet<Thread>());
+  private static final Map<String, String> EMPTY_ENV = Collections.emptyMap();
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    // This test does not require an actual Gfsh connection to work, however when run as part of a suite, prior tests
+    // may mess up the environment causing this test to fail. Setting this prevents false failures.
+    CliUtil.isGfshVM = false;
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    invokeInEveryVM(new SerializableRunnable() {
+      private static final long serialVersionUID = 1L;
+
+      public void run() {
+        for (Thread thread : stuckThreads) {
+          thread.interrupt();
+        }
+      }
+    });
+    CliUtil.isGfshVM = true;
+  }
+
+  public ShowDeadlockDUnitTest(String name) {
+    super(name);
+  }
+
+  public void testNoDeadlock() throws ClassNotFoundException, IOException {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+
+    //Make sure a deadlock from a previous test is cleared.
+    disconnectAllFromDS();
+
+    createCache(vm0);
+    createCache(vm1);
+    createCache(new Properties());
+
+    String fileName = "dependency.txt";
+    GemFireDeadlockDetector detect = new GemFireDeadlockDetector();
+    assertEquals(null, detect.find().findCycle());
+
+    CommandProcessor commandProcessor = new CommandProcessor();
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.SHOW_DEADLOCK);
+    csb.addOption(CliStrings.SHOW_DEADLOCK__DEPENDENCIES__FILE, fileName);
+    Result result = commandProcessor.createCommandStatement(csb.toString(), EMPTY_ENV).process();
+
+    String deadLockOutputFromCommand = getResultAsString(result);
+
+    getLogWriter().info("output = " + deadLockOutputFromCommand);
+    assertEquals(true, result.hasIncomingFiles());
+    assertEquals(true, result.getStatus().equals(Status.OK));
+    assertEquals(true, deadLockOutputFromCommand.startsWith(CliStrings.SHOW_DEADLOCK__NO__DEADLOCK));
+    result.saveIncomingFiles(null);
+    File file = new File(fileName);
+    assertTrue(file.exists());
+    file.delete();
+
+    disconnectAllFromDS();
+  }
+
+  private static final Lock lock = new ReentrantLock();
+
+
+  public void testDistributedDeadlockWithFunction() throws InterruptedException, ClassNotFoundException, IOException {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    String filename = "gfeDependency.txt";
+    InternalDistributedMember member1 = createCache(vm0);
+    final InternalDistributedMember member2 = createCache(vm1);
+    createCache(new Properties());
+    //Have two threads lock locks on different members in different orders.
+    //This thread locks the lock on member1 first, then member2.
+    lockTheLocks(vm0, member2);
+    //This thread locks the lock on member2 first, then member1.
+    lockTheLocks(vm1, member1);
+
+    Thread.sleep(5000);
+    CommandProcessor commandProcessor = new CommandProcessor();
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.SHOW_DEADLOCK);
+    csb.addOption(CliStrings.SHOW_DEADLOCK__DEPENDENCIES__FILE, filename);
+    Result result = commandProcessor.createCommandStatement(csb.toString(), EMPTY_ENV).process();
+
+    String deadLockOutputFromCommand = getResultAsString(result);
+    getLogWriter().info("Deadlock = " + deadLockOutputFromCommand);
+    result.saveIncomingFiles(null);
+    assertEquals(true, deadLockOutputFromCommand.startsWith(CliStrings.SHOW_DEADLOCK__DEADLOCK__DETECTED));
+    assertEquals(true, result.getStatus().equals(Status.OK));
+    File file = new File(filename);
+    assertTrue(file.exists());
+    file.delete();
+
+  }
+
+
+  private void createCache(Properties props) {
+    getSystem(props);
+    final Cache cache = getCache();
+  }
+
+  private Properties createProperties(Host host, int locatorPort) {
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+//    props.setProperty(DistributionConfig.LOCATORS_NAME, getServerHostName(host) + "[" + locatorPort + "]");
+    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "info");
+    props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
+    props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
+    props.put(DistributionConfig.ENABLE_NETWORK_PARTITION_DETECTION_NAME, "true");
+    return props;
+  }
+
+  private void lockTheLocks(VM vm0, final InternalDistributedMember member) {
+    vm0.invokeAsync(new SerializableRunnable() {
+
+      private static final long serialVersionUID = 1L;
+
+      public void run() {
+        lock.lock();
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          fail("interrupted", e);
+        }
+        ResultCollector collector = FunctionService.onMember(system, member).execute(new TestFunction());
+        //wait for the function to lock the lock on the member.
+        collector.getResult();
+        lock.unlock();
+      }
+    });
+  }
+
+  private void lockTheDLocks(VM vm, final String first, final String second) {
+    vm.invokeAsync(new SerializableRunnable() {
+
+      private static final long serialVersionUID = 1L;
+
+      public void run() {
+        getCache();
+        DistributedLockService dls = DistributedLockService.create("deadlock_test", getSystem());
+        dls.lock(first, 10 * 1000, -1);
+
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+        }
+        dls.lock(second, 10 * 1000, -1);
+      }
+    });
+  }
+
+  private InternalDistributedMember createCache(VM vm) {
+    return (InternalDistributedMember) vm.invoke(new SerializableCallable() {
+      /**
+       *
+       */
+      private static final long serialVersionUID = 1L;
+
+      public Object call() {
+        getCache();
+        return getSystem().getDistributedMember();
+      }
+    });
+  }
+
+  private String getResultAsString(Result result) {
+    StringBuilder sb = new StringBuilder();
+    while (result.hasNextLine()) {
+      sb.append(result.nextLine());
+    }
+
+    return sb.toString();
+  }
+
+  private static class TestFunction implements Function {
+
+    private static final long serialVersionUID = 1L;
+    private static final int LOCK_WAIT_TIME = 1000;
+
+    public boolean hasResult() {
+      return true;
+    }
+
+    public void execute(FunctionContext context) {
+      try {
+        stuckThreads.add(Thread.currentThread());
+        lock.tryLock(LOCK_WAIT_TIME, TimeUnit.SECONDS);
+      } catch (InterruptedException e) {
+        //ignore
+      }
+      context.getResultSender().lastResult(null);
+    }
+
+    public String getId() {
+      return getClass().getCanonicalName();
+    }
+
+    public boolean optimizeForWrite() {
+      return false;
+    }
+
+    public boolean isHA() {
+      return false;
+    }
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowMetricsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowMetricsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowMetricsDUnitTest.java
new file mode 100644
index 0000000..a69c35a
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowMetricsDUnitTest.java
@@ -0,0 +1,347 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.server.CacheServer;
+import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.management.CacheServerMXBean;
+import com.gemstone.gemfire.management.DistributedRegionMXBean;
+import com.gemstone.gemfire.management.DistributedSystemMXBean;
+import com.gemstone.gemfire.management.ManagementService;
+import com.gemstone.gemfire.management.MemberMXBean;
+import com.gemstone.gemfire.management.RegionMXBean;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.remote.CommandProcessor;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import javax.management.ObjectName;
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Properties;
+
+/****
+ * @author bansods
+ */
+public class ShowMetricsDUnitTest extends CliCommandTestBase {
+
+  private static final long serialVersionUID = 1L;
+
+  public ShowMetricsDUnitTest(String name) {
+    super(name);
+    // TODO Auto-generated constructor stub
+  }
+
+  private void createLocalSetUp() {
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Controller");
+    getSystem(localProps);
+    Cache cache = getCache();
+    RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
+    Region region1 = dataRegionFactory.create("REGION1");
+    Region region2 = dataRegionFactory.create("REGION2");
+  }
+
+  /*
+   * tests the default version of "show metrics"
+   */
+  public void testShowMetricsDefault() {
+    createDefaultSetup(null);
+    createLocalSetUp();
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final String vm1Name = "VM" + vm1.getPid();
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm1Name);
+        getSystem(localProps);
+
+        Cache cache = getCache();
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
+        Region region = dataRegionFactory.create("REGION1");
+      }
+    });
+
+    SerializableCallable showMetricCmd = new SerializableCallable() {
+
+      @Override
+      public Object call() throws Exception {
+        WaitCriterion wc = createMBeanWaitCriterion(1, "", null, 0);
+        waitForCriterion(wc, 5000, 500, true);
+        CommandProcessor commandProcessor = new CommandProcessor();
+        Result result = commandProcessor.createCommandStatement("show metrics", Collections.EMPTY_MAP).process();
+        String resultStr = commandResultToString((CommandResult) result);
+        getLogWriter().info(resultStr);
+        assertEquals(resultStr, true, result.getStatus().equals(Status.OK));
+        return resultStr;
+      }
+    };
+
+    //Invoke the command in the Manager VM
+    final VM managerVm = Host.getHost(0).getVM(0);
+    Object managerResultObj = managerVm.invoke(showMetricCmd);
+
+    String managerResult = (String) managerResultObj;
+
+    getLogWriter().info("#SB Manager");
+    getLogWriter().info(managerResult);
+  }
+
+  public void systemSetUp() {
+    createDefaultSetup(null);
+    createLocalSetUp();
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final String vm1Name = "VM" + vm1.getPid();
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm1Name);
+        getSystem(localProps);
+
+        Cache cache = getCache();
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
+        Region region = dataRegionFactory.create("REGION1");
+      }
+    });
+  }
+
+  public void testShowMetricsRegion() throws InterruptedException {
+    systemSetUp();
+    final String regionName = "REGION1";
+    SerializableCallable showMetricCmd = new SerializableCallable() {
+
+      @Override
+      public Object call() throws Exception {
+        WaitCriterion wc = createMBeanWaitCriterion(2, regionName, null, 0);
+        waitForCriterion(wc, 5000, 500, true);
+        CommandProcessor commandProcessor = new CommandProcessor();
+        Result result = commandProcessor.createCommandStatement("show metrics --region=REGION1",
+            Collections.EMPTY_MAP).process();
+        String resultAsString = commandResultToString((CommandResult) result);
+        assertEquals(resultAsString, true, result.getStatus().equals(Status.OK));
+        return resultAsString;
+      }
+    };
+
+    //Invoke the command in the Manager VM
+    final VM managerVm = Host.getHost(0).getVM(0);
+    Object managerResultObj = managerVm.invoke(showMetricCmd);
+
+    String managerResult = (String) managerResultObj;
+
+    getLogWriter().info("#SB Manager");
+    getLogWriter().info(managerResult);
+  }
+
+  /***
+   * Creates a WaitCriterion that waits for an MBean of the given type to be registered.
+   *
+   * @param beanType 1=distributed system, 2=distributed region, 3=member, 4=region, 5=cache server
+   * @param regionName region name, used for the region MBean types
+   * @param distributedMember member owning the MBean, where applicable
+   * @param cacheServerPort cache server port, used for the cache server MBean
+   * @return the WaitCriterion
+   */
+  private WaitCriterion createMBeanWaitCriterion(final int beanType, final String regionName,
+      final DistributedMember distributedMember, final int cacheServerPort) {
+
+    WaitCriterion waitCriterion = new WaitCriterion() {
+
+      @Override
+      public boolean done() {
+        boolean done = false;
+        Cache cache = getCache();
+        ManagementService mgmtService = ManagementService.getManagementService(cache);
+        if (beanType == 1) {
+          DistributedSystemMXBean dsMxBean = mgmtService.getDistributedSystemMXBean();
+          if (dsMxBean != null) done = true;
+        } else if (beanType == 2) {
+          DistributedRegionMXBean dsRegionMxBean = mgmtService.getDistributedRegionMXBean("/" + regionName);
+          if (dsRegionMxBean != null) done = true;
+        } else if (beanType == 3) {
+          ObjectName memberMBeanName = mgmtService.getMemberMBeanName(distributedMember);
+          MemberMXBean memberMxBean = mgmtService.getMBeanInstance(memberMBeanName, MemberMXBean.class);
+
+          if (memberMxBean != null) done = true;
+        } else if (beanType == 4) {
+          ObjectName regionMBeanName = mgmtService.getRegionMBeanName(distributedMember, "/" + regionName);
+          RegionMXBean regionMxBean = mgmtService.getMBeanInstance(regionMBeanName, RegionMXBean.class);
+
+          if (regionMxBean != null) done = true;
+        } else if (beanType == 5) {
+          ObjectName csMxBeanName = mgmtService.getCacheServerMBeanName(cacheServerPort, distributedMember);
+          CacheServerMXBean csMxBean = mgmtService.getMBeanInstance(csMxBeanName, CacheServerMXBean.class);
+
+          if (csMxBean != null) {
+            done = true;
+          }
+        }
+
+        return done;
+      }
+
+      @Override
+      public String description() {
+        return "Waiting for the mbean to be created";
+      }
+    };
+
+    return waitCriterion;
+  }
+
+  public void testShowMetricsMember() throws ClassNotFoundException, IOException, InterruptedException {
+    systemSetUp();
+    Cache cache = getCache();
+    final DistributedMember distributedMember = cache.getDistributedSystem().getDistributedMember();
+    final String exportFileName = "memberMetricReport.csv";
+
+    int ports[] = AvailablePortHelper.getRandomAvailableTCPPorts(1);
+    CacheServer cs = getCache().addCacheServer();
+    cs.setPort(ports[0]);
+    cs.start();
+    final int cacheServerPort = cs.getPort();
+
+    SerializableCallable showMetricCmd = new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+
+        WaitCriterion wc = createMBeanWaitCriterion(3, "", distributedMember, 0);
+        waitForCriterion(wc, 5000, 500, true);
+        wc = createMBeanWaitCriterion(5, "", distributedMember, cacheServerPort);
+        waitForCriterion(wc, 10000, 500, true);
+
+        final String command = CliStrings.SHOW_METRICS + " --" + CliStrings.SHOW_METRICS__MEMBER + "=" + distributedMember.getId() + " --" + CliStrings.SHOW_METRICS__CACHESERVER__PORT + "=" + cacheServerPort + " --" + CliStrings.SHOW_METRICS__FILE + "=" + exportFileName;
+
+        CommandProcessor commandProcessor = new CommandProcessor();
+        Result result = commandProcessor.createCommandStatement(command, Collections.EMPTY_MAP).process();
+        String resultAsString = commandResultToString((CommandResult) result);
+        assertEquals(resultAsString, true, result.getStatus().equals(Status.OK));
+        assertTrue(result.hasIncomingFiles());
+        result.saveIncomingFiles(null);
+        File file = new File(exportFileName);
+        file.deleteOnExit();
+        assertTrue(file.exists());
+        file.delete();
+        return resultAsString;
+
+      }
+    };
+
+    //Invoke the command in the Manager VM
+    final VM managerVm = Host.getHost(0).getVM(0);
+    Object managerResultObj = managerVm.invoke(showMetricCmd);
+
+    String managerResult = (String) managerResultObj;
+
+    getLogWriter().info("#SB Manager");
+    getLogWriter().info(managerResult);
+    cs.stop();
+  }
+
+  public void testShowMetricsRegionFromMember() throws ClassNotFoundException, IOException, InterruptedException {
+    systemSetUp();
+    Cache cache = getCache();
+    final DistributedMember distributedMember = cache.getDistributedSystem().getDistributedMember();
+    final String exportFileName = "regionOnAMemberReport.csv";
+    final String regionName = "REGION1";
+
+    SerializableCallable showMetricCmd = new SerializableCallable() {
+
+      @Override
+      public Object call() throws Exception {
+
+        WaitCriterion wc = createMBeanWaitCriterion(4, regionName, distributedMember, 0);
+        waitForCriterion(wc, 5000, 500, true);
+        CommandProcessor commandProcessor = new CommandProcessor();
+        Result result = commandProcessor.createCommandStatement(
+            "show metrics --region=" + regionName + " --member=" + distributedMember.getName() + " --file=" + exportFileName,
+            Collections.EMPTY_MAP).process();
+        String resultAsString = commandResultToString((CommandResult) result);
+        assertEquals(resultAsString, true, result.getStatus().equals(Status.OK));
+        assertTrue(result.hasIncomingFiles());
+        result.saveIncomingFiles(null);
+        File file = new File(exportFileName);
+        file.deleteOnExit();
+        assertTrue(file.exists());
+        file.delete();
+        return resultAsString;
+      }
+    };
+
+    //Invoke the command in the Manager VM
+    final VM managerVm = Host.getHost(0).getVM(0);
+    Object managerResultObj = managerVm.invoke(showMetricCmd);
+
+    String managerResult = (String) managerResultObj;
+
+    getLogWriter().info("#SB Manager");
+    getLogWriter().info(managerResult);
+  }
+
+  public void testShowMetricsRegionFromMemberWithCategories() throws ClassNotFoundException, IOException, InterruptedException {
+    systemSetUp();
+    Cache cache = getCache();
+    final DistributedMember distributedMember = cache.getDistributedSystem().getDistributedMember();
+    final String exportFileName = "regionOnAMemberReport.csv";
+    final String regionName = "REGION1";
+
+    SerializableCallable showMetricCmd = new SerializableCallable() {
+
+      @Override
+      public Object call() throws Exception {
+
+        WaitCriterion wc = createMBeanWaitCriterion(4, regionName, distributedMember, 0);
+        waitForCriterion(wc, 5000, 500, true);
+        CommandProcessor commandProcessor = new CommandProcessor();
+        Result result = commandProcessor.createCommandStatement(
+            "show metrics --region=" + regionName + " --member=" + distributedMember.getName() + " --file=" + exportFileName + " --categories=region,eviction",
+            Collections.EMPTY_MAP).process();
+        String resultAsString = commandResultToString((CommandResult) result);
+        assertEquals(resultAsString, true, result.getStatus().equals(Status.OK));
+        assertTrue(result.hasIncomingFiles());
+        result.saveIncomingFiles(null);
+        File file = new File(exportFileName);
+        file.deleteOnExit();
+        assertTrue(file.exists());
+        file.delete();
+        return resultAsString;
+      }
+    };
+
+    //Invoke the command in the Manager VM
+    final VM managerVm = Host.getHost(0).getVM(0);
+    Object managerResultObj = managerVm.invoke(showMetricCmd);
+
+    String managerResult = (String) managerResultObj;
+
+    getLogWriter().info("#SB Manager");
+    getLogWriter().info(managerResult);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowStackTraceDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowStackTraceDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowStackTraceDUnitTest.java
new file mode 100644
index 0000000..d1dc87f
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowStackTraceDUnitTest.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
+
+/***
+ * DUnit test for 'show stack-trace' command
+ *
+ * @author bansods
+ */
+public class ShowStackTraceDUnitTest extends CliCommandTestBase {
+
+  private static final long serialVersionUID = 1L;
+
+  public ShowStackTraceDUnitTest(String name) {
+    super(name);
+  }
+
+  private void createCache(Properties props) {
+    getSystem(props);
+    getCache();
+  }
+
+  private Properties createProperties(Host host, String name, String groups) {
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "info");
+    props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
+    props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
+    props.setProperty(DistributionConfig.NAME_NAME, name);
+    props.setProperty(DistributionConfig.GROUPS_NAME, groups);
+    return props;
+  }
+
+  /***
+   * Sets up a system of 3 peers
+   */
+  private void setupSystem() {
+    disconnectAllFromDS();
+    final Host host = Host.getHost(0);
+    final VM[] servers = {host.getVM(0), host.getVM(1)};
+
+    final Properties propsManager = createProperties(host, "Manager", "G1");
+    final Properties propsServer2 = createProperties(host, "Server", "G2");
+
+    createDefaultSetup(propsManager);
+
+    servers[1].invoke(new SerializableRunnable("Create cache for server1") {
+      public void run() {
+        createCache(propsServer2);
+      }
+    });
+  }
+
+  /***
+   * Tests the default behavior of the show stack-trace command
+   *
+   * @throws ClassNotFoundException
+   * @throws IOException
+   */
+  public void testExportStacktrace() throws ClassNotFoundException, IOException {
+    setupSystem();
+
+    File allStacktracesFile = new File("allStackTraces.txt");
+    allStacktracesFile.createNewFile();
+    allStacktracesFile.deleteOnExit();
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.EXPORT_STACKTRACE);
+    csb.addOption(CliStrings.EXPORT_STACKTRACE__FILE, allStacktracesFile.getCanonicalPath());
+    String commandString = csb.toString();
+    getLogWriter().info("CommandString : " + commandString);
+    CommandResult commandResult = executeCommand(commandString);
+    getLogWriter().info("Output : \n" + commandResultToString(commandResult));
+    assertTrue(commandResult.getStatus().equals(Status.OK));
+
+    File mgrStacktraceFile = new File("managerStacktrace.txt");
+    mgrStacktraceFile.createNewFile();
+    mgrStacktraceFile.deleteOnExit();
+    csb = new CommandStringBuilder(CliStrings.EXPORT_STACKTRACE);
+    csb.addOption(CliStrings.EXPORT_STACKTRACE__FILE, mgrStacktraceFile.getCanonicalPath());
+    csb.addOption(CliStrings.EXPORT_STACKTRACE__MEMBER, "Manager");
+    commandString = csb.toString();
+    getLogWriter().info("CommandString : " + commandString);
+    commandResult = executeCommand(commandString);
+    getLogWriter().info("Output : \n" + commandResultToString(commandResult));
+    assertTrue(commandResult.getStatus().equals(Status.OK));
+
+    File serverStacktraceFile = new File("serverStacktrace.txt");
+    serverStacktraceFile.createNewFile();
+    serverStacktraceFile.deleteOnExit();
+    csb = new CommandStringBuilder(CliStrings.EXPORT_STACKTRACE);
+    csb.addOption(CliStrings.EXPORT_STACKTRACE__FILE, serverStacktraceFile.getCanonicalPath());
+    csb.addOption(CliStrings.EXPORT_STACKTRACE__MEMBER, "Server");
+    commandString = csb.toString();
+    getLogWriter().info("CommandString : " + commandString);
+    commandResult = executeCommand(commandString);
+    getLogWriter().info("Output : \n" + commandResultToString(commandResult));
+    assertTrue(commandResult.getStatus().equals(Status.OK));
+
+    File groupStacktraceFile = new File("groupstacktrace.txt");
+    groupStacktraceFile.createNewFile();
+    groupStacktraceFile.deleteOnExit();
+    csb = new CommandStringBuilder(CliStrings.EXPORT_STACKTRACE);
+    csb.addOption(CliStrings.EXPORT_STACKTRACE__FILE, groupStacktraceFile.getCanonicalPath());
+    csb.addOption(CliStrings.EXPORT_STACKTRACE__GROUP, "G2");
+    commandString = csb.toString();
+    getLogWriter().info("CommandString : " + commandString);
+    commandResult = executeCommand(commandString);
+    getLogWriter().info("Output : \n" + commandResultToString(commandResult));
+    assertTrue(commandResult.getStatus().equals(Status.OK));
+
+    File wrongStackTraceFile = new File("wrongStackTrace.txt");
+    wrongStackTraceFile.createNewFile();
+    wrongStackTraceFile.deleteOnExit();
+    csb = new CommandStringBuilder(CliStrings.EXPORT_STACKTRACE);
+    csb.addOption(CliStrings.EXPORT_STACKTRACE__FILE, wrongStackTraceFile.getCanonicalPath());
+    csb.addOption(CliStrings.EXPORT_STACKTRACE__MEMBER, "WrongMember");
+    commandString = csb.toString();
+    getLogWriter().info("CommandString : " + commandString);
+    commandResult = executeCommand(commandString);
+    getLogWriter().info("Output : \n" + commandResultToString(commandResult));
+    assertFalse(commandResult.getStatus().equals(Status.OK));
+  }
+}


[33/50] [abbrv] incubator-geode git commit: GEODE-608: Add rat task as a dependency for the check task

Posted by kl...@apache.org.
GEODE-608: Add rat task as a dependency for the check task

The rat constraints will now be applied whenever the check task
is performed (typically via `gradle build`).
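
For readers less familiar with Gradle task wiring, a minimal sketch of the
pattern this change relies on (assuming, as in this repository, that a rat
task is already defined by the build):

    // build.gradle (sketch only, not the full file)
    check.dependsOn rat   // 'gradle check' (and therefore 'gradle build') now runs the rat audit

The committed change below keeps the existing checkMissedTests dependency as
well, so the full line becomes `check.dependsOn checkMissedTests, rat`.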


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/e45539a8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/e45539a8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/e45539a8

Branch: refs/heads/feature/GEODE-291
Commit: e45539a831fb57c6fdcaafab52463790ee491f71
Parents: a31c8fb
Author: Anthony Baker <ab...@pivotal.io>
Authored: Wed Dec 2 08:45:11 2015 -0800
Committer: Anthony Baker <ab...@pivotal.io>
Committed: Thu Dec 10 09:44:57 2015 -0800

----------------------------------------------------------------------
 build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e45539a8/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index babdb8b..6045b72 100755
--- a/build.gradle
+++ b/build.gradle
@@ -567,7 +567,7 @@ subprojects {
     description 'Run this task before checking in code to validate changes. This task combines the following tasks: build, integrationTest, and distributedTest'
   }
 
-  check.dependsOn checkMissedTests
+  check.dependsOn checkMissedTests, rat
   combineReports.mustRunAfter check, test, integrationTest, distributedTest, checkMissedTests
   build.finalizedBy combineReports
   check.finalizedBy combineReports


[44/50] [abbrv] incubator-geode git commit: GEODE-503: Addresses config passwords written to logs

Posted by kl...@apache.org.
GEODE-503: Addresses config passwords written to logs

Prevents configuration passwords from being written to log files
for keystores used by SSL or any config parameter with the
keyword password in its name.

Adds a unit test, AbstractConfigJUnitTest, to validate the change.
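
As a rough illustration (not the actual AbstractConfig code), the kind of
name-based masking this change enforces can be sketched as follows; the class
and method names here are hypothetical stand-ins for
AbstractConfig.okToDisplayPropertyValue():

    // Sketch only: decide whether a configuration value may be echoed to the log.
    final class PasswordMaskingSketch {
      static boolean okToDisplay(String attName) {
        String name = attName.toLowerCase();
        // mask security-* attributes and anything with "password" in its name
        return !name.startsWith("security-") && !name.contains("password");
      }

      public static void main(String[] args) {
        System.out.println(okToDisplay("cluster-ssl-truststore-password")); // false (masked)
        System.out.println(okToDisplay("ssl-enabled"));                     // true  (displayed)
      }
    }

The real method masks more than just password-like names; for example, the new
AbstractConfigJUnitTest also expects "sysprop-value" and
"javax.net.ssl.keyStoreType" to be withheld.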


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/11c62f23
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/11c62f23
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/11c62f23

Branch: refs/heads/feature/GEODE-291
Commit: 11c62f232014d4c93cf3c625b31b1a3139613818
Parents: 34eb0fe
Author: Vince Ford <vf...@apache.org>
Authored: Thu Dec 10 11:01:13 2015 -0800
Committer: Vince Ford <vf...@apache.org>
Committed: Thu Dec 10 11:46:46 2015 -0800

----------------------------------------------------------------------
 .../gemfire/internal/AbstractConfig.java        |   4 -
 .../internal/AbstractConfigJUnitTest.java       | 114 +++++++++++++++++++
 2 files changed, 114 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11c62f23/gemfire-core/src/main/java/com/gemstone/gemfire/internal/AbstractConfig.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/AbstractConfig.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/AbstractConfig.java
index 2f2f0f7..ddf2970 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/AbstractConfig.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/AbstractConfig.java
@@ -204,10 +204,6 @@ public abstract class AbstractConfig implements Config {
   }
   
   private boolean okToDisplayPropertyValue(String attName) {
-    if (AbstractDistributionConfig.isWellKnownAttribute(attName)) {
-      // it is always ok to display the well know attributes
-      return true;
-    }
     if (attName.startsWith(DistributionConfig.SECURITY_PREFIX_NAME)) {
       return false;
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11c62f23/gemfire-core/src/test/java/com/gemstone/gemfire/internal/AbstractConfigJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/AbstractConfigJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/AbstractConfigJUnitTest.java
new file mode 100644
index 0000000..80c92e6
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/AbstractConfigJUnitTest.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal;
+
+import static org.junit.Assert.*;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.lang.reflect.Method;
+import java.util.Map;
+
+import junit.framework.TestCase;
+import com.gemstone.gemfire.test.junit.categories.UnitTest;
+
+@Category(UnitTest.class)
+public class AbstractConfigJUnitTest extends TestCase {
+
+  @Test
+  public void testDisplayPropertyValue() throws Exception {
+    AbstractConfigTestClass actc = new AbstractConfigTestClass();
+    // okToDisplayPropertyValue is private, so reach it through reflection;
+    // let any reflection failure propagate so the test cannot pass vacuously
+    Method method = actc.getClass().getSuperclass().getDeclaredMethod("okToDisplayPropertyValue", String.class);
+    method.setAccessible(true);
+    assertFalse((Boolean) method.invoke(actc, "password"));
+    assertFalse((Boolean) method.invoke(actc, "cluster-ssl-truststore-password"));
+    assertTrue((Boolean) method.invoke(actc, "cluster-ssl-enabled"));
+    assertFalse((Boolean) method.invoke(actc, "gateway-ssl-truststore-password"));
+    assertFalse((Boolean) method.invoke(actc, "server-ssl-keystore-password"));
+    assertTrue((Boolean) method.invoke(actc, "ssl-enabled"));
+    assertTrue((Boolean) method.invoke(actc, "conserve-sockets"));
+    assertFalse((Boolean) method.invoke(actc, "javax.net.ssl.keyStorePassword"));
+    assertFalse((Boolean) method.invoke(actc, "javax.net.ssl.keyStoreType"));
+    assertFalse((Boolean) method.invoke(actc, "sysprop-value"));
+  }
+
+}
+
+class AbstractConfigTestClass extends AbstractConfig{
+
+
+	@Override
+	protected Map getAttDescMap() {
+		return null;
+	}
+
+	@Override
+	protected Map<String, ConfigSource> getAttSourceMap() {
+		return null;
+	}
+
+	@Override
+	public Object getAttributeObject(String attName) {
+		return null;
+	}
+
+	@Override
+	public void setAttributeObject(String attName, Object attValue, ConfigSource source) {
+
+	}
+
+	@Override
+	public boolean isAttributeModifiable(String attName) {
+		return false;
+	}
+
+	@Override
+	public Class getAttributeType(String attName) {
+		return null;
+	}
+
+	@Override
+	public String[] getAttributeNames() {
+		return new String[0];
+	}
+
+	@Override
+	public String[] getSpecificAttributeNames() {
+		return new String[0];
+	}
+}
+
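
If you want to run just this new test locally, Gradle's test filtering can target it; the module and task names below are an assumption about the local build setup, not part of this commit:

    ./gradlew :gemfire-core:test --tests com.gemstone.gemfire.internal.AbstractConfigJUnitTest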


[38/50] [abbrv] incubator-geode git commit: GEODE-18: Added missing source headers

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/loaderNotLoader.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/loaderNotLoader.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/loaderNotLoader.xml
index d24f8d0..b0bf8f4 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/loaderNotLoader.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/loaderNotLoader.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.0//EN"
     "http://www.gemstone.com/dtd/cache4_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/malformed.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/malformed.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/malformed.xml
index 3c414be..8052ba5 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/malformed.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/malformed.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.0//EN"
     "http://www.gemstone.com/dtd/cache4_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/namedAttributes.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/namedAttributes.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/namedAttributes.xml
index b6025fa..2d3b4a5 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/namedAttributes.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/namedAttributes.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.1//EN"
     "http://www.gemstone.com/dtd/cache4_1.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/partitionedRegion.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/partitionedRegion.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/partitionedRegion.xml
index ee52010..773cb7b 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/partitionedRegion.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/partitionedRegion.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 5.0//EN"
     "http://www.gemstone.com/dtd/cache5_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/partitionedRegion51.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/partitionedRegion51.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/partitionedRegion51.xml
index 1181245..826e500 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/partitionedRegion51.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/partitionedRegion51.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 5.1//EN"
     "http://www.gemstone.com/dtd/cache5_1.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/sameRootRegion.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/sameRootRegion.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/sameRootRegion.xml
index b9dbfd4..f34061a 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/sameRootRegion.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/sameRootRegion.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.1//EN"
     "http://www.gemstone.com/dtd/cache4_1.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/sameSubregion.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/sameSubregion.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/sameSubregion.xml
index 09d3014..26f95bb 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/sameSubregion.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/sameSubregion.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.1//EN"
     "http://www.gemstone.com/dtd/cache4_1.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/unknownNamedAttributes.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/unknownNamedAttributes.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/unknownNamedAttributes.xml
index 1e51336..7e570e8 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/unknownNamedAttributes.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/unknownNamedAttributes.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.1//EN"
     "http://www.gemstone.com/dtd/cache4_1.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/distributed/internal/SharedConfigurationJUnitTest.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/distributed/internal/SharedConfigurationJUnitTest.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/distributed/internal/SharedConfigurationJUnitTest.xml
index 65d5ddc..68ce00f 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/distributed/internal/SharedConfigurationJUnitTest.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/distributed/internal/SharedConfigurationJUnitTest.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 8.0//EN"
     "http://www.gemstone.com/dtd/cache8_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/BackupJUnitTest.cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/BackupJUnitTest.cache.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/BackupJUnitTest.cache.xml
index 4fabee0..01570f4 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/BackupJUnitTest.cache.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/BackupJUnitTest.cache.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 5.1//EN" "http://www.gemstone.com/dtd/cache5_1.dtd">
 <cache>
   <!--  nothing special here, we just want to make sure this file gets backed up, byte for byte. -->

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.xml
index fe575ae..b6818d0 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.xml
@@ -1,4 +1,20 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 
 <!-- DiskRegion.xml
      Configures the Disk Regions to Overflow / Persist /PersistWithOverflow its data to disk.  

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/PartitionRegionCacheExample1.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/PartitionRegionCacheExample1.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/PartitionRegionCacheExample1.xml
index ccdebae..e6e907e 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/PartitionRegionCacheExample1.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/PartitionRegionCacheExample1.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 5.1//EN" "http://www.gemstone.com/dtd/cache5_1.dtd">
 <cache>
 	<!-- Create a new root region as partition region -->

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/PartitionRegionCacheExample2.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/PartitionRegionCacheExample2.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/PartitionRegionCacheExample2.xml
index de1db01..6434103 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/PartitionRegionCacheExample2.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/PartitionRegionCacheExample2.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 5.1//EN" "http://www.gemstone.com/dtd/cache5_1.dtd">
 <cache search-timeout="60" lock-lease="300">
   <region name="root">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_bytes_threshold.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_bytes_threshold.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_bytes_threshold.xml
index 6d757d3..b734ce1 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_bytes_threshold.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_bytes_threshold.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN" "http://www.gemstone.com/dtd/cache6_5.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300" is-server="false" copy-on-read="false">
   <cache-transaction-manager/>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_dir.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_dir.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_dir.xml
index 25ce234..71aba12 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_dir.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_dir.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN" "http://www.gemstone.com/dtd/cache6_5.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300" is-server="false" copy-on-read="false">
   <cache-transaction-manager/>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_dir_size.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_dir_size.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_dir_size.xml
index 71bf3be..a03139f 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_dir_size.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_dir_size.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN" "http://www.gemstone.com/dtd/cache6_5.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300" is-server="false" copy-on-read="false">
   <cache-transaction-manager/>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_max_oplog_size.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_max_oplog_size.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_max_oplog_size.xml
index f7a6e4b..71dc62f 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_max_oplog_size.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_max_oplog_size.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN" "http://www.gemstone.com/dtd/cache6_5.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300" is-server="false" copy-on-read="false">
   <cache-transaction-manager/>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_roll_oplogs_value.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_roll_oplogs_value.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_roll_oplogs_value.xml
index 34e59b2..f3d8594 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_roll_oplogs_value.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_roll_oplogs_value.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN" "http://www.gemstone.com/dtd/cache6_5.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300" is-server="false" copy-on-read="false">
   <cache-transaction-manager/>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_sync_value.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_sync_value.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_sync_value.xml
index acaeb58..990fa24 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_sync_value.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_sync_value.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN" "http://www.gemstone.com/dtd/cache6_5.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300" is-server="false" copy-on-read="false">
   <cache-transaction-manager/>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_time_interval.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_time_interval.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_time_interval.xml
index 40fbaac..a55b690 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_time_interval.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/incorrect_time_interval.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN" "http://www.gemstone.com/dtd/cache6_5.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300" is-server="false" copy-on-read="false">
   <cache-transaction-manager/>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/mixed_diskstore_diskdir.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/mixed_diskstore_diskdir.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/mixed_diskstore_diskdir.xml
index c73190f..6fcb012 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/mixed_diskstore_diskdir.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/mixed_diskstore_diskdir.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN" "http://www.gemstone.com/dtd/cache6_5.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300" is-server="false" copy-on-read="false">
   <cache-transaction-manager/>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/mixed_diskstore_diskwriteattrs.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/mixed_diskstore_diskwriteattrs.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/mixed_diskstore_diskwriteattrs.xml
index 38f75fc..6d23a22 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/mixed_diskstore_diskwriteattrs.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/faultyDiskXMLsForTesting/mixed_diskstore_diskwriteattrs.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN" "http://www.gemstone.com/dtd/cache6_5.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300" is-server="false" copy-on-read="false">
   <cache-transaction-manager/>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelJUnitTest.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelJUnitTest.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelJUnitTest.xml
index 38ad448..76f0c15 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelJUnitTest.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelJUnitTest.xml
@@ -1,4 +1,20 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 
 <!-- redundancylevel.xml
      Configures a region as a client region. The region's loader and writer 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParserJUnitTest.testDTDFallbackWithNonEnglishLocal.cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParserJUnitTest.testDTDFallbackWithNonEnglishLocal.cache.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParserJUnitTest.testDTDFallbackWithNonEnglishLocal.cache.xml
index 89e93f6..7fb89e4 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParserJUnitTest.testDTDFallbackWithNonEnglishLocal.cache.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/cache/xmlcache/CacheXmlParserJUnitTest.testDTDFallbackWithNonEnglishLocal.cache.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
   "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN"
   "http://www.gemstone.com/dtd/cache6_5.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/jta/cachejta.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/jta/cachejta.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/jta/cachejta.xml
index 29273f1..d31a634 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/jta/cachejta.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/internal/jta/cachejta.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?> 
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
   "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.1//EN"
   "http://www.gemstone.com/dtd/cache4_1.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewNamed.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewNamed.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewNamed.xml
index 3055f31..e8e8d72 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewNamed.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewNamed.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewUnnamed.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewUnnamed.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewUnnamed.xml
index fb50e6b..b27039d 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewUnnamed.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewUnnamed.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewUnnamedExtension.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewUnnamedExtension.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewUnnamedExtension.xml
index 38f791d..32cf0c2 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewUnnamedExtension.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeNewUnnamedExtension.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceNamed.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceNamed.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceNamed.xml
index 4f5b1a1..69e919b 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceNamed.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceNamed.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceUnnamed.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceUnnamed.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceUnnamed.xml
index 4db3799..ed88234 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceUnnamed.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceUnnamed.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceUnnamedExtension.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceUnnamedExtension.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceUnnamedExtension.xml
index a3238d8..a2cecf0 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceUnnamedExtension.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testAddNewNodeReplaceUnnamedExtension.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeNamed.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeNamed.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeNamed.xml
index f9d548b..3f74bcb 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeNamed.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeNamed.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeUnnamed.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeUnnamed.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeUnnamed.xml
index b325fbf..85d6acc 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeUnnamed.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeUnnamed.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeUnnamedExtension.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeUnnamedExtension.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeUnnamedExtension.xml
index 53d8d6d..07afbce 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeUnnamedExtension.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.testDeleteNodeUnnamedExtension.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.xml
index bdefa30..bdf18e4 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsAddNewNodeJUnitTest.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapAttribute.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapAttribute.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapAttribute.xml
index 832d756..257eb8e 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapAttribute.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapAttribute.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapEmptyAttribute.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapEmptyAttribute.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapEmptyAttribute.xml
index 05843bf..968ca6a 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapEmptyAttribute.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapEmptyAttribute.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapMapOfStringListOfStringAttribute.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapMapOfStringListOfStringAttribute.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapMapOfStringListOfStringAttribute.xml
index 87b3cac..bf2b2d8 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapMapOfStringListOfStringAttribute.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapMapOfStringListOfStringAttribute.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapNullAttribute.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapNullAttribute.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapNullAttribute.xml
index 95d11b3..d32cb2c 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapNullAttribute.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testBuildSchemaLocationMapNullAttribute.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testQuerySingleElement.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testQuerySingleElement.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testQuerySingleElement.xml
index 76b851e..d5f41d6 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testQuerySingleElement.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/utils/XmlUtilsJUnitTest.testQuerySingleElement.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/jta/cachejta.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/jta/cachejta.xml b/gemfire-core/src/test/resources/jta/cachejta.xml
index 87727fc..9a36ee6 100644
--- a/gemfire-core/src/test/resources/jta/cachejta.xml
+++ b/gemfire-core/src/test/resources/jta/cachejta.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?> 
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
   "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.0//EN"
   "http://www.gemstone.com/dtd/cache6_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/spring/spring-gemfire-context.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/spring/spring-gemfire-context.xml b/gemfire-core/src/test/resources/spring/spring-gemfire-context.xml
index bb40d1e..a728493 100644
--- a/gemfire-core/src/test/resources/spring/spring-gemfire-context.xml
+++ b/gemfire-core/src/test/resources/spring/spring-gemfire-context.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="utf-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <beans xmlns="http://www.springframework.org/schema/beans"
        xmlns:gfe="http://www.springframework.org/schema/gemfire"
        xmlns:util="http://www.springframework.org/schema/util"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-lucene/build.gradle
----------------------------------------------------------------------
diff --git a/gemfire-lucene/build.gradle b/gemfire-lucene/build.gradle
index ff47748..6218e55 100644
--- a/gemfire-lucene/build.gradle
+++ b/gemfire-lucene/build.gradle
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 dependencies {
     compile project(':gemfire-core')
     compile project(':gemfire-common')

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml b/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
index 42e4e84..47f3250 100644
--- a/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
+++ b/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:lucene="http://geode.apache.org/schema/lucene"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndex.cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndex.cache.xml b/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndex.cache.xml
index 42e4e84..47f3250 100644
--- a/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndex.cache.xml
+++ b/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndex.cache.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:lucene="http://geode.apache.org/schema/lucene"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-rebalancer/build.gradle
----------------------------------------------------------------------
diff --git a/gemfire-rebalancer/build.gradle b/gemfire-rebalancer/build.gradle
index cbb6803..1f9bff8 100644
--- a/gemfire-rebalancer/build.gradle
+++ b/gemfire-rebalancer/build.gradle
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 dependencies {
     provided project(':gemfire-common')
     provided project(':gemfire-core')

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-spark-connector/gemfire-spark-connector/src/it/resources/test-regions.xml
----------------------------------------------------------------------
diff --git a/gemfire-spark-connector/gemfire-spark-connector/src/it/resources/test-regions.xml b/gemfire-spark-connector/gemfire-spark-connector/src/it/resources/test-regions.xml
index 9c59d7d..79893d6 100644
--- a/gemfire-spark-connector/gemfire-spark-connector/src/it/resources/test-regions.xml
+++ b/gemfire-spark-connector/gemfire-spark-connector/src/it/resources/test-regions.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
   "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN"
   "http://www.gemstone.com/dtd/cache6_5.dtd" >

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-spark-connector/gemfire-spark-connector/src/it/resources/test-retrieve-regions.xml
----------------------------------------------------------------------
diff --git a/gemfire-spark-connector/gemfire-spark-connector/src/it/resources/test-retrieve-regions.xml b/gemfire-spark-connector/gemfire-spark-connector/src/it/resources/test-retrieve-regions.xml
index 1608751..3023959 100644
--- a/gemfire-spark-connector/gemfire-spark-connector/src/it/resources/test-retrieve-regions.xml
+++ b/gemfire-spark-connector/gemfire-spark-connector/src/it/resources/test-retrieve-regions.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
   "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN"
   "http://www.gemstone.com/dtd/cache6_5.dtd" >

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-web-api/build.gradle
----------------------------------------------------------------------
diff --git a/gemfire-web-api/build.gradle b/gemfire-web-api/build.gradle
index 476872f..7c35ecd 100755
--- a/gemfire-web-api/build.gradle
+++ b/gemfire-web-api/build.gradle
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 apply plugin: 'war'
 
 dependencies {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-web/build.gradle
----------------------------------------------------------------------
diff --git a/gemfire-web/build.gradle b/gemfire-web/build.gradle
index c438de4..1c926c0 100755
--- a/gemfire-web/build.gradle
+++ b/gemfire-web/build.gradle
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 apply plugin: 'war'
 
 dependencies {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gradle.properties
----------------------------------------------------------------------
diff --git a/gradle.properties b/gradle.properties
index 03a07d8..c6ca51f 100755
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,3 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 versionNumber = 1.0.0-incubating
 releaseType = SNAPSHOT
 



[14/50] [abbrv] incubator-geode git commit: GEODE-563: Moving gfsh tests from closed

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DiskStoreCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DiskStoreCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DiskStoreCommandsDUnitTest.java
new file mode 100644
index 0000000..83264d5
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DiskStoreCommandsDUnitTest.java
@@ -0,0 +1,1154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.DiskStore;
+import com.gemstone.gemfire.cache.DiskStoreFactory;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.cache.query.data.PortfolioPdx;
+import com.gemstone.gemfire.compression.SnappyCompressor;
+import com.gemstone.gemfire.distributed.DistributedSystemDisconnectedException;
+import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalLocator;
+import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
+import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.internal.cache.DiskStoreImpl;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.SnapshotTestUtil;
+import com.gemstone.gemfire.internal.cache.persistence.PersistentMemberManager;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import dunit.VM;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.StringTokenizer;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * The DiskStoreCommandsDUnitTest class is a distributed test suite of test cases for the disk store commands
+ * that are part of Gfsh.
+ *
+ * @author John Blum
+ * @author David Hoots
+ * @see com.gemstone.gemfire.management.internal.cli.commands.DiskStoreCommands
+ * @see org.junit.Assert
+ * @see org.junit.Test
+ * @since 7.0
+ */
+public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
+  private static final long serialVersionUID = 1L;
+
+  final List<String> filesToBeDeleted = new CopyOnWriteArrayList<String>();
+
+  public DiskStoreCommandsDUnitTest(final String testName) {
+    super(testName);
+  }
+
+  @SuppressWarnings("serial")
+  @Test
+  public void testMissingDiskStore() {
+    final String regionName = "testShowMissingDiskStoreRegion";
+
+    createDefaultSetup(null);
+
+    final VM vm0 = Host.getHost(0).getVM(0);
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final String vm1Name = "VM" + vm1.getPid();
+    final String diskStoreName = "DiskStoreCommandsDUnitTest";
+
+    // Default setup creates a cache in the Manager, now create a cache in VM1
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm1Name);
+        getSystem(localProps);
+        Cache cache = getCache();
+      }
+    });
+
+    // Create a disk store and region in the Manager (VM0) and VM1 VMs
+    for (final VM vm : (new VM[]{vm0, vm1})) {
+      final String vmName = "VM" + vm.getPid();
+      vm.invoke(new SerializableRunnable() {
+        public void run() {
+          Cache cache = getCache();
+
+          File diskStoreDirFile = new File(diskStoreName + vm.getPid());
+          diskStoreDirFile.mkdirs();
+
+          DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+          diskStoreFactory.setDiskDirs(new File[]{diskStoreDirFile});
+          diskStoreFactory.setMaxOplogSize(1);
+          diskStoreFactory.setAllowForceCompaction(true);
+          diskStoreFactory.setAutoCompact(false);
+          diskStoreFactory.create(regionName);
+
+          RegionFactory regionFactory = cache.createRegionFactory();
+          regionFactory.setDiskStoreName(regionName);
+          regionFactory.setDiskSynchronous(true);
+          regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
+          regionFactory.setScope(Scope.DISTRIBUTED_ACK);
+          regionFactory.create(regionName);
+        }
+      });
+    }
+
+    // Add data to the region
+    vm0.invoke(new SerializableRunnable() {
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(regionName);
+        region.put("A", "B");
+      }
+    });
+
+    // Make sure that everything thus far is okay and there are no missing disk stores
+    CommandResult cmdResult = executeCommand(CliStrings.SHOW_MISSING_DISK_STORE);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertTrue(commandResultToString(cmdResult).contains("No missing disk store found"));
+
+    // Close the region in the Manager (VM0) VM
+    vm0.invoke(new SerializableRunnable() {
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(regionName);
+        region.close();
+      }
+    });
+
+    // Add data to VM1 and then close the region
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(regionName);
+        region.put("A", "C");
+        region.close();
+      }
+    });
+
+    // Add the region back to the Manager (VM0) VM
+    vm0.invokeAsync(new SerializableRunnable() {
+      public void run() {
+        Cache cache = getCache();
+
+        RegionFactory regionFactory = cache.createRegionFactory();
+        regionFactory.setDiskStoreName(regionName);
+        regionFactory.setDiskSynchronous(true);
+        regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
+        regionFactory.setScope(Scope.DISTRIBUTED_ACK);
+        try {
+          regionFactory.create(regionName);
+        } catch (DistributedSystemDisconnectedException e) {
+          // okay to ignore
+        }
+      }
+    });
+
+    // Wait for the region in the Manager (VM0) to come online
+    vm0.invoke(new SerializableRunnable() {
+      public void run() {
+        WaitCriterion waitCriterion = new WaitCriterion() {
+          public boolean done() {
+            Cache cache = getCache();
+            PersistentMemberManager memberManager = ((GemFireCacheImpl) cache).getPersistentMemberManager();
+            return !memberManager.getWaitingRegions().isEmpty();
+          }
+
+          public String description() {
+            return "Waiting for another persistent member to come online";
+          }
+        };
+        waitForCriterion(waitCriterion, 70000, 100, true);
+      }
+    });
+
+    // Validate that there is a missing disk store on VM1
+    cmdResult = executeCommand(CliStrings.SHOW_MISSING_DISK_STORE);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    String stringResult = commandResultToString(cmdResult);
+    System.out.println("command result=" + stringResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, "Disk Store ID.*Host.*Directory"));
+    assertTrue(stringContainsLine(stringResult, ".*" + diskStoreName + vm1.getPid()));
+
+    // Extract the id from the returned missing disk store
+    String line = getLineFromString(stringResult, 3);
+    assertFalse(line.contains("---------"));
+    StringTokenizer resultTokenizer = new StringTokenizer(line);
+    String id = resultTokenizer.nextToken();
+
+    // Remove the missing disk store and validate the result
+    cmdResult = executeCommand("revoke missing-disk-store --id=" + id);
+    assertNotNull(cmdResult);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertTrue(commandResultToString(cmdResult).contains("Missing disk store successfully revoked"));
+
+    // Do our own cleanup so that the disk store directories can be removed
+    super.destroyDefaultSetup();
+    for (final VM vm : (new VM[]{vm0, vm1})) {
+      final String vmName = "VM" + vm.getPid();
+      vm.invoke(new SerializableRunnable() {
+        public void run() {
+          try {
+            FileUtil.delete((new File(diskStoreName + vm.getPid())));
+          } catch (IOException iex) {
+            // There's nothing else we can do
+          }
+        }
+      });
+    }
+  }
+
+  public void testDescribeOfflineDiskStore() {
+    createDefaultSetup(null);
+
+    final File diskStoreDir = new File(new File(".").getAbsolutePath(), "DiskStoreCommandDUnitDiskStores");
+    diskStoreDir.mkdir();
+    this.filesToBeDeleted.add(diskStoreDir.getAbsolutePath());
+
+    final String diskStoreName1 = "DiskStore1";
+    final String region1 = "Region1";
+    final String region2 = "Region2";
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        final Cache cache = getCache();
+
+        DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+        diskStoreFactory.setDiskDirs(new File[]{diskStoreDir});
+        final DiskStore diskStore1 = diskStoreFactory.create(diskStoreName1);
+        assertNotNull(diskStore1);
+
+        RegionFactory regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
+        regionFactory.setDiskStoreName(diskStoreName1);
+        regionFactory.setDiskSynchronous(true);
+        regionFactory.create(region1);
+
+        regionFactory.setCompressor(SnappyCompressor.getDefaultInstance());
+        regionFactory.create(region2);
+
+        cache.close();
+        assertTrue(new File(diskStoreDir, "BACKUP" + diskStoreName1 + ".if").exists());
+      }
+    });
+
+    CommandResult cmdResult = executeCommand(
+        "describe offline-disk-store --name=" + diskStoreName1 + " --disk-dirs=" + diskStoreDir.getAbsolutePath());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    String stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult,
+        ".*/" + region1 + ": -lru=none -concurrencyLevel=16 -initialCapacity=16 -loadFactor=0.75 -offHeap=false -compressor=none -statisticsEnabled=false"));
+    assertTrue(stringContainsLine(stringResult,
+        ".*/" + region2 + ": -lru=none -concurrencyLevel=16 -initialCapacity=16 -loadFactor=0.75 -offHeap=false -compressor=com.gemstone.gemfire.compression.SnappyCompressor -statisticsEnabled=false"));
+
+    cmdResult = executeCommand(
+        "describe offline-disk-store --name=" + diskStoreName1 + " --disk-dirs=" + diskStoreDir.getAbsolutePath() + " --region=/" + region1);
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(2, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, ".*/" + region1 + ": .*"));
+    assertFalse(stringContainsLine(stringResult, ".*/" + region2 + ": .*"));
+  }
+
+  public void testOfflineDiskStorePdxCommands() {
+    final Properties props = new Properties();
+    props.setProperty("mcast-port", "0");
+    props.setProperty("start-locator", "localhost[" + AvailablePortHelper.getRandomAvailableTCPPort() + "]");
+
+    final File diskStoreDir = new File(new File(".").getAbsolutePath(), "DiskStoreCommandDUnitDiskStores");
+    diskStoreDir.mkdir();
+    this.filesToBeDeleted.add(diskStoreDir.getAbsolutePath());
+
+    final String diskStoreName1 = "DiskStore1";
+    final String region1 = "Region1";
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        final Cache cache = new CacheFactory(props).setPdxPersistent(true).setPdxDiskStore(diskStoreName1).create();
+
+        DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+        diskStoreFactory.setDiskDirs(new File[]{diskStoreDir});
+        final DiskStore diskStore1 = diskStoreFactory.create(diskStoreName1);
+        assertNotNull(diskStore1);
+
+        RegionFactory regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
+        regionFactory.setDiskStoreName(diskStoreName1);
+        regionFactory.setDiskSynchronous(true);
+        Region r1 = regionFactory.create(region1);
+        r1.put("key-1", new PortfolioPdx(1));
+
+        cache.close();
+        assertTrue(new File(diskStoreDir, "BACKUP" + diskStoreName1 + ".if").exists());
+      }
+    });
+
+    CommandResult cmdResult = executeCommand(
+        "describe offline-disk-store --name=" + diskStoreName1 + " --disk-dirs=" + diskStoreDir.getAbsolutePath() + " --pdx=true");
+    String stringResult = commandResultToString(cmdResult);
+    assertTrue(stringContainsLine(stringResult, ".*PDX Types.*"));
+    assertTrue(stringContainsLine(stringResult, ".*com\\.gemstone\\.gemfire\\.cache\\.query\\.data\\.PortfolioPdx.*"));
+    assertTrue(stringContainsLine(stringResult, ".*com\\.gemstone\\.gemfire\\.cache\\.query\\.data\\.PositionPdx.*"));
+    assertTrue(stringContainsLine(stringResult, ".*PDX Enums.*"));
+    assertTrue(
+        stringContainsLine(stringResult, ".*com\\.gemstone\\.gemfire\\.cache\\.query\\.data\\.PortfolioPdx\\$Day.*"));
+  }
+
+
+  public void testValidateDiskStore() {
+    createDefaultSetup(null);
+
+    final File diskStoreDir = new File(new File(".").getAbsolutePath(), "DiskStoreCommandDUnitDiskStores");
+    diskStoreDir.mkdir();
+    this.filesToBeDeleted.add(diskStoreDir.getAbsolutePath());
+
+    final String diskStoreName1 = "DiskStore1";
+    final String region1 = "Region1";
+    final String region2 = "Region2";
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        final Cache cache = getCache();
+
+        DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+        diskStoreFactory.setDiskDirs(new File[]{diskStoreDir});
+        final DiskStore diskStore1 = diskStoreFactory.create(diskStoreName1);
+        assertNotNull(diskStore1);
+
+        RegionFactory regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
+        regionFactory.setDiskStoreName(diskStoreName1);
+        regionFactory.setDiskSynchronous(true);
+        regionFactory.create(region1);
+        regionFactory.create(region2);
+
+        cache.close();
+        assertTrue(new File(diskStoreDir, "BACKUP" + diskStoreName1 + ".if").exists());
+      }
+    });
+    String command = "validate offline-disk-store --name=" + diskStoreName1 + " --disk-dirs=" + diskStoreDir.getAbsolutePath();
+    getLogWriter().info("testValidateDiskStore command: " + command);
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testValidateDiskStore cmdResult is stringResult " + stringResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      assertTrue(stringResult.contains("Total number of region entries in this disk store is"));
+
+    } else {
+      getLogWriter().info("testValidateDiskStore cmdResult is null");
+      fail("Did not get CommandResult in testValidateDiskStore");
+    }
+  }
+
+  public void testExportOfflineDiskStore() throws Exception {
+    createDefaultSetup(null);
+
+    final File diskStoreDir = new File(new File(".").getAbsolutePath(), "DiskStoreCommandDUnitDiskStores");
+    diskStoreDir.mkdir();
+    this.filesToBeDeleted.add(diskStoreDir.getAbsolutePath());
+    final File exportDir = new File(new File(".").getAbsolutePath(), "DiskStoreCommandDUnitExport");
+    exportDir.mkdir();
+    this.filesToBeDeleted.add(exportDir.getAbsolutePath());
+
+    final String diskStoreName1 = "DiskStore1";
+    final String region1 = "Region1";
+    final String region2 = "Region2";
+    final Map<String, String> entries = new HashMap<String, String>();
+    entries.put("key1", "value1");
+    entries.put("key2", "value2");
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        final Cache cache = getCache();
+
+        DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+        diskStoreFactory.setDiskDirs(new File[]{diskStoreDir});
+        final DiskStore diskStore1 = diskStoreFactory.create(diskStoreName1);
+        assertNotNull(diskStore1);
+
+        RegionFactory regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
+        regionFactory.setDiskStoreName(diskStoreName1);
+        regionFactory.setDiskSynchronous(true);
+        Region r1 = regionFactory.create(region1);
+        r1.putAll(entries);
+        Region r2 = regionFactory.create(region2);
+        r2.putAll(entries);
+
+        cache.close();
+        assertTrue(new File(diskStoreDir, "BACKUP" + diskStoreName1 + ".if").exists());
+      }
+    });
+    String command = "export offline-disk-store --name=" + diskStoreName1 + " --disk-dirs=" + diskStoreDir.getAbsolutePath() + " --dir=" + exportDir;
+    getLogWriter().info("testExportDiskStore command" + command);
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      String stringResult = commandResultToString(cmdResult);
+      SnapshotTestUtil.checkSnapshotEntries(exportDir, entries, diskStoreName1, region1);
+      SnapshotTestUtil.checkSnapshotEntries(exportDir, entries, diskStoreName1, region2);
+
+    } else {
+      getLogWriter().info("testExportOfflineDiskStore cmdResult is null");
+      fail("Did not get CommandResult in testExportOfflineDiskStore");
+    }
+  }
+
+  /**
+   * Asserts that creating and destroying disk stores correctly updates the shared configuration.
+   */
+  public void testCreateDestroyUpdatesSharedConfig() {
+    disconnectAllFromDS();
+
+    final String groupName = "testDiskStoreSharedConfigGroup";
+    final String diskStoreName = "testDiskStoreSharedConfigDiskStore";
+
+    // Start the Locator and wait for shared configuration to be available
+    final int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+
+        final File locatorLogFile = new File("locator-" + locatorPort + ".log");
+        final Properties locatorProps = new Properties();
+        locatorProps.setProperty(DistributionConfig.NAME_NAME, "Locator");
+        locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        locatorProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "fine");
+        locatorProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+        try {
+          final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
+              locatorProps);
+
+          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+            @Override
+            public boolean done() {
+              return locator.isSharedConfigurationRunning();
+            }
+
+            @Override
+            public String description() {
+              return "Waiting for shared configuration to be started";
+            }
+          };
+          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+        } catch (IOException ioex) {
+          fail("Unable to create a locator with a shared configuration");
+        }
+      }
+    });
+
+    // Start the default manager
+    Properties managerProps = new Properties();
+    managerProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    managerProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+    createDefaultSetup(managerProps);
+
+    // Create a cache in VM 1
+    final File diskStoreDir = new File(new File(".").getAbsolutePath(), diskStoreName);
+    this.filesToBeDeleted.add(diskStoreDir.getAbsolutePath());
+    VM vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        diskStoreDir.mkdirs();
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        getSystem(localProps);
+        assertNotNull(getCache());
+      }
+    });
+
+    // Test creating the disk store
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE);
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__NAME, diskStoreName);
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__GROUP, groupName);
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, diskStoreDir.getAbsolutePath());
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Make sure the disk store exists in the shared config
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        String xmlFromConfig;
+        try {
+          xmlFromConfig = sharedConfig.getConfiguration(groupName).getCacheXmlContent();
+          assertTrue(xmlFromConfig.contains(diskStoreName));
+        } catch (Exception e) {
+          fail("Error occurred in cluster configuration service", e);
+        }
+      }
+    });
+
+    // Restart the cache and make sure it has the disk store
+    vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableCallable() {
+      @Override
+      public Object call() {
+        getCache().close();
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        localProps.setProperty(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "true");
+        getSystem(localProps);
+        Cache cache = getCache();
+        assertNotNull(cache);
+
+        GemFireCacheImpl gfc = (GemFireCacheImpl) cache;
+        Collection<DiskStoreImpl> diskStoreList = gfc.listDiskStores();
+        assertNotNull(diskStoreList);
+        assertFalse(diskStoreList.isEmpty());
+        assertEquals(1, diskStoreList.size());
+        assertEquals(diskStoreName, diskStoreList.iterator().next().getName());
+        return null;
+      }
+    });
+
+    // Test destroying the disk store
+    commandStringBuilder = new CommandStringBuilder(CliStrings.DESTROY_DISK_STORE);
+    commandStringBuilder.addOption(CliStrings.DESTROY_DISK_STORE__NAME, diskStoreName);
+    commandStringBuilder.addOption(CliStrings.DESTROY_DISK_STORE__GROUP, groupName);
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Make sure the disk store was removed from the shared config
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        String xmlFromConfig;
+        try {
+          xmlFromConfig = sharedConfig.getConfiguration(groupName).getCacheXmlContent();
+          assertFalse(xmlFromConfig.contains(diskStoreName));
+        } catch (Exception e) {
+          fail("Error occurred in cluster configuration service", e);
+        }
+      }
+    });
+
+
+    // Restart the cache and make sure it DOES NOT have the disk store
+    vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableCallable() {
+      @Override
+      public Object call() {
+        getCache().close();
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        localProps.setProperty(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "true");
+        getSystem(localProps);
+        Cache cache = getCache();
+        assertNotNull(cache);
+        GemFireCacheImpl gfc = (GemFireCacheImpl) cache;
+        Collection<DiskStoreImpl> diskStores = gfc.listDiskStores();
+        assertNotNull(diskStores);
+        assertTrue(diskStores.isEmpty());
+        return null;
+      }
+    });
+  }
+
+
+  /**
+   * 1) Create a disk store in a member and get its disk dirs.
+   * 2) Close the member.
+   * 3) Execute the alter disk-store command.
+   * 4) Restart the member.
+   * 5) Check whether the disk store was altered.
+   *
+   * @throws IOException
+   * @throws ClassNotFoundException
+   */
+  public void testAlterDiskStore() throws ClassNotFoundException, IOException {
+    final String regionName = "region1";
+    final String diskStoreName = "disk-store1";
+    final String diskDirName = "diskStoreDir";
+    final File diskStoreDir = new File(diskDirName);
+    diskStoreDir.deleteOnExit();
+
+    if (!diskStoreDir.exists()) {
+      diskStoreDir.mkdir();
+    }
+
+    final String diskDirPath = diskStoreDir.getCanonicalPath();
+    final VM vm1 = Host.getHost(0).getVM(1);
+
+
+    vm1.invoke(new SerializableCallable() {
+
+      @Override
+      public Object call() throws Exception {
+        getSystem();
+        Region region = createParRegWithPersistence(regionName, diskStoreName, diskDirPath);
+        region.put("a", "QWE");
+        return region.put("b", "ASD");
+      }
+    });
+    // Close the cache and all connections so the disk store can be altered
+    disconnectAllFromDS();
+
+    // Now execute the command
+    createDefaultSetup(null);
+    Gfsh gfshInstance = Gfsh.getCurrentInstance();
+
+    if (gfshInstance == null) {
+      fail("In testAlterDiskStore command gfshInstance is null");
+    }
+
+    gfshInstance.setDebug(true);
+
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.ALTER_DISK_STORE);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__DISKSTORENAME, diskStoreName);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__REGIONNAME, regionName);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__DISKDIRS, diskDirPath);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__CONCURRENCY__LEVEL, "5");
+    csb.addOption(CliStrings.ALTER_DISK_STORE__INITIAL__CAPACITY, "6");
+    csb.addOption(CliStrings.ALTER_DISK_STORE__LRU__EVICTION__ACTION, "local-destroy");
+    csb.addOption(CliStrings.ALTER_DISK_STORE__COMPRESSOR, "com.gemstone.gemfire.compression.SnappyCompressor");
+    csb.addOption(CliStrings.ALTER_DISK_STORE__STATISTICS__ENABLED, "true");
+
+    String commandString = csb.getCommandString().trim();
+
+    CommandResult cmdResult = executeCommand(commandString);
+    String resultString = commandResultToString(cmdResult);
+    getLogWriter().info("#SB command output : \n" + resultString);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertTrue(resultString.contains("concurrencyLevel=5"));
+    assertTrue(resultString.contains("lruAction=local-destroy"));
+    assertTrue(resultString.contains("compressor=com.gemstone.gemfire.compression.SnappyCompressor"));
+    assertTrue(resultString.contains("initialCapacity=6"));
+
+    csb = new CommandStringBuilder(CliStrings.ALTER_DISK_STORE);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__DISKSTORENAME, diskStoreName);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__REGIONNAME, regionName);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__DISKDIRS, diskDirPath);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__COMPRESSOR, "none");
+
+    cmdResult = executeCommand(csb.getCommandString().trim());
+    resultString = commandResultToString(cmdResult);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertTrue(stringContainsLine(resultString, "-compressor=none"));
+
+    // Alter the disk store with the remove option
+    csb = new CommandStringBuilder(CliStrings.ALTER_DISK_STORE);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__DISKSTORENAME, diskStoreName);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__REGIONNAME, regionName);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__DISKDIRS, diskDirPath);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__REMOVE, "true");
+
+    commandString = csb.getCommandString().trim();
+
+    cmdResult = executeCommand(commandString);
+    resultString = commandResultToString(cmdResult);
+    getLogWriter().info("command output : \n" + resultString);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    Object postDestroyValue = vm1.invoke(new SerializableCallable() {
+
+      @Override
+      public Object call() throws Exception {
+        getSystem();
+        Region region = createParRegWithPersistence(regionName, diskStoreName, diskDirPath);
+        return region.get("a");
+      }
+    });
+    assertNull(postDestroyValue);
+
+    csb = new CommandStringBuilder(CliStrings.ALTER_DISK_STORE);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__DISKSTORENAME, diskStoreName);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__REGIONNAME, regionName);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__DISKDIRS, diskDirPath);
+    csb.addOption(CliStrings.ALTER_DISK_STORE__CONCURRENCY__LEVEL, "5");
+    csb.addOption(CliStrings.ALTER_DISK_STORE__REMOVE, "true");
+
+
+    commandString = csb.getCommandString().trim();
+
+    cmdResult = executeCommand(commandString);
+    resultString = commandResultToString(cmdResult);
+    getLogWriter().info("Alter DiskStore with wrong remove option  : \n" + resultString);
+    assertEquals(true, Result.Status.ERROR.equals(cmdResult.getStatus()));
+
+    filesToBeDeleted.add(diskDirName);
+  }
+
+
+  public void testBackupDiskStoreBackup() throws IOException {
+    final String regionName = "region1";
+    final String fullBackUpName = "fullBackUp";
+    final String controllerName = "controller";
+    final String vm1Name = "vm1";
+    final String diskStoreName = "diskStore";
+    final String controllerDiskDirName = "controllerDiskDir";
+    final String vm1DiskDirName = "vm1DiskDir";
+    final String incrementalBackUpName = "incrementalBackUp";
+    final VM manager = Host.getHost(0).getVM(0);
+    final VM vm1 = Host.getHost(0).getVM(1);
+    createDefaultSetup(null);
+
+
+    File controllerDiskDir = new File(controllerDiskDirName);
+    controllerDiskDir.mkdir();
+    final String controllerDiskDirPath = controllerDiskDir.getCanonicalPath();
+    filesToBeDeleted.add(controllerDiskDirPath);
+
+    File vm1DiskDir = new File(vm1DiskDirName);
+    vm1DiskDir.mkdir();
+    final String vm1DiskDirPath = vm1DiskDir.getCanonicalPath();
+    filesToBeDeleted.add(vm1DiskDirPath);
+
+    File fullBackupDir = new File(fullBackUpName);
+    fullBackupDir.mkdir();
+    final String fullBackupDirPath = fullBackupDir.getCanonicalPath();
+    filesToBeDeleted.add(fullBackupDirPath);
+
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.NAME_NAME, controllerName);
+
+    getSystem(props);
+
+    manager.invoke(new SerializableRunnable() {
+      public void run() {
+        Region region = createParRegWithPersistence(regionName, diskStoreName, controllerDiskDirPath);
+        region.put("A", "1");
+        region.put("B", "2");
+      }
+    });
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm1Name);
+        getSystem(localProps);
+
+        Cache cache = getCache();
+        Region region = createParRegWithPersistence(regionName, diskStoreName, vm1DiskDirPath);
+      }
+    });
+
+
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.BACKUP_DISK_STORE);
+    csb.addOption(CliStrings.BACKUP_DISK_STORE__DISKDIRS, fullBackupDirPath);
+    String commandString = csb.toString();
+
+    CommandResult cmdResult = executeCommand(commandString);
+    String resultAsString = commandResultToString(cmdResult);
+    getLogWriter().info("Result from full backup : \n" + resultAsString);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertEquals(true, resultAsString.contains("Manager"));
+    assertEquals(true, resultAsString.contains(vm1Name));
+
+
+    vm1.invoke(new SerializableRunnable() {
+
+      @Override
+      public void run() {
+        Region region = getCache().getRegion(regionName);
+        // Add some data to the region
+        region.put("F", "231");
+        region.put("D", "ew");
+      }
+    });
+
+    File incrementalBackUpDir = new File(incrementalBackUpName);
+    incrementalBackUpDir.mkdir();
+
+    // Perform an incremental backup
+    final String incrementalBackUpDirPath = incrementalBackUpDir.getCanonicalPath();
+    filesToBeDeleted.add(incrementalBackUpDirPath);
+
+    csb = new CommandStringBuilder(CliStrings.BACKUP_DISK_STORE);
+    csb.addOption(CliStrings.BACKUP_DISK_STORE__DISKDIRS, incrementalBackUpDirPath);
+    csb.addOption(CliStrings.BACKUP_DISK_STORE__BASELINEDIR, fullBackupDirPath);
+
+    cmdResult = executeCommand(csb.toString());
+    resultAsString = commandResultToString(cmdResult);
+    getLogWriter().info("Result from incremental backup : \n" + resultAsString);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    assertEquals(true, resultAsString.contains("Manager"));
+    assertEquals(true, resultAsString.contains(vm1Name));
+  }
+
+  public void testCreateDiskStore() {
+    final String diskStore1Name = "testCreateDiskStore1";
+    final String diskStore2Name = "testCreateDiskStore2";
+
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group0");
+    createDefaultSetup(localProps);
+
+    CommandResult cmdResult = executeCommand(CliStrings.LIST_DISK_STORE);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertTrue(commandResultToString(cmdResult).contains("No Disk Stores Found"));
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final String vm1Name = "VM" + vm1.getPid();
+    final File diskStore1Dir1 = new File(new File(".").getAbsolutePath(), diskStore1Name + ".1");
+    this.filesToBeDeleted.add(diskStore1Dir1.getAbsolutePath());
+    final File diskStore1Dir2 = new File(new File(".").getAbsolutePath(), diskStore1Name + ".2");
+    this.filesToBeDeleted.add(diskStore1Dir2.getAbsolutePath());
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        diskStore1Dir1.mkdirs();
+        diskStore1Dir2.mkdirs();
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm1Name);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+        getSystem(localProps);
+        getCache();
+      }
+    });
+
+    final VM vm2 = Host.getHost(0).getVM(2);
+    final String vm2Name = "VM" + vm2.getPid();
+    final File diskStore2Dir = new File(new File(".").getAbsolutePath(), diskStore2Name);
+    this.filesToBeDeleted.add(diskStore2Dir.getAbsolutePath());
+    vm2.invoke(new SerializableRunnable() {
+      public void run() {
+        diskStore2Dir.mkdirs();
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm2Name);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group2");
+        getSystem(localProps);
+        getCache();
+      }
+    });
+
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE);
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__NAME, diskStore1Name);
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__GROUP, "Group1");
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__ALLOW_FORCE_COMPACTION, "true");
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__AUTO_COMPACT, "false");
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__COMPACTION_THRESHOLD, "67");
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__MAX_OPLOG_SIZE, "355");
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__QUEUE_SIZE, "5321");
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__TIME_INTERVAL, "2023");
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__WRITE_BUFFER_SIZE, "3110");
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE,
+        diskStore1Dir1.getAbsolutePath() + "#1452637463");
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, diskStore1Dir2.getAbsolutePath());
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    String stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*Success"));
+
+    // Verify that the disk store was created on the correct member
+    cmdResult = executeCommand(CliStrings.LIST_DISK_STORE);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*" + diskStore1Name + " .*"));
+    assertFalse(stringContainsLine(stringResult, vm2Name + ".*" + diskStore1Name + " .*"));
+
+    // Verify that the disk store files were created in the correct directory.
+    assertEquals(2, diskStore1Dir1.listFiles().length);
+
+    // Verify that all of the attributes of the disk store were set correctly.
+    commandStringBuilder = new CommandStringBuilder(CliStrings.DESCRIBE_DISK_STORE);
+    commandStringBuilder.addOption(CliStrings.DESCRIBE_DISK_STORE__MEMBER, vm1Name);
+    commandStringBuilder.addOption(CliStrings.DESCRIBE_DISK_STORE__NAME, diskStore1Name);
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertTrue(stringContainsLine(stringResult, "Allow Force Compaction.*Yes"));
+    assertTrue(stringContainsLine(stringResult, "Auto Compaction.*No"));
+    assertTrue(stringContainsLine(stringResult, "Compaction Threshold.*67"));
+    assertTrue(stringContainsLine(stringResult, "Max Oplog Size.*355"));
+    assertTrue(stringContainsLine(stringResult, "Queue Size.*5321"));
+    assertTrue(stringContainsLine(stringResult, "Time Interval.*2023"));
+    assertTrue(stringContainsLine(stringResult, "Write Buffer Size.*3110"));
+    assertTrue(stringContainsLine(stringResult, ".*" + diskStore1Name + ".1 .*1452637463"));
+    assertTrue(stringContainsLine(stringResult, ".*" + diskStore1Name + ".2 .*" + Integer.MAX_VALUE));
+
+    commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE);
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__NAME, diskStore2Name);
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__GROUP, "Group2");
+    commandStringBuilder.addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, diskStore2Dir.getAbsolutePath());
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*Success"));
+
+    // Verify that the second disk store was created correctly.
+    cmdResult = executeCommand(CliStrings.LIST_DISK_STORE);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(4, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*" + diskStore1Name + " .*"));
+    assertFalse(stringContainsLine(stringResult, vm2Name + ".*" + diskStore1Name + " .*"));
+    assertFalse(stringContainsLine(stringResult, vm1Name + ".*" + diskStore2Name + " .*"));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*" + diskStore2Name + " .*"));
+  }
+
+  public void testDestroyDiskStore() {
+    final String diskStore1Name = "testDestroyDiskStore1";
+    final String diskStore2Name = "testDestroyDiskStore2";
+    final String region1Name = "testDestroyDiskStoreRegion1";
+    final String region2Name = "testDestroyDiskStoreRegion2";
+
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group0");
+    createDefaultSetup(localProps);
+
+    CommandResult cmdResult = executeCommand(CliStrings.LIST_DISK_STORE);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertTrue(commandResultToString(cmdResult).contains("No Disk Stores Found"));
+
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final String vm1Name = "VM" + vm1.getPid();
+    final File diskStore1Dir1 = new File(new File(".").getAbsolutePath(), diskStore1Name + ".1");
+    this.filesToBeDeleted.add(diskStore1Dir1.getAbsolutePath());
+    final File diskStore2Dir1 = new File(new File(".").getAbsolutePath(), diskStore2Name + ".1");
+    this.filesToBeDeleted.add(diskStore2Dir1.getAbsolutePath());
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        diskStore1Dir1.mkdirs();
+        diskStore2Dir1.mkdirs();
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm1Name);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1,Group2");
+        getSystem(localProps);
+        Cache cache = getCache();
+
+        DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+        diskStoreFactory.setDiskDirs(new File[]{diskStore1Dir1});
+        diskStoreFactory.create(diskStore1Name);
+
+        diskStoreFactory.setDiskDirs(new File[]{diskStore2Dir1});
+        diskStoreFactory.create(diskStore2Name);
+      }
+    });
+
+    final VM vm2 = Host.getHost(0).getVM(2);
+    final String vm2Name = "VM" + vm2.getPid();
+    final File diskStore1Dir2 = new File(new File(".").getAbsolutePath(), diskStore1Name + ".2");
+    this.filesToBeDeleted.add(diskStore1Dir2.getAbsolutePath());
+    final File diskStore2Dir2 = new File(new File(".").getAbsolutePath(), diskStore2Name + ".2");
+    this.filesToBeDeleted.add(diskStore2Dir2.getAbsolutePath());
+    vm2.invoke(new SerializableRunnable() {
+      public void run() {
+        diskStore1Dir2.mkdirs();
+        diskStore2Dir2.mkdirs();
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm2Name);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group2");
+        getSystem(localProps);
+        Cache cache = getCache();
+
+        DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+        diskStoreFactory.setDiskDirs(new File[]{diskStore1Dir2});
+        diskStoreFactory.create(diskStore1Name);
+
+        RegionFactory regionFactory = cache.createRegionFactory();
+        regionFactory.setDiskStoreName(diskStore1Name);
+        regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
+        regionFactory.create(region1Name);
+        regionFactory.create(region2Name);
+
+        diskStoreFactory.setDiskDirs(new File[]{diskStore2Dir2});
+        diskStoreFactory.create(diskStore2Name);
+      }
+    });
+
+    // TEST DELETING ON 1 MEMBER
+
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.DESTROY_DISK_STORE);
+    commandStringBuilder.addOption(CliStrings.DESTROY_DISK_STORE__NAME, diskStore1Name);
+    commandStringBuilder.addOption(CliStrings.DESTROY_DISK_STORE__GROUP, "Group1");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    String stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*Success"));
+
+    // Verify that the disk store was destroyed on the correct member
+    cmdResult = executeCommand(CliStrings.LIST_DISK_STORE);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(5, countLinesInString(stringResult, false));
+    assertFalse(stringContainsLine(stringResult, vm1Name + ".*" + diskStore1Name + " .*"));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*" + diskStore1Name + " .*"));
+
+    // Verify that the disk store files were deleted from the correct directory.
+    assertEquals(0, diskStore1Dir1.listFiles().length);
+    assertEquals(4, diskStore1Dir2.listFiles().length);
+
+    // TEST DELETING ON 2 MEMBERS
+
+    commandStringBuilder = new CommandStringBuilder(CliStrings.DESTROY_DISK_STORE);
+    commandStringBuilder.addOption(CliStrings.DESTROY_DISK_STORE__NAME, diskStore2Name);
+    commandStringBuilder.addOption(CliStrings.DESTROY_DISK_STORE__GROUP, "Group2");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(4, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*Success"));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*Success"));
+
+    // Verify that the disk store was destroyed on the correct member
+    cmdResult = executeCommand(CliStrings.LIST_DISK_STORE);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertFalse(stringContainsLine(stringResult, vm1Name + ".*" + diskStore2Name + " .*"));
+    assertFalse(stringContainsLine(stringResult, vm2Name + ".*" + diskStore2Name + " .*"));
+
+    // Verify that the disk store files were deleted from the correct directories.
+    assertEquals(0, diskStore2Dir1.listFiles().length);
+    assertEquals(0, diskStore2Dir2.listFiles().length);
+
+    // TEST FOR DISK STORE IN USE
+
+    commandStringBuilder = new CommandStringBuilder(CliStrings.DESTROY_DISK_STORE);
+    commandStringBuilder.addOption(CliStrings.DESTROY_DISK_STORE__NAME, diskStore1Name);
+    commandStringBuilder.addOption(CliStrings.DESTROY_DISK_STORE__GROUP, "Group2");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(4, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*Disk store not found on this member"));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*" + region1Name + ".*" + region2Name + ".*"));
+
+    // TEST DELETING ON ALL MEMBERS
+
+    vm2.invoke(new SerializableRunnable() {
+      public void run() {
+        Cache cache = getCache();
+
+        Region region = cache.getRegion(region1Name);
+        region.destroyRegion();
+
+        region = cache.getRegion(region2Name);
+        region.destroyRegion();
+      }
+    });
+
+    commandStringBuilder = new CommandStringBuilder(CliStrings.DESTROY_DISK_STORE);
+    commandStringBuilder.addOption(CliStrings.DESTROY_DISK_STORE__NAME, diskStore1Name);
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(5, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(stringContainsLine(stringResult, "Manager.*Disk store not found on this member"));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*Disk store not found on this member"));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*Success"));
+
+    // Verify that there are no disk stores left.
+    cmdResult = executeCommand(CliStrings.LIST_DISK_STORE);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertTrue(commandResultToString(cmdResult).contains("No Disk Stores Found"));
+
+    // Verify that the disk store files were deleted from the correct directory.
+    assertEquals(0, diskStore1Dir2.listFiles().length);
+  }
+
+  private Region<?, ?> createParRegWithPersistence(String regionName, String diskStoreName, String diskDirName) {
+    Cache cache = getCache();
+    File diskStoreDirFile = new File(diskDirName);
+
+    if (!diskStoreDirFile.exists()) {
+      diskStoreDirFile.mkdirs();
+    }
+
+    DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+    diskStoreFactory.setDiskDirs(new File[]{diskStoreDirFile});
+    diskStoreFactory.setMaxOplogSize(1);
+    diskStoreFactory.setAllowForceCompaction(true);
+    diskStoreFactory.setAutoCompact(false);
+    diskStoreFactory.create(diskStoreName);
+
+    // Eviction attributes
+    EvictionAttributes ea = EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK);
+
+    RegionFactory regionFactory = cache.createRegionFactory();
+    regionFactory.setDiskStoreName(diskStoreName);
+    regionFactory.setDiskSynchronous(true);
+    regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
+    regionFactory.setScope(Scope.DISTRIBUTED_ACK);
+    regionFactory.setEvictionAttributes(ea);
+
+    return regionFactory.create(regionName);
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    for (String path : this.filesToBeDeleted) {
+      try {
+        FileUtil.delete(new File(path));
+      } catch (IOException e) {
+        getLogWriter().error("Unable to delete file", e);
+      }
+    }
+    this.filesToBeDeleted.clear();
+    super.tearDown2();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
new file mode 100644
index 0000000..8c3dd22
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
@@ -0,0 +1,593 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.execute.Function;
+import com.gemstone.gemfire.cache.execute.FunctionService;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.cache.functions.TestFunction;
+import com.gemstone.gemfire.management.DistributedRegionMXBean;
+import com.gemstone.gemfire.management.ManagementService;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.json.GfJsonException;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * Dunit class for testing gemfire function commands: execute function, destroy function, list function.
+ *
+ * @author apande
+ * @author David Hoots
+ */
+public class FunctionCommandsDUnitTest extends CliCommandTestBase {
+  private static final long serialVersionUID = 1L;
+  private static final String REGION_NAME = "FunctionCommandsReplicatedRegion";
+  private static final String REGION_ONE = "RegionOne";
+  private static final String REGION_TWO = "RegionTwo";
+
+  public FunctionCommandsDUnitTest(String name) {
+    super(name);
+  }
+
+  void setupWith2Regions() {
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final VM vm2 = Host.getHost(0).getVM(2);
+    createDefaultSetup(null);
+
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        final Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        FunctionService.registerFunction(function);
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create("RegionOne");
+        for (int i = 0; i < 10; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+        region = dataRegionFactory.create("RegionTwo");
+        for (int i = 0; i < 1000; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+
+
+    vm2.invoke(new SerializableRunnable() {
+      public void run() {
+        final Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        FunctionService.registerFunction(function);
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create("RegionOne");
+        for (int i = 0; i < 10000; i++) {
+          region.put("key" + (i + 400), "value" + (i + 400));
+        }
+        region = dataRegionFactory.create("Regiontwo");
+        for (int i = 0; i < 10; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+  }
+
+  public void testExecuteFunctionWithNoRegionOnManager() {
+    setupWith2Regions();
+    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+    FunctionService.registerFunction(function);
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        FunctionService.registerFunction(function);
+      }
+    });
+    try {
+      // Give the function registration time to propagate before executing the command
+      Thread.sleep(2500);
+    } catch (InterruptedException e) {
+      e.printStackTrace();
+    }
+    String command = "execute function --id=" + function.getId() + " --region=" + "/" + "RegionOne";
+    getLogWriter().info("testExecuteFunctionWithNoRegionOnManager command : " + command);
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      String strCmdResult = commandResultToString(cmdResult);
+      getLogWriter().info("testExecuteFunctionWithNoRegionOnManager stringResult : " + strCmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      assertTrue(strCmdResult.contains("Execution summary"));
+    } else {
+      fail("testExecuteFunctionWithNoRegionOnManager failed as did not get CommandResult");
+    }
+
+  }
+
+  public static String getMemberId() {
+    Cache cache = new FunctionCommandsDUnitTest("test").getCache();
+    return cache.getDistributedSystem().getDistributedMember().getId();
+  }
+
+  public void testExecuteFunctionOnRegion() {
+    createDefaultSetup(null);
+
+    final Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        RegionFactory<Integer, Integer> dataRegionFactory = getCache().createRegionFactory(RegionShortcut.REPLICATE);
+        Region region = dataRegionFactory.create(REGION_NAME);
+        assertNotNull(region);
+        FunctionService.registerFunction(function);
+      }
+    });
+
+    String command = "execute function --id=" + function.getId() + " --region=" + REGION_NAME;
+    getLogWriter().info("testExecuteFunctionOnRegion command=" + command);
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      getLogWriter().info("testExecuteFunctionOnRegion cmdResult=" + cmdResult);
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testExecuteFunctionOnRegion stringResult=" + stringResult);
+      assertTrue(stringResult.contains("Execution summary"));
+    } else {
+      fail("testExecuteFunctionOnRegion did not return CommandResult");
+    }
+  }
+
+  void setupForBug51480() {
+    final VM vm1 = Host.getHost(0).getVM(1);
+    createDefaultSetup(null);
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        final Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        FunctionService.registerFunction(function);
+        // no need to close cache as it will be closed as part of teardown2
+        Cache cache = getCache();
+
+        RegionFactory<Integer, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        Region region = dataRegionFactory.create(REGION_ONE);
+        for (int i = 0; i < 10; i++) {
+          region.put("key" + (i + 200), "value" + (i + 200));
+        }
+      }
+    });
+  }
+
+  SerializableRunnable checkRegionMBeans = new SerializableRunnable() {
+    @Override
+    public void run() {
+      final WaitCriterion waitForManagerMBean = new WaitCriterion() {
+        @Override
+        public boolean done() {
+          final ManagementService service = ManagementService.getManagementService(getCache());
+          final DistributedRegionMXBean bean = service.getDistributedRegionMXBean(Region.SEPARATOR + REGION_ONE);
+          if (bean == null) {
+            return false;
+          } else {
+            getLogWriter().info("Probing for checkRegionMBeans testExecuteFunctionOnRegionBug51480 finished");
+            return true;
+          }
+        }
+
+        @Override
+        public String description() {
+          return "Probing for testExecuteFunctionOnRegionBug51480";
+        }
+      };
+      DistributedTestCase.waitForCriterion(waitForManagerMBean, 2 * 60 * 1000, 2000, true);
+      DistributedRegionMXBean bean = ManagementService.getManagementService(getCache()).getDistributedRegionMXBean(
+          Region.SEPARATOR + REGION_ONE);
+      assertNotNull(bean);
+    }
+  };
+
+  public void testExecuteFunctionOnRegionBug51480() {
+    setupForBug51480();
+
+    // Check that the DistributedRegionMXBean is available so the command will not fail
+    final VM manager = Host.getHost(0).getVM(0);
+    manager.invoke(checkRegionMBeans);
+
+    final Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        FunctionService.registerFunction(function);
+      }
+    });
+
+    String command = "execute function --id=" + function.getId() + " --region=" + REGION_ONE;
+
+    getLogWriter().info("testExecuteFunctionOnRegionBug51480 command=" + command);
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      getLogWriter().info("testExecuteFunctionOnRegionBug51480 cmdResult=" + cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testExecuteFunctionOnRegionBug51480 stringResult=" + stringResult);
+      assertTrue(stringResult.contains("Execution summary"));
+    } else {
+      fail("testExecuteFunctionOnRegionBug51480 did not return CommandResult");
+    }
+  }
+
+  public void testExecuteFunctionOnMember() {
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+    createDefaultSetup(localProps);
+    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+    FunctionService.registerFunction(function);
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final String vm1MemberId = (String) vm1.invoke(FunctionCommandsDUnitTest.class, "getMemberId");
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        RegionFactory<Integer, Integer> dataRegionFactory = getCache().createRegionFactory(RegionShortcut.REPLICATE);
+        Region region = dataRegionFactory.create(REGION_NAME);
+        Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        assertNotNull(region);
+        FunctionService.registerFunction(function);
+      }
+    });
+
+    String command = "execute function --id=" + function.getId() + " --member=" + vm1MemberId;
+    getLogWriter().info("testExecuteFunctionOnMember command=" + command);
+    CommandResult cmdResult = executeCommand(command);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    String stringResult = commandResultToString(cmdResult);
+    getLogWriter().info("testExecuteFunctionOnMember stringResult:" + stringResult);
+    assertTrue(stringResult.contains("Execution summary"));
+  }
+
+  public void testExecuteFunctionOnMembers() {
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+    createDefaultSetup(localProps);
+    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+    FunctionService.registerFunction(function);
+    final VM vm1 = Host.getHost(0).getVM(1);
+
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        RegionFactory<Integer, Integer> dataRegionFactory = getCache().createRegionFactory(RegionShortcut.REPLICATE);
+        Region region = dataRegionFactory.create(REGION_NAME);
+        Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        assertNotNull(region);
+        FunctionService.registerFunction(function);
+      }
+    });
+    String command = "execute function --id=" + function.getId();
+    getLogWriter().info("testExecuteFunctionOnMembers command=" + command);
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      getLogWriter().info("testExecuteFunctionOnMembers cmdResult:" + cmdResult);
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testExecuteFunctionOnMembers stringResult:" + stringResult);
+      assertTrue(stringResult.contains("Execution summary"));
+    } else {
+      fail("testExecuteFunctionOnMembers did not return CommandResult");
+    }
+  }
+
+  public void testExecuteFunctionOnMembersWithArgs() {
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+    createDefaultSetup(localProps);
+    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_RETURN_ARGS);
+    FunctionService.registerFunction(function);
+
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        RegionFactory<Integer, Integer> dataRegionFactory = getCache().createRegionFactory(RegionShortcut.REPLICATE);
+        Region region = dataRegionFactory.create(REGION_NAME);
+        Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_RETURN_ARGS);
+        assertNotNull(region);
+        FunctionService.registerFunction(function);
+      }
+    });
+
+    String command = "execute function --id=" + function.getId() + " --arguments=arg1,arg2";
+
+    getLogWriter().info("testExecuteFunctionOnMembersWithArgs command=" + command);
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      getLogWriter().info("testExecuteFunctionOnMembersWithArgs cmdResult:" + cmdResult);
+      String stringResult = commandResultToString(cmdResult);
+      getLogWriter().info("testExecuteFunctionOnMembersWithArgs stringResult:" + stringResult);
+      assertTrue(stringResult.contains("Execution summary"));
+      assertTrue(stringResult.contains("arg1"));
+    } else {
+      fail("testExecuteFunctionOnMembersWithArgs did not return CommandResult");
+    }
+  }
+
+  public void testExecuteFunctionOnGroups() {
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group0");
+    createDefaultSetup(localProps);
+    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+    FunctionService.registerFunction(function);
+
+    VM vm1 = Host.getHost(0).getVM(1);
+    VM vm2 = Host.getHost(0).getVM(2);
+
+    String vm1id = (String) vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+        getSystem(localProps);
+        Cache cache = getCache();
+        Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        FunctionService.registerFunction(function);
+        return cache.getDistributedSystem().getDistributedMember().getId();
+      }
+    });
+
+    String vm2id = (String) vm2.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group2");
+        getSystem(localProps);
+        Cache cache = getCache();
+        Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        FunctionService.registerFunction(function);
+        return cache.getDistributedSystem().getDistributedMember().getId();
+      }
+    });
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        RegionFactory<Integer, Integer> dataRegionFactory = getCache().createRegionFactory(RegionShortcut.REPLICATE);
+        Region region = dataRegionFactory.create(REGION_NAME);
+        Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        assertNotNull(region);
+        FunctionService.registerFunction(function);
+      }
+    });
+
+    String command = "execute function --id=" + TestFunction.TEST_FUNCTION1 + " --groups=Group1,Group2";
+    getLogWriter().info("testExecuteFunctionOnGroups command=" + command);
+    CommandResult cmdResult = executeCommand(command);
+    getLogWriter().info("testExecuteFunctionOnGroups cmdResult=" + cmdResult);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    TabularResultData resultData = (TabularResultData) cmdResult.getResultData();
+    List<String> members = resultData.retrieveAllValues("Member ID/Name");
+    getLogWriter().info("testExecuteFunctionOnGroups members=" + members);
+    assertTrue(members.size() == 2 && members.contains(vm1id) && members.contains(vm2id));
+  }
+
+
+  public void testDestroyOnMember() {
+    createDefaultSetup(null);
+    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+    FunctionService.registerFunction(function);
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final String vm1MemberId = (String) vm1.invoke(FunctionCommandsDUnitTest.class, "getMemberId");
+    String command = "destroy function --id=" + function.getId() + " --member=" + vm1MemberId;
+    getLogWriter().info("testDestroyOnMember command=" + command);
+    CommandResult cmdResult = executeCommand(command);
+    if (cmdResult != null) {
+      String strCmdResult = commandResultToString(cmdResult);
+      assertEquals(Result.Status.OK, cmdResult.getStatus());
+      getLogWriter().info("testDestroyOnMember strCmdResult=" + strCmdResult);
+      assertTrue(strCmdResult.contains("Destroyed TestFunction1 Successfully"));
+    } else {
+      fail("testDestroyOnMember failed as did not get CommandResult");
+    }
+  }
+
+  public void testDestroyOnGroups() {
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group0");
+    createDefaultSetup(localProps);
+    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+    FunctionService.registerFunction(function);
+
+    VM vm1 = Host.getHost(0).getVM(1);
+    VM vm2 = Host.getHost(0).getVM(2);
+
+    String vm1id = (String) vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+        getSystem(localProps);
+        Cache cache = getCache();
+        Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        FunctionService.registerFunction(function);
+        return cache.getDistributedSystem().getDistributedMember().getId();
+      }
+    });
+
+
+    String vm2id = (String) vm2.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group2");
+        getSystem(localProps);
+        Cache cache = getCache();
+        Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        FunctionService.registerFunction(function);
+        return cache.getDistributedSystem().getDistributedMember().getId();
+      }
+    });
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+        FunctionService.registerFunction(function);
+      }
+    });
+
+    String command = "destroy function --id=" + TestFunction.TEST_FUNCTION1 + " --groups=Group1,Group2";
+    getLogWriter().info("testDestroyOnGroups command=" + command);
+    CommandResult cmdResult = executeCommand(command);
+    getLogWriter().info("testDestroyOnGroups cmdResult=" + cmdResult);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    String content = null;
+    try {
+      content = cmdResult.getContent().get("message").toString();
+      getLogWriter().info("testDestroyOnGroups content = " + content);
+    } catch (GfJsonException e) {
+      fail("testDestroyOnGroups exception=" + e);
+    }
+    assertNotNull(content);
+    assertTrue(content.equals(
+        "[\"Destroyed " + TestFunction.TEST_FUNCTION1 + " Successfully on " + vm1id + "," + vm2id + "\"]") || content.equals(
+        "[\"Destroyed " + TestFunction.TEST_FUNCTION1 + " Successfully on " + vm2id + "," + vm1id + "\"]"));
+  }
+
+  public void testListFunction() {
+    // Create the default setup, putting the Manager VM into Group1
+    Properties localProps = new Properties();
+    localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+    createDefaultSetup(localProps);
+
+    // Find no functions
+    CommandResult cmdResult = executeCommand(CliStrings.LIST_FUNCTION);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertTrue(commandResultToString(cmdResult).contains("No Functions Found"));
+
+    // Add a function in the manager VM (VM 0)
+    final Function function1 = new TestFunction(true, TestFunction.TEST_FUNCTION1);
+    final VM managerVm = Host.getHost(0).getVM(0);
+    managerVm.invoke(new SerializableRunnable() {
+      public void run() {
+        FunctionService.registerFunction(function1);
+      }
+    });
+
+    // Add functions in another VM (VM 1)
+    final Function function2 = new TestFunction(true, TestFunction.TEST_FUNCTION2);
+    final Function function3 = new TestFunction(true, TestFunction.TEST_FUNCTION3);
+    final VM vm1 = Host.getHost(0).getVM(1);
+    final String vm1Name = "VM" + vm1.getPid();
+    vm1.invoke(new SerializableRunnable() {
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm1Name);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group2");
+        getSystem(localProps);
+        getCache();
+
+        FunctionService.registerFunction(function2);
+        FunctionService.registerFunction(function3);
+      }
+    });
+
+    // Add functions in a third VM (VM 2)
+    final Function function4 = new TestFunction(true, TestFunction.TEST_FUNCTION4);
+    final Function function5 = new TestFunction(true, TestFunction.TEST_FUNCTION5);
+    final Function function6 = new TestFunction(true, TestFunction.TEST_FUNCTION6);
+    final VM vm2 = Host.getHost(0).getVM(2);
+    final String vm2Name = "VM" + vm2.getPid();
+    vm2.invoke(new SerializableRunnable() {
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, vm2Name);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group3");
+        getSystem(localProps);
+        getCache();
+
+        FunctionService.registerFunction(function4);
+        FunctionService.registerFunction(function5);
+        FunctionService.registerFunction(function6);
+      }
+    });
+
+    // Find all functions
+    cmdResult = executeCommand(CliStrings.LIST_FUNCTION);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    String stringResult = commandResultToString(cmdResult);
+    assertEquals(8, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, "Member.*Function"));
+    assertTrue(stringContainsLine(stringResult, "Manager.*" + function1.getId()));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*" + function2.getId()));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*" + function3.getId()));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*" + function4.getId()));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*" + function5.getId()));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*" + function6.getId()));
+
+    // Find functions in groups Group1 and Group3
+    cmdResult = executeCommand(CliStrings.LIST_FUNCTION + " --group=Group1,Group3");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(6, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, "Member.*Function"));
+    assertTrue(stringContainsLine(stringResult, "Manager.*" + function1.getId()));
+    assertFalse(stringContainsLine(stringResult, vm1Name + ".*"));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*" + function4.getId()));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*" + function5.getId()));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*" + function6.getId()));
+
+    // Find functions for the Manager and VM1 members
+    cmdResult = executeCommand(CliStrings.LIST_FUNCTION + " --member=Manager," + vm1Name);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(5, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, "Member.*Function"));
+    assertTrue(stringContainsLine(stringResult, "Manager.*" + function1.getId()));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*" + function2.getId()));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*" + function3.getId()));
+    assertFalse(stringContainsLine(stringResult, vm2Name + ".*"));
+
+    // Find functions that match a pattern
+    cmdResult = executeCommand(CliStrings.LIST_FUNCTION + " --matches=.*[135]$");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(5, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, "Member.*Function"));
+    assertTrue(stringContainsLine(stringResult, "Manager.*" + function1.getId()));
+    assertFalse(stringContainsLine(stringResult, vm2Name + ".*" + function2.getId()));
+    assertTrue(stringContainsLine(stringResult, vm1Name + ".*" + function3.getId()));
+    assertFalse(stringContainsLine(stringResult, vm2Name + ".*" + function4.getId()));
+    assertTrue(stringContainsLine(stringResult, vm2Name + ".*" + function5.getId()));
+    assertFalse(stringContainsLine(stringResult, vm2Name + ".*" + function6.getId()));
+  }
+}
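
A quick aside on the --matches filter exercised above: it takes a Java regular
expression that is applied to each registered function id, which is why the
assertions expect the odd-numbered functions but not the even-numbered ones. A
minimal standalone check of the same pattern (the id strings below are
hypothetical stand-ins for the TestFunction ids):

    public class MatchesPatternCheck {
      public static void main(String[] args) {
        // Same pattern the test passes to "list function --matches"
        String pattern = ".*[135]$";
        String[] ids = { "TestFunction1", "TestFunction2", "TestFunction3",
                         "TestFunction4", "TestFunction5", "TestFunction6" };
        for (String id : ids) {
          // Only ids ending in 1, 3 or 5 match the pattern.
          System.out.println(id + " -> " + id.matches(pattern));
        }
      }
    }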


[18/50] [abbrv] incubator-geode git commit: GEODE-590: GMSLocatorRecoveryJUnitTest.testRecoverFromOther Reset gemfire.bind-address system property

Posted by kl...@apache.org.
GEODE-590: GMSLocatorRecoveryJUnitTest.testRecoverFromOther
Reset gemfire.bind-address system property


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/1e93c6f9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/1e93c6f9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/1e93c6f9

Branch: refs/heads/feature/GEODE-291
Commit: 1e93c6f952a4355da0a04319eaa800cfcbadaac6
Parents: bd43c34
Author: Jason Huynh <hu...@gmail.com>
Authored: Tue Dec 8 09:24:21 2015 -0800
Committer: Jason Huynh <hu...@gmail.com>
Committed: Tue Dec 8 09:24:21 2015 -0800

----------------------------------------------------------------------
 .../internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java       | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1e93c6f9/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
index 86205b9..585ff17 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
@@ -127,6 +127,7 @@ public class GMSHealthMonitorJUnitTest {
   @After
   public void tearDown() {
     gmsHealthMonitor.stop();
+    System.getProperties().remove("gemfire.bind-address");
   }
 
   @Test
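
A test that sets a JVM-wide system property (here gemfire.bind-address) should
clear or restore it in tearDown so it does not leak into later tests in the same
JVM; that is what the one-line fix above does. A minimal, self-contained sketch of
the save-and-restore variant of that pattern (the test body is hypothetical):

    import static org.junit.Assert.assertEquals;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class SystemPropertyHygieneExampleTest {

      private static final String BIND_ADDRESS_PROP = "gemfire.bind-address";

      private String previousValue;

      @Before
      public void saveProperty() {
        // Remember whatever value (possibly none) was set before this test ran.
        previousValue = System.getProperty(BIND_ADDRESS_PROP);
      }

      @Test
      public void usesBindAddress() {
        System.setProperty(BIND_ADDRESS_PROP, "localhost");
        assertEquals("localhost", System.getProperty(BIND_ADDRESS_PROP));
      }

      @After
      public void restoreProperty() {
        // Restore the previous value, or remove the property entirely,
        // so later tests in the same JVM see a clean environment.
        if (previousValue == null) {
          System.getProperties().remove(BIND_ADDRESS_PROP);
        } else {
          System.setProperty(BIND_ADDRESS_PROP, previousValue);
        }
      }
    }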


[04/50] [abbrv] incubator-geode git commit: GEODE-622: add OffHeapStorage unit test coverage

Posted by kl...@apache.org.
GEODE-622: add OffHeapStorage unit test coverage

Removed unused OFF_HEAP_TOTAL_SIZE system property.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/812d51c4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/812d51c4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/812d51c4

Branch: refs/heads/feature/GEODE-291
Commit: 812d51c4d71b3163e9c26cf2d3046bd8267ebe69
Parents: e414a49
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Wed Dec 2 15:06:34 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Mon Dec 7 11:09:30 2015 -0800

----------------------------------------------------------------------
 .../internal/offheap/OffHeapStorage.java        |  29 +--
 .../offheap/SimpleMemoryAllocatorImpl.java      |  22 +-
 .../offheap/OffHeapStorageJUnitTest.java        | 202 +++++++++++++++++++
 3 files changed, 219 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/812d51c4/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapStorage.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapStorage.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapStorage.java
index ef584f1..82cbfeb 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapStorage.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapStorage.java
@@ -147,6 +147,7 @@ public class OffHeapStorage implements OffHeapMemoryStats {
         result = MAX_SLAB_SIZE;
       }
     }
+    assert result > 0 && result <= MAX_SLAB_SIZE && result <= offHeapMemorySize;
     return result;
   }
   
@@ -175,14 +176,6 @@ public class OffHeapStorage implements OffHeapMemoryStats {
    * @return MemoryAllocator for off-heap storage
    */
   public static MemoryAllocator createOffHeapStorage(LogWriter lw, StatisticsFactory sf, long offHeapMemorySize, DistributedSystem system) {
-    // TODO: delete this block of code after tests are changed to use new config
-    if (offHeapMemorySize == 0 && !Boolean.getBoolean(InternalLocator.FORCE_LOCATOR_DM_TYPE)) {
-      String offHeapConfig = System.getProperty("gemfire.OFF_HEAP_TOTAL_SIZE");
-      if (offHeapConfig != null && !offHeapConfig.equals("")) {
-        offHeapMemorySize = parseLongWithUnits(offHeapConfig, 0L, 1024*1024);
-      }
-    }
-    
     MemoryAllocator result;
     if (offHeapMemorySize == 0 || Boolean.getBoolean(InternalLocator.FORCE_LOCATOR_DM_TYPE)) {
       // Checking the FORCE_LOCATOR_DM_TYPE is a quick hack to keep our locator from allocating off heap memory.
@@ -199,15 +192,6 @@ public class OffHeapStorage implements OffHeapMemoryStats {
       
       // determine off-heap and slab sizes
       final long maxSlabSize = calcMaxSlabSize(offHeapMemorySize);
-      assert maxSlabSize > 0;
-      
-      // validate sizes
-      if (maxSlabSize > MAX_SLAB_SIZE) {
-        throw new IllegalArgumentException("gemfire.OFF_HEAP_SLAB_SIZE of value " + offHeapMemorySize + " exceeds maximum value of " + MAX_SLAB_SIZE);
-      }
-      if (maxSlabSize > offHeapMemorySize) {
-        throw new IllegalArgumentException("The off heap slab size (which is " + maxSlabSize + "; set it with gemfire.OFF_HEAP_SLAB_SIZE) must be less than or equal to the total size (which is " + offHeapMemorySize + "; set it with gemfire.OFF_HEAP_SLAB_SIZE).");
-      }
       
       final int slabCount = calcSlabCount(maxSlabSize, offHeapMemorySize);
 
@@ -222,9 +206,10 @@ public class OffHeapStorage implements OffHeapMemoryStats {
   }
   
   private static final long MAX_SLAB_SIZE = Integer.MAX_VALUE;
-  private static final long MIN_SLAB_SIZE = 1024;
+  static final long MIN_SLAB_SIZE = 1024;
 
-  private static int calcSlabCount(long maxSlabSize, long offHeapMemorySize) {
+  // non-private for unit test access
+  static int calcSlabCount(long maxSlabSize, long offHeapMemorySize) {
     long result = offHeapMemorySize / maxSlabSize;
     if ((offHeapMemorySize % maxSlabSize) >= MIN_SLAB_SIZE) {
       result++;
@@ -430,13 +415,13 @@ public class OffHeapStorage implements OffHeapMemoryStats {
         if (this.ids == null) {
           return;
         }
-        final InternalDistributedSystem dsToDisconnect = this.ids;
-        this.ids = null; // set null to prevent memory leak after closure!
-        
         if (stayConnectedOnOutOfOffHeapMemory) {
           return;
         }
         
+        final InternalDistributedSystem dsToDisconnect = this.ids;
+        this.ids = null; // set null to prevent memory leak after closure!
+        
         if (dsToDisconnect.getDistributionManager().getRootCause() == null) {
           dsToDisconnect.getDistributionManager().setRootCause(cause);
         }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/812d51c4/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
index dfd05c6..d053797 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
@@ -25,12 +25,13 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
+
 import org.apache.logging.log4j.Logger;
 
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.CacheClosedException;
 import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionService;
 import com.gemstone.gemfire.internal.cache.BucketRegion;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
@@ -100,7 +101,6 @@ public final class SimpleMemoryAllocatorImpl implements MemoryAllocator, MemoryI
   private volatile MemoryUsageListener[] memoryUsageListeners = new MemoryUsageListener[0];
   
   private static SimpleMemoryAllocatorImpl singleton = null;
-  private static final AtomicReference<Thread> asyncCleanupThread = new AtomicReference<>();
   final ChunkFactory chunkFactory;
   
   public static SimpleMemoryAllocatorImpl getAllocator() {
@@ -297,8 +297,8 @@ public final class SimpleMemoryAllocatorImpl implements MemoryAllocator, MemoryI
   public List<Chunk> getLostChunks() {
     List<Chunk> liveChunks = this.freeList.getLiveChunks();
     List<Chunk> regionChunks = getRegionLiveChunks();
-    Set liveChunksSet = new HashSet(liveChunks);
-    Set regionChunksSet = new HashSet(regionChunks);
+    Set<Chunk> liveChunksSet = new HashSet<>(liveChunks);
+    Set<Chunk> regionChunksSet = new HashSet<>(regionChunks);
     liveChunksSet.removeAll(regionChunksSet);
     return new ArrayList<Chunk>(liveChunksSet);
   }
@@ -308,23 +308,22 @@ public final class SimpleMemoryAllocatorImpl implements MemoryAllocator, MemoryI
    */
   private List<Chunk> getRegionLiveChunks() {
     ArrayList<Chunk> result = new ArrayList<Chunk>();
-    GemFireCacheImpl gfc = GemFireCacheImpl.getInstance();
+    RegionService gfc = GemFireCacheImpl.getInstance();
     if (gfc != null) {
-      Iterator rootIt = gfc.rootRegions().iterator();
+      Iterator<Region<?,?>> rootIt = gfc.rootRegions().iterator();
       while (rootIt.hasNext()) {
-        Region rr = (Region) rootIt.next();
+        Region<?,?> rr = rootIt.next();
         getRegionLiveChunks(rr, result);
-        Iterator srIt = rr.subregions(true).iterator();
+        Iterator<Region<?,?>> srIt = rr.subregions(true).iterator();
         while (srIt.hasNext()) {
-          Region sr = (Region)srIt.next();
-          getRegionLiveChunks(sr, result);
+          getRegionLiveChunks(srIt.next(), result);
         }
       }
     }
     return result;
   }
 
-  private void getRegionLiveChunks(Region r, List<Chunk> result) {
+  private void getRegionLiveChunks(Region<?,?> r, List<Chunk> result) {
     if (r.getAttributes().getOffHeap()) {
 
       if (r instanceof PartitionedRegion) {
@@ -375,7 +374,6 @@ public final class SimpleMemoryAllocatorImpl implements MemoryAllocator, MemoryI
     return result;
   }
   
-  @SuppressWarnings("unused")
   public static void debugLog(String msg, boolean logStack) {
     if (logStack) {
       logger.info(msg, new RuntimeException(msg));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/812d51c4/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OffHeapStorageJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OffHeapStorageJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OffHeapStorageJUnitTest.java
index 8b61ab0..de21487 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OffHeapStorageJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OffHeapStorageJUnitTest.java
@@ -17,14 +17,29 @@
 package com.gemstone.gemfire.internal.offheap;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import com.gemstone.gemfire.OutOfOffHeapMemoryException;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.distributed.internal.DistributionStats;
+import com.gemstone.gemfire.distributed.internal.InternalLocator;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
+import com.jayway.awaitility.Awaitility;
 
 @Category(UnitTest.class)
 public class OffHeapStorageJUnitTest {
@@ -76,4 +91,191 @@ public class OffHeapStorageJUnitTest {
     assertEquals(GIGABYTE, OffHeapStorage.parseOffHeapMemorySize("1g"));
     assertEquals(Integer.MAX_VALUE * GIGABYTE, OffHeapStorage.parseOffHeapMemorySize("" + Integer.MAX_VALUE + "g"));
   }
+  @Test
+  public void testCalcMaxSlabSize() {
+    assertEquals(100, OffHeapStorage.calcMaxSlabSize(100L));
+    assertEquals(Integer.MAX_VALUE, OffHeapStorage.calcMaxSlabSize(Long.MAX_VALUE));
+    try {
+      System.setProperty("gemfire.OFF_HEAP_SLAB_SIZE", "99");
+      assertEquals(99*1024*1024, OffHeapStorage.calcMaxSlabSize(100L*1024*1024));
+      System.setProperty("gemfire.OFF_HEAP_SLAB_SIZE", "88m");
+      assertEquals(88*1024*1024, OffHeapStorage.calcMaxSlabSize(100L*1024*1024));
+      System.setProperty("gemfire.OFF_HEAP_SLAB_SIZE", "77M");
+      assertEquals(77*1024*1024, OffHeapStorage.calcMaxSlabSize(100L*1024*1024));
+      System.setProperty("gemfire.OFF_HEAP_SLAB_SIZE", "1g");
+      assertEquals(1*1024*1024*1024, OffHeapStorage.calcMaxSlabSize(2L*1024*1024*1024));
+      System.setProperty("gemfire.OFF_HEAP_SLAB_SIZE", "1G");
+      assertEquals(1L*1024*1024*1024, OffHeapStorage.calcMaxSlabSize(2L*1024*1024*1024+1));
+      System.setProperty("gemfire.OFF_HEAP_SLAB_SIZE", "foobarG");
+      try {
+        OffHeapStorage.calcMaxSlabSize(100);
+        fail("expected IllegalArgumentException");
+      } catch (IllegalArgumentException expected) {
+      }
+      System.setProperty("gemfire.OFF_HEAP_SLAB_SIZE", "");
+      assertEquals(100, OffHeapStorage.calcMaxSlabSize(100L));
+      assertEquals(Integer.MAX_VALUE, OffHeapStorage.calcMaxSlabSize(Long.MAX_VALUE));
+    } finally {
+      System.clearProperty("gemfire.OFF_HEAP_SLAB_SIZE");
+    }
+  }
+  @Test
+  public void testCreateOffHeapStorage() {
+    System.setProperty(InternalLocator.FORCE_LOCATOR_DM_TYPE, "true");
+    try {
+      assertEquals(null, OffHeapStorage.createOffHeapStorage(null, null, 0, null));
+    } finally {
+      System.clearProperty(InternalLocator.FORCE_LOCATOR_DM_TYPE);
+    }
+      // TODO: mock the StatisticsFactory and InternalDistributedSystem that createOffHeapStorage requires
+    Cache c = new CacheFactory().set("mcast-port", "0").create();
+    try {
+      try {
+        OffHeapStorage.createOffHeapStorage(null, c.getDistributedSystem(), OffHeapStorage.MIN_SLAB_SIZE-1, c.getDistributedSystem());
+      } catch (IllegalArgumentException expected) {
+        assertEquals("The amount of off heap memory must be at least " + OffHeapStorage.MIN_SLAB_SIZE + " but it was set to " + (OffHeapStorage.MIN_SLAB_SIZE-1), expected.getMessage());
+      }
+      try {
+        OffHeapStorage.createOffHeapStorage(null, c.getDistributedSystem(), OffHeapStorage.MIN_SLAB_SIZE, null);
+      } catch (IllegalArgumentException expected) {
+        assertEquals("InternalDistributedSystem is null", expected.getMessage());
+      }
+      MemoryAllocator ma = OffHeapStorage.createOffHeapStorage(null, c.getDistributedSystem(), 1024*1024, c.getDistributedSystem());
+      try {
+        OffHeapMemoryStats stats = ma.getStats();
+        assertEquals(1024*1024, stats.getFreeMemory());
+        assertEquals(1024*1024, stats.getMaxMemory());
+        assertEquals(0, stats.getUsedMemory());
+        assertEquals(0, stats.getCompactions());
+        assertEquals(0, stats.getCompactionTime());
+        assertEquals(0, stats.getFragmentation());
+        assertEquals(1, stats.getFragments());
+        assertEquals(1024*1024, stats.getLargestFragment());
+        assertEquals(0, stats.getObjects());
+        assertEquals(0, stats.getReads());
+
+        stats.incFreeMemory(100);
+        assertEquals(1024*1024+100, stats.getFreeMemory());
+        stats.incFreeMemory(-100);
+        assertEquals(1024*1024, stats.getFreeMemory());
+
+        stats.incMaxMemory(100);
+        assertEquals(1024*1024+100, stats.getMaxMemory());
+        stats.incMaxMemory(-100);
+        assertEquals(1024*1024, stats.getMaxMemory());
+
+        stats.incUsedMemory(100);
+        assertEquals(100, stats.getUsedMemory());
+        stats.incUsedMemory(-100);
+        assertEquals(0, stats.getUsedMemory());
+
+        stats.incObjects(100);
+        assertEquals(100, stats.getObjects());
+        stats.incObjects(-100);
+        assertEquals(0, stats.getObjects());
+
+        stats.incReads();
+        assertEquals(1, stats.getReads());
+
+        stats.setFragmentation(100);
+        assertEquals(100, stats.getFragmentation());
+        stats.setFragmentation(0);
+        assertEquals(0, stats.getFragmentation());
+
+        stats.setFragments(2);
+        assertEquals(2, stats.getFragments());
+        stats.setFragments(1);
+        assertEquals(1, stats.getFragments());
+
+        stats.setLargestFragment(100);
+        assertEquals(100, stats.getLargestFragment());
+        stats.setLargestFragment(1024*1024);
+        assertEquals(1024*1024, stats.getLargestFragment());
+
+        boolean originalEnableClockStats = DistributionStats.enableClockStats;
+        DistributionStats.enableClockStats = true;
+        try {
+          long start = stats.startCompaction();
+          while (stats.startCompaction() == start) {
+            Thread.yield();
+          }
+          stats.endCompaction(start);
+          assertEquals(1, stats.getCompactions());
+          assertTrue(stats.getCompactionTime() > 0);
+        } finally {
+          DistributionStats.enableClockStats = originalEnableClockStats;
+        }
+
+        stats.incObjects(100);
+        stats.incUsedMemory(100);
+        stats.setFragmentation(100);
+        OffHeapStorage ohs = (OffHeapStorage) stats;
+        ohs.initialize(new NullOffHeapMemoryStats());
+        assertEquals(0, stats.getFreeMemory());
+        assertEquals(0, stats.getMaxMemory());
+        assertEquals(0, stats.getUsedMemory());
+        assertEquals(0, stats.getCompactions());
+        assertEquals(0, stats.getCompactionTime());
+        assertEquals(0, stats.getFragmentation());
+        assertEquals(0, stats.getFragments());
+        assertEquals(0, stats.getLargestFragment());
+        assertEquals(0, stats.getObjects());
+        assertEquals(0, stats.getReads());
+        System.setProperty(OffHeapStorage.STAY_CONNECTED_ON_OUTOFOFFHEAPMEMORY_PROPERTY, "true");
+        try {
+          try {
+            ma.allocate(1024*1024+1, null);
+            fail("expected OutOfOffHeapMemoryException");
+          } catch (OutOfOffHeapMemoryException expected) {
+          }
+          assertTrue(c.getDistributedSystem().isConnected());
+          try {
+            ma.allocate(1024*1024+1, null);
+            fail("expected OutOfOffHeapMemoryException");
+          } catch (OutOfOffHeapMemoryException expected) {
+          }
+          assertTrue(c.getDistributedSystem().isConnected());
+        } finally {
+          System.clearProperty(OffHeapStorage.STAY_CONNECTED_ON_OUTOFOFFHEAPMEMORY_PROPERTY);
+        }
+        try {
+          ma.allocate(1024*1024+1, null);
+          fail("expected OutOfOffHeapMemoryException");
+        } catch (OutOfOffHeapMemoryException expected) {
+        }
+        try {
+          ma.allocate(1024*1024+1, null);
+          fail("expected OutOfOffHeapMemoryException");
+        } catch (OutOfOffHeapMemoryException expected) {
+        }
+        Awaitility.await().atMost(5, TimeUnit.SECONDS).until(() -> {
+          return !c.getDistributedSystem().isConnected();
+        });
+
+      } finally {
+        System.setProperty(SimpleMemoryAllocatorImpl.FREE_OFF_HEAP_MEMORY_PROPERTY, "true");
+        try {
+          ma.close();
+        } finally {
+          System.clearProperty(SimpleMemoryAllocatorImpl.FREE_OFF_HEAP_MEMORY_PROPERTY);
+        }
+      }
+   } finally {
+      c.close();
+    }
+  }
+  @Test
+  public void testCalcSlabCount() {
+    final long MSS = OffHeapStorage.MIN_SLAB_SIZE;
+    assertEquals(100, OffHeapStorage.calcSlabCount(MSS*4, MSS*4*100));
+    assertEquals(100, OffHeapStorage.calcSlabCount(MSS*4, (MSS*4*100) + (MSS-1)));
+    assertEquals(101, OffHeapStorage.calcSlabCount(MSS*4, (MSS*4*100) + MSS));
+    assertEquals(Integer.MAX_VALUE, OffHeapStorage.calcSlabCount(MSS, MSS * Integer.MAX_VALUE));
+    assertEquals(Integer.MAX_VALUE, OffHeapStorage.calcSlabCount(MSS, (MSS * Integer.MAX_VALUE) + MSS-1));
+    try {
+      OffHeapStorage.calcSlabCount(MSS, (((long)MSS) * Integer.MAX_VALUE) + MSS);
+      fail("Expected IllegalArgumentException");
+    } catch (IllegalArgumentException expected) {
+    }
+  }
 }
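
For readers following the slab arithmetic exercised by testCalcSlabCount above:
the count is the configured off-heap size divided by the maximum slab size, plus
one extra slab only when the remainder is at least the minimum slab size (1024
bytes). A standalone sketch of that arithmetic, using a locally defined constant
rather than the OffHeapStorage internals and omitting the overflow check behind
the test's IllegalArgumentException case:

    public class SlabCountSketch {

      // Local stand-in for the constant the test references.
      static final long MIN_SLAB_SIZE = 1024;

      // How many slabs of at most maxSlabSize bytes cover offHeapMemorySize,
      // ignoring a trailing remainder smaller than MIN_SLAB_SIZE.
      static long slabCount(long maxSlabSize, long offHeapMemorySize) {
        long count = offHeapMemorySize / maxSlabSize;
        if ((offHeapMemorySize % maxSlabSize) >= MIN_SLAB_SIZE) {
          count++;
        }
        return count;
      }

      public static void main(String[] args) {
        long mss = MIN_SLAB_SIZE;
        System.out.println(slabCount(mss * 4, mss * 4 * 100));             // 100
        System.out.println(slabCount(mss * 4, (mss * 4 * 100) + mss - 1)); // still 100
        System.out.println(slabCount(mss * 4, (mss * 4 * 100) + mss));     // 101
      }
    }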


[05/50] [abbrv] incubator-geode git commit: "new" unit tests. These were unnecessarily dependent on some classes remaining in the closed-source GemFire repo.

Posted by kl...@apache.org.
"new" unit tests.  These were unnecessarily dependent on some classes remaining
in the closed-source GemFire repo.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/8f9b3216
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/8f9b3216
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/8f9b3216

Branch: refs/heads/feature/GEODE-291
Commit: 8f9b32161815fe5c007616adb4a3db5df78cd3b4
Parents: 812d51c
Author: Bruce Schuchardt <bs...@pivotal.io>
Authored: Mon Dec 7 13:06:50 2015 -0800
Committer: Bruce Schuchardt <bs...@pivotal.io>
Committed: Mon Dec 7 13:10:41 2015 -0800

----------------------------------------------------------------------
 .../InstantiatorPropagationDUnitTest.java       | 1761 ++++++++++++++++++
 1 file changed, 1761 insertions(+)
----------------------------------------------------------------------
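
For context on the mechanism this test file exercises: each TestObjectN class in
the diff below registers a DataSerializable Instantiator in a static initializer,
and the DUnit tests then assert that the registration propagates to the other
clients and servers in the distributed system. A minimal sketch of that
registration pattern, mirroring the TestObject1 static block (the class name and
id here are hypothetical):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    import com.gemstone.gemfire.DataSerializable;
    import com.gemstone.gemfire.Instantiator;

    public class ExampleValue implements DataSerializable {

      // Registering an Instantiator tells the cache how to create instances of
      // this class during deserialization; it is this registration that the
      // tests expect to see propagated to every member.
      static {
        Instantiator.register(new Instantiator(ExampleValue.class, -120001) {
          public DataSerializable newInstance() {
            return new ExampleValue();
          }
        });
      }

      private int field;

      public ExampleValue() {
      }

      public void toData(DataOutput out) throws IOException {
        out.writeInt(this.field);
      }

      public void fromData(DataInput in) throws IOException, ClassNotFoundException {
        this.field = in.readInt();
      }
    }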


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8f9b3216/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java
new file mode 100644
index 0000000..063112a
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java
@@ -0,0 +1,1761 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.internal.cache.tier.sockets;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Properties;
+import java.util.Random;
+
+import com.gemstone.gemfire.DataSerializable;
+import com.gemstone.gemfire.Instantiator;
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheException;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.MirrorType;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.cache.client.Pool;
+import com.gemstone.gemfire.cache.client.PoolManager;
+import com.gemstone.gemfire.cache.client.internal.PoolImpl;
+import com.gemstone.gemfire.cache.server.CacheServer;
+import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
+import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.internal.InternalInstantiator;
+import com.gemstone.gemfire.internal.cache.CacheServerImpl;
+import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
+import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
+import com.gemstone.gemfire.internal.cache.EventID;
+
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.VM;
+
+public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
+  private static Cache cache = null;
+
+  private static VM client1 = null;
+
+  private static VM client2 = null;
+
+  private static VM server1 = null;
+
+  private static VM server2 = null;
+
+  private static int PORT1 = -1;
+
+  private static int PORT2 = -1;
+
+  private static int instanceCountWithAllPuts = 3;
+
+  private static int instanceCountWithOnePut = 1;
+
+  private static final String REGION_NAME = "ClientServerInstantiatorRegistrationDUnitTest";
+  
+  protected static EventID eventId;
+
+  static boolean testEventIDResult = false;
+
+  public static boolean testObject20Loaded = false;
+
+
+
+
+  public InstantiatorPropagationDUnitTest(String name) {
+    super(name);
+    // TODO Auto-generated constructor stub
+  }
+
+  public void setUp() throws Exception {
+    super.setUp();
+    final Host host = Host.getHost(0);
+    client1 = host.getVM(0);
+    client2 = host.getVM(1);
+    server1 = host.getVM(2);
+    server2 = host.getVM(3);
+  }
+
+  private void createCache(Properties props) throws Exception {
+    DistributedSystem ds = getSystem(props);
+    ds.disconnect();
+    ds = getSystem(props);
+    assertNotNull(ds);
+    cache = CacheFactory.create(ds);
+    assertNotNull(cache);
+  }
+
+  public static void createClientCache(String host, Integer port1)
+      throws Exception {
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    props.setProperty(DistributionConfig.LOCATORS_NAME, "");
+    new InstantiatorPropagationDUnitTest("temp").createCache(props);
+    Pool p = PoolManager.createFactory().addServer(host, port1.intValue())
+        .setMinConnections(1).setSubscriptionEnabled(true).setPingInterval(200)
+        .create("ClientServerInstantiatorRegistrationDUnitTestPool");
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(Scope.DISTRIBUTED_ACK);
+    factory.setPoolName(p.getName());
+    Region r = cache.createRegion(REGION_NAME, factory.create());
+    r.registerInterest("ALL_KEYS");
+  }
+
+  protected int getMaxThreads() {
+    return 0;
+  }
+
+  private int initServerCache(VM server) {
+    Object[] args = new Object[] { new Integer(getMaxThreads()) };
+    return ((Integer)server.invoke(InstantiatorPropagationDUnitTest.class,
+        "createServerCache", args)).intValue();
+  }
+
+  public static Integer createServerCache(Integer maxThreads) throws Exception {
+    new InstantiatorPropagationDUnitTest("temp").createCache(new Properties());
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(Scope.DISTRIBUTED_ACK);
+    factory.setMirrorType(MirrorType.KEYS_VALUES);
+    RegionAttributes attrs = factory.create();
+    cache.createRegion(REGION_NAME, attrs);
+    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    CacheServer server1 = cache.addCacheServer();
+    server1.setPort(port);
+    server1.setMaxThreads(maxThreads.intValue());
+    server1.start();
+    return new Integer(port);
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    super.tearDown2();
+    // close the clients first
+    closeCache();
+    client1.invoke(InstantiatorPropagationDUnitTest.class, "closeCache");
+    client2.invoke(InstantiatorPropagationDUnitTest.class, "closeCache");
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class, "closeCache");
+    server1.invoke(InstantiatorPropagationDUnitTest.class, "closeCache");
+  }
+
+  public static void closeCache() {
+    if (cache != null && !cache.isClosed()) {
+      cache.close();
+      cache.getDistributedSystem().disconnect();
+    }
+  }
+  
+  public static void unregisterInstantiatorsInAllVMs() {
+    invokeInEveryVM(DistributedTestCase.class, "unregisterInstantiatorsInThisVM");
+  }
+
+  public static void verifyInstantiators(final int numOfInstantiators) {
+    WaitCriterion wc = new WaitCriterion() {
+      String excuse;
+
+      public boolean done() {
+        return InternalInstantiator.getInstantiators().length == numOfInstantiators;
+      }
+
+      public String description() {
+        return "expected " + numOfInstantiators + " but got this "
+            + InternalInstantiator.getInstantiators().length
+          + " instantiators=" + java.util.Arrays.toString(InternalInstantiator.getInstantiators());
+      }
+    };
+    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+  }
+
+  public static void registerTestObject1() throws Exception {
+
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject1");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject1", e);
+    }
+  }
+
+  public static void registerTestObject2() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject2");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject2", e);
+    }
+  }
+
+  public static void registerTestObject3() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject3");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject3", e);
+    }
+  }
+
+  public static void registerTestObject4() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject4");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject4", e);
+    }
+  }
+
+  public static void registerTestObject5() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject5");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject5", e);
+    }
+  }
+
+  public static void registerTestObject6() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject6");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject6", e);
+    }
+  }
+
+  public static void registerTestObject7() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject7");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject7", e);
+    }
+  }
+
+  public static void registerTestObject8() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject8");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject8", e);
+    }
+  }
+
+  public static void registerTestObject9() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject9");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject9", e);
+    }
+  }
+
+  public static void registerTestObject10() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject10");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject10", e);
+    }
+  }
+
+  public static void registerTestObject11() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject11");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject11", e);
+    }
+  }
+
+  public static void registerTestObject12() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject12");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject11", e);
+    }
+  }
+
+  public static void registerTestObject13() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject13");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject13", e);
+    }
+  }
+
+  public static void registerTestObject14() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject14");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject14", e);
+    }
+  }
+
+  public static void registerTestObject15() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject15");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject15", e);
+    }
+  }
+
+  public static void registerTestObject16() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject16");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject16", e);
+    }
+  }
+
+  public static void registerTestObject17() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject17");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject17", e);
+    }
+  }
+
+  public static void registerTestObject18() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject18");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject18", e);
+    }
+  }
+  
+  public static void registerTestObject19() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject19");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject19", e);
+    }
+  }
+
+  public static void registerTestObject20() throws Exception {
+    try {
+      Class cls = Class
+          .forName("com.gemstone.gemfire.internal.cache.tier.sockets.TestObject20");
+      ConfigurableObject obj = (ConfigurableObject)cls.newInstance();
+      obj.init(0);
+    }
+    catch (Exception e) {
+      fail("Test failed due to exception in TestObject20", e);
+    }
+  }
+
+  public static void stopServer() {
+    try {
+      assertEquals("Expected exactly one BridgeServer", 1, cache
+          .getCacheServers().size());
+      CacheServerImpl bs = (CacheServerImpl)cache.getCacheServers()
+          .iterator().next();
+      assertNotNull(bs);
+      bs.stop();
+    }
+    catch (Exception ex) {
+      fail("while setting stopServer  " + ex);
+    }
+  }
+
+  public static void startServer() {
+    try {
+      Cache c = CacheFactory.getAnyInstance();
+      assertEquals("Expected exactly one BridgeServer", 1, c.getCacheServers()
+          .size());
+      CacheServerImpl bs = (CacheServerImpl)c.getCacheServers().iterator()
+          .next();
+      assertNotNull(bs);
+      bs.start();
+    }
+    catch (Exception ex) {
+      fail("while startServer()  " + ex);
+    }
+  }
+
+  /**
+   * In this test the server is up first. Two instantiators are registered on
+   * it. Verifies that both instantiators are propagated to the client when the
+   * client connects.
+   */
+  public void testServerUpFirstClientLater() throws Exception {
+    PORT1 = initServerCache(server1);
+
+    unregisterInstantiatorsInAllVMs();
+    
+    pause(3000);
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "registerTestObject1");
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "registerTestObject2");
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(2) });
+
+    client1
+        .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+            new Object[] { getServerHostName(server1.getHost()),
+                new Integer(PORT1) });
+
+    // wait for client1 to come online
+    pause(3000);
+
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(2) });
+
+    // Put some entries from the client
+    client1.invoke(new CacheSerializableRunnable("Put entries from client") {
+      public void run2() throws CacheException {
+        Region region = cache.getRegion(REGION_NAME);
+        for (int i = 1; i <= 10; i++) {
+          region.put(i, i);
+        }
+      }
+    });
+
+    // Run getAll
+    client1
+        .invoke(new CacheSerializableRunnable("Get all entries from server") {
+          public void run2() throws CacheException {
+            // Invoke getAll
+            Region region = cache.getRegion(REGION_NAME);
+            // Verify result size is correct
+            assertEquals(1, region.get(1));
+          }
+        });
+
+    server1.invoke(new CacheSerializableRunnable("Put entry from client") {
+      public void run2() throws CacheException {
+        Region region = cache.getRegion(REGION_NAME);
+        region.put(1, 20);
+      }
+    });
+    //
+    pause(3000);
+    // Run getAll
+    client1.invoke(new CacheSerializableRunnable("Get entry from client") {
+      public void run2() throws CacheException {
+        // Invoke getAll
+        Region region = cache.getRegion(REGION_NAME);
+        // Verify result size is correct
+        assertEquals(20, region.get(1));
+      }
+    });
+
+    unregisterInstantiatorsInAllVMs();
+  }
+
+  /**
+   * In this test there are 2 clients and 2 servers. One instantiator is
+   * registered on one client. Verifies that the instantiator is propagated to
+   * the server that client is connected to (server1), to the other server
+   * (server2) in the DS, and to the client (client2) connected to server2.
+   */
+  public void testInstantiatorsWith2ClientsN2Servers() throws Exception {
+    PORT1 = initServerCache(server1);
+    PORT2 = initServerCache(server2);
+
+    pause(3000);
+
+    client1
+        .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+            new Object[] { getServerHostName(server1.getHost()),
+                new Integer(PORT1) });
+    client2
+        .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+            new Object[] { getServerHostName(server1.getHost()),
+                new Integer(PORT2) });
+
+    unregisterInstantiatorsInAllVMs();
+
+    // wait for client2 to come online
+    pause(2000);
+
+
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "registerTestObject3");
+    pause(4000);
+
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(1) });
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(1) });
+
+    server2.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(1) });
+
+    client2.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(1) });
+
+    unregisterInstantiatorsInAllVMs();
+  }
+
+  /**
+   * First registers an instantiator on client1, then stops the cache server on
+   * server1 and registers 2 more instantiators on server1. Verifies that server1,
+   * server2 and client2 have all 3 instantiators, while client1 has only 1
+   * because its server was stopped when the other 2 were registered.
+   */
+  public void testInstantiatorsWithServerKill() throws Exception {
+    PORT1 = initServerCache(server1);
+    PORT2 = initServerCache(server2);
+
+    client1
+        .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+            new Object[] { getServerHostName(server1.getHost()),
+                new Integer(PORT1) });
+    client2
+        .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+            new Object[] { getServerHostName(server1.getHost()),
+                new Integer(PORT2) });
+
+    unregisterInstantiatorsInAllVMs();
+
+    // wait for client2 to come online
+    pause(2000);
+
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "registerTestObject4");
+    pause(4000);
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class, "stopServer");
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "registerTestObject5");
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "registerTestObject6");
+
+    server2.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(
+            instanceCountWithAllPuts) });
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(
+            instanceCountWithAllPuts) });
+
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(
+            instanceCountWithOnePut) });
+
+    client2.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(
+            instanceCountWithAllPuts) });
+
+    unregisterInstantiatorsInAllVMs();
+  }
+
+  /**
+   * 2 clients and 2 servers. Instantiators are registered on both a client and
+   * a server to check that propagation works in both directions (client to
+   * server and server to client). Differs from the previous test in that no
+   * server is stopped, so an instantiator registered on a server should also
+   * propagate to the clients.
+   */
+  public void testInstantiators() throws Exception {
+    PORT1 = initServerCache(server1);
+    PORT2 = initServerCache(server2);
+
+    client1
+        .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+            new Object[] { getServerHostName(server1.getHost()),
+                new Integer(PORT1) });
+    client2
+        .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+            new Object[] { getServerHostName(server1.getHost()),
+                new Integer(PORT2) });
+
+    unregisterInstantiatorsInAllVMs();
+
+    // wait for client2 to come online
+    pause(2000);
+
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "registerTestObject10");
+    pause(4000);
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "registerTestObject11");
+    pause(4000);
+
+    server2.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(2) });
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(2) });
+
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(2) });
+
+    client2.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(2) });
+
+    unregisterInstantiatorsInAllVMs();
+  }
+
+  /**
+   * Tests the number of instantiators at all clients & servers when one server
+   * is stopped and then restarted.
+   */
+  public void _testInstantiatorsWithServerKillAndReInvoked() throws Exception {
+    PORT1 = initServerCache(server1);
+    PORT2 = initServerCache(server2);
+    client1
+        .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+            new Object[] { getServerHostName(server1.getHost()),
+                new Integer(PORT1) });
+    client2
+        .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+            new Object[] { getServerHostName(server1.getHost()),
+                new Integer(PORT2) });
+
+    unregisterInstantiatorsInAllVMs();
+
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "registerTestObject7");
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(
+            instanceCountWithOnePut) });
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(
+            instanceCountWithOnePut) });
+
+    server2.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(
+            instanceCountWithOnePut) });
+
+    client2.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(
+            instanceCountWithOnePut) });
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class, "stopServer");
+
+    try {
+      client1.invoke(InstantiatorPropagationDUnitTest.class,
+          "registerTestObject8");
+    }
+    catch (Exception expected) {// expected: we are registering on a client
+      // whose server is down
+    }
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class, "startServer");
+
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(
+            instanceCountWithAllPuts) });
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(
+            instanceCountWithAllPuts) });
+
+    server2.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(
+            instanceCountWithAllPuts) });
+
+    unregisterInstantiatorsInAllVMs();
+  }
+
+  /**
+   * In this test there are 2 clients connected to 1 server and 1 client
+   * connected to the other server. One instantiator is registered on one client
+   * (client1). Verifies that the instantiator is propagated to the server that
+   * client is connected to (server1), to client2, to the other server (server2)
+   * in the DS, and to the client that is connected to server2.
+   */
+  public void testInstantiatorCount() throws Exception {
+    PORT1 = initServerCache(server1);
+    PORT2 = initServerCache(server2);
+
+    client1
+        .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+            new Object[] { getServerHostName(server1.getHost()),
+                new Integer(PORT1) });
+    client2
+        .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+            new Object[] { getServerHostName(server1.getHost()),
+                new Integer(PORT1) });
+    createClientCache(getServerHostName(server2.getHost()), new Integer(PORT2));
+    unregisterInstantiatorsInAllVMs();
+
+    // wait for client2 to come online
+    pause(2000);
+
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "registerTestObject12");
+    pause(4000);
+
+    client1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(1) });
+
+    server1.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(1) });
+
+    server2.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(1) });
+
+    client2.invoke(InstantiatorPropagationDUnitTest.class,
+        "verifyInstantiators", new Object[] { new Integer(1) });
+
+    verifyInstantiators(1);
+
+    unregisterInstantiatorsInAllVMs();
+  }
+
+  public static void createClientCache_EventId(String host, Integer port1) throws Exception
+  {
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    props.setProperty(DistributionConfig.LOCATORS_NAME, "");
+    new InstantiatorPropagationDUnitTest("temp").createCache(props);
+    Pool p = PoolManager.createFactory()
+      .addServer(host, port1.intValue())
+      .setSubscriptionEnabled(true)
+      .create("RegisterInstantiatorEventIdDUnitTestPool");
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(Scope.LOCAL);
+    factory.setPoolName(p.getName());
+    cache.createRegion(REGION_NAME, factory.create());
+  }
+  /**
+   * Tests that the eventId for an instantiator registration is the same at all
+   * clients & servers.
+   */
+  // disabled - the eventID received does not match the sender's eventID.  Why is this a requirement anyway?
+  public void _testInstantiatorsEventIdVerificationClientsAndServers()
+      throws Exception {
+    PORT1 = initServerCache(server1, 1);
+    PORT2 = initServerCache(server2, 2);
+
+    createClientCache_EventId(getServerHostName(server1.getHost()), new Integer(PORT1));
+
+    unregisterInstantiatorsInAllVMs();
+    
+    client2.invoke(InstantiatorPropagationDUnitTest.class,
+        "createClientCache_EventId", new Object[] {
+            getServerHostName(server1.getHost()), new Integer(PORT2) });
+    setClientServerObserver1();
+    client2.invoke(InstantiatorPropagationDUnitTest.class,
+        "setClientServerObserver2");
+
+    registerTestObject19();
+
+    pause(10000);
+
+    Boolean pass = (Boolean)client2.invoke(
+        InstantiatorPropagationDUnitTest.class, "verifyResult");
+    assertTrue("EventId found Different", pass.booleanValue());
+
+    PoolImpl.IS_INSTANTIATOR_CALLBACK = false;
+
+  }
+  
+  public void testLazyRegistrationOfInstantiators()
+      throws Exception {
+    try {
+      PORT1 = initServerCache(server1);
+      PORT2 = initServerCache(server2);
+  
+      unregisterInstantiatorsInAllVMs();
+
+      pause(3000);
+  
+      createClientCache(getServerHostName(server1.getHost()),
+          new Integer(PORT1));
+  
+      client2
+          .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
+              new Object[] {getServerHostName(server2.getHost()),
+                  new Integer(PORT2)});
+  
+      pause(3000);
+      unregisterInstantiatorsInAllVMs();
+  
+      assertTestObject20NotLoaded();
+      server1.invoke(InstantiatorPropagationDUnitTest.class, "assertTestObject20NotLoaded");
+      server2.invoke(InstantiatorPropagationDUnitTest.class, "assertTestObject20NotLoaded");
+      client2.invoke(InstantiatorPropagationDUnitTest.class, "assertTestObject20NotLoaded");
+  
+      registerTestObject20();
+      pause(5000);
+      assertTestObject20Loaded();
+      server1.invoke(InstantiatorPropagationDUnitTest.class, "assertTestObject20Loaded");
+      //server2.invoke(InstantiatorPropagationDUnitTest.class, "assertTestObject20Loaded"); // classes are not initialized after loading in p2p path
+      client2.invoke(InstantiatorPropagationDUnitTest.class, "assertTestObject20NotLoaded");
+    } finally {
+      unregisterInstantiatorsInAllVMs();
+      disconnectAllFromDS();
+    }
+  }
+
+  public static void assertTestObject20Loaded() {
+    assertTrue("TestObject20 is expected to be loaded into VM.", testObject20Loaded);
+  }
+
+  public static void assertTestObject20NotLoaded() {
+    assertFalse("TestObject20 is not expected to be loaded into VM.", testObject20Loaded);
+  }
+
+  public static Boolean verifyResult() {
+    boolean temp = testEventIDResult;
+    testEventIDResult = false;
+    return new Boolean(temp);
+  }
+  
+  /**
+   * Initializes the appropriate server cache.
+   * 
+   * @param server the VM in which to create the cache server
+   * @param serverNo which server configuration to use (1 or 2)
+   * @return the port on which the cache server is listening
+   */
+
+  private int initServerCache(VM server, int serverNo)
+  {
+    Object[] args = new Object[] { new Integer(getMaxThreads()) };
+    if (serverNo == 1) {
+      return ((Integer)server.invoke(
+          InstantiatorPropagationDUnitTest.class,
+          "createServerCacheOne", args)).intValue();
+    }
+    else {
+      return ((Integer)server.invoke(
+          InstantiatorPropagationDUnitTest.class,
+          "createServerCacheTwo", args)).intValue();
+    }
+  }
+
+  /**
+   * This method creates the server cache
+   * 
+   * @param maxThreads
+   * @return
+   * @throws Exception
+   */
+  public static Integer createServerCacheTwo(Integer maxThreads)
+      throws Exception
+  {
+    new InstantiatorPropagationDUnitTest("temp")
+        .createCache(new Properties());
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(Scope.DISTRIBUTED_ACK);
+    factory.setMirrorType(MirrorType.KEYS_VALUES);
+
+    RegionAttributes attrs = factory.create();
+    cache.createRegion(REGION_NAME, attrs);
+    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    CacheServer server1 = cache.addCacheServer();
+    server1.setPort(port);
+    server1.setMaxThreads(maxThreads.intValue());
+    server1.setNotifyBySubscription(true);
+    server1.start();
+    return new Integer(port);
+  }
+
+  /**
+   * Creates the first server cache and starts a cache server on a random available port.
+   * 
+   * @param maxThreads the maximum number of threads for the cache server
+   * @return the port number the cache server is listening on
+   * @throws Exception if the cache or cache server cannot be created
+   */
+  public static Integer createServerCacheOne(Integer maxThreads)
+      throws Exception
+  {
+    new InstantiatorPropagationDUnitTest("temp")
+        .createCache(new Properties());
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(Scope.DISTRIBUTED_ACK);
+    factory.setMirrorType(MirrorType.KEYS_VALUES);
+    RegionAttributes attrs = factory.create();
+    cache.createRegion(REGION_NAME, attrs);
+    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    CacheServer server1 = cache.addCacheServer();
+    server1.setPort(port);
+    server1.setMaxThreads(maxThreads.intValue());
+    server1.setNotifyBySubscription(true);
+    server1.start();
+    return new Integer(port);
+  }
+
+  public static void setClientServerObserver1()
+  {
+    PoolImpl.IS_INSTANTIATOR_CALLBACK = true;
+    ClientServerObserverHolder
+        .setInstance(new ClientServerObserverAdapter() {
+          public void beforeSendingToServer(EventID eventID)
+          {
+            eventId = eventID;
+            System.out.println("client2= "+client2 + " eventid= "+eventID);
+            client2.invoke(InstantiatorPropagationDUnitTest.class,
+                "setEventId", new Object[] { eventId });
+
+          }
+
+        });
+  }
+
+  /**
+   * Sets the EventID value in this VM.
+   * 
+   * @param eventID the EventID captured by the client/server observer
+   */
+  public static void setEventId(EventID eventID)
+  {
+    eventId = eventID;
+  }
+  
+  public static void setClientServerObserver2()
+  {
+    PoolImpl.IS_INSTANTIATOR_CALLBACK = true;
+    ClientServerObserverHolder
+        .setInstance(new ClientServerObserverAdapter() {
+          public void afterReceivingFromServer(EventID eventID)
+          {
+            System.out.println("Observer2 received " + eventID + "; my eventID is " + eventId);
+            testEventIDResult = eventID.equals(eventId);
+          }
+
+        });
+  }
+}
+
+
+abstract class ConfigurableObject {
+  public abstract void init(int index);
+  public abstract int getIndex();
+  public abstract void validate(int index);
+}
+
+
+
+
+class TestObject1 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  public TestObject1() {
+  }
+
+  /**
+   * Registers an Instantiator for TestObject1.
+   */
+  static {
+    Instantiator.register(new Instantiator(TestObject1.class, -100123) {
+      public DataSerializable newInstance() {
+        return new TestObject1();
+      }
+    });
+  }
+
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+
+}
+
+class TestObject2 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  public TestObject2() {
+  }
+
+  static {
+    Instantiator.register(new Instantiator(TestObject2.class, -100122) {
+      public DataSerializable newInstance() {
+        return new TestObject2();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+
+}
+
+class TestObject3 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  public TestObject3() {
+  }
+
+  static {
+    Instantiator.register(new Instantiator(TestObject3.class, -121) {
+      public DataSerializable newInstance() {
+        return new TestObject3();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject4 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  public TestObject4() {
+  }
+
+  static {
+    Instantiator.register(new Instantiator(TestObject4.class, -122) {
+      public DataSerializable newInstance() {
+        return new TestObject4();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject5 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  public TestObject5() {
+  }
+
+  static {
+    Instantiator.register(new Instantiator(TestObject5.class, -123) {
+      public DataSerializable newInstance() {
+        return new TestObject5();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject6 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  public TestObject6() {
+  }
+
+  static {
+    Instantiator.register(new Instantiator(TestObject6.class, -124) {
+      public DataSerializable newInstance() {
+        return new TestObject6();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject7 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  public TestObject7() {
+  }
+
+  static {
+    Instantiator.register(new Instantiator(TestObject7.class, -125) {
+      public DataSerializable newInstance() {
+        return new TestObject7();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject8 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  public TestObject8() {
+  }
+
+  static {
+    Instantiator.register(new Instantiator(TestObject8.class, -126) {
+      public DataSerializable newInstance() {
+        return new TestObject8();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject9 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  public TestObject9() {
+  }
+
+  static {
+    Instantiator.register(new Instantiator(TestObject9.class, -127) {
+      public DataSerializable newInstance() {
+        return new TestObject9();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject10 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  public TestObject10() {
+  }
+
+  static {
+    Instantiator.register(new Instantiator(TestObject10.class, -128) {
+      public DataSerializable newInstance() {
+        return new TestObject10();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject11 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  static {
+    Instantiator.register(new Instantiator(TestObject11.class, -129) {
+      public DataSerializable newInstance() {
+        return new TestObject11();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject12 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  static {
+    Instantiator.register(new Instantiator(TestObject12.class, -130) {
+      public DataSerializable newInstance() {
+        return new TestObject12();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject13 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  static {
+    Instantiator.register(new Instantiator(TestObject13.class, -131) {
+      public DataSerializable newInstance() {
+        return new TestObject13();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject14 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  static {
+    Instantiator.register(new Instantiator(TestObject14.class, -132) {
+      public DataSerializable newInstance() {
+        return new TestObject14();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject15 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  static {
+    Instantiator.register(new Instantiator(TestObject15.class, -133) {
+      public DataSerializable newInstance() {
+        return new TestObject15();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject16 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  static {
+    Instantiator.register(new Instantiator(TestObject16.class, -134) {
+      public DataSerializable newInstance() {
+        return new TestObject16();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject17 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  static {
+    Instantiator.register(new Instantiator(TestObject17.class, -135) {
+      public DataSerializable newInstance() {
+        return new TestObject17();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject18 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  static {
+    Instantiator.register(new Instantiator(TestObject18.class, -1136) {
+      public DataSerializable newInstance() {
+        return new TestObject18();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject19 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  static {
+    Instantiator.register(new Instantiator(TestObject19.class, -136) {
+      public DataSerializable newInstance() {
+        return new TestObject19();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+}
+
+class TestObject20 extends ConfigurableObject implements DataSerializable {
+
+  private int field1;
+
+  static {
+    InstantiatorPropagationDUnitTest.testObject20Loaded = true;
+    Instantiator.register(new Instantiator(TestObject20.class, -138) {
+      public DataSerializable newInstance() {
+        return new TestObject20();
+      }
+    });
+  }
+
+  /**
+   * Initializes this instance with a random field value.
+   */
+  public void init(int index) {
+    Random random = new Random();
+    this.field1 = random.nextInt();
+  }
+
+  public int getIndex() {
+    return 1;
+  }
+
+  public void validate(int index) {
+  }
+
+  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+    this.field1 = in.readInt();
+  }
+
+  public void toData(DataOutput out) throws IOException {
+    out.writeInt(this.field1);
+  }
+  
+}
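
The TestObject classes above all repeat a single registration idiom. A minimal, self-contained sketch of that idiom follows; the class name ExampleValue and the class id 900000 are illustrative only and are not part of this patch:

  import java.io.DataInput;
  import java.io.DataOutput;
  import java.io.IOException;

  import com.gemstone.gemfire.DataSerializable;
  import com.gemstone.gemfire.Instantiator;

  // Hypothetical example type; the class id (900000 here) must be non-zero and unique cluster-wide.
  class ExampleValue implements DataSerializable {

    static {
      // Registration runs in a static initializer, so it fires the first time the class is
      // loaded -- the same mechanism the testObject20Loaded flag above relies on.
      Instantiator.register(new Instantiator(ExampleValue.class, 900000) {
        public DataSerializable newInstance() {
          return new ExampleValue();
        }
      });
    }

    private int field1;

    public ExampleValue() {
    }

    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
      this.field1 = in.readInt();
    }

    public void toData(DataOutput out) throws IOException {
      out.writeInt(this.field1);
    }
  }

Once one member registers such an Instantiator, the server propagates the registration to connected clients and peers, which is the behavior the DUnit tests above exercise.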


[19/50] [abbrv] incubator-geode git commit: GEODE-638: Add build task to allow for custom set of tests to be run

Posted by kl...@apache.org.
GEODE-638: Add build task to allow for custom set of tests to be run


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a6398d91
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a6398d91
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a6398d91

Branch: refs/heads/feature/GEODE-291
Commit: a6398d919685d63c2bc89c1f4f605a5b73f3f257
Parents: eddef32
Author: Jens Deppe <jd...@pivotal.io>
Authored: Mon Dec 7 15:25:15 2015 -0800
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Tue Dec 8 09:34:52 2015 -0800

----------------------------------------------------------------------
 build.gradle | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a6398d91/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index b5465b8..0c94573 100755
--- a/build.gradle
+++ b/build.gradle
@@ -386,7 +386,23 @@ subprojects {
     //I'm hoping this might deal with SOME OOMEs I've seen
     forkEvery 30
   }
-  
+
+  // By providing a file with an arbitrary list of test classes, we can select only those
+  // tests to run. Activated by running the customTest task with -Dcustom.tests=<file>.
+  def customTestList = []
+  def customTestFile = System.getProperty('custom.tests')
+  if (customTestFile != null) {
+    new File(customTestFile).eachLine { customTestList << it }
+  }
+
+  task customTest(type:Test) {
+    include { x ->
+      (x.isDirectory() || customTestList.any { y -> x.getName().contains(y) } ) ? true : false
+    }
+
+    forkEvery 30
+  }
+
   // apply common test configuration
   gradle.taskGraph.whenReady( { graph ->
     tasks.withType(Test).each { test ->
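
The new task can then be driven from the command line. As a usage sketch (the file name tests-to-run.txt and its contents are illustrative, not part of this patch), one class-name fragment per line is enough, since the include closure above matches each fragment against compiled class file names:

  # tests-to-run.txt -- hypothetical list file, one class-name fragment per line
  InstantiatorPropagationDUnitTest
  IndexCommandsDUnitTest

  $ ./gradlew customTest -Dcustom.tests=tests-to-run.txt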


[26/50] [abbrv] incubator-geode git commit: GEODE-53 - Minor fixes to text and adding links to Apache Incubator

Posted by kl...@apache.org.
GEODE-53 - Minor fixes to text and adding links to Apache Incubator


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/c32a5b27
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/c32a5b27
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/c32a5b27

Branch: refs/heads/feature/GEODE-291
Commit: c32a5b27070ff727aa3838430a63df045715fc2e
Parents: d16e78d
Author: William Markito <wm...@pivotal.io>
Authored: Tue Dec 8 16:34:23 2015 -0800
Committer: William Markito <wm...@pivotal.io>
Committed: Tue Dec 8 16:34:23 2015 -0800

----------------------------------------------------------------------
 gemfire-site/content/community/index.html         | 4 ++--
 gemfire-site/content/index.html                   | 2 +-
 gemfire-site/content/releases/index.html          | 2 +-
 gemfire-site/website/content/community/index.html | 2 +-
 gemfire-site/website/content/index.html           | 2 +-
 gemfire-site/website/layouts/footer.html          | 2 +-
 6 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c32a5b27/gemfire-site/content/community/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/content/community/index.html b/gemfire-site/content/community/index.html
index b1d0783..3c612b4 100644
--- a/gemfire-site/content/community/index.html
+++ b/gemfire-site/content/community/index.html
@@ -246,7 +246,7 @@
 	    	<h2>Join Our Community of Contributors!</h2>
         <p>The Apache Geode team welcomes contributors who want to support the Geode technology. Our community builds everything from this website, from the Geode code to documentation and best practices information.</p>
 
-        <p>We especially welcome additions and corrections to the documentation, wiki, and website to improve the user experience. Bug reports and fixes and additions to the Apache Geode code are welcome. Helping users learn best practices also earns karma in our community.</p>
+        <p>We especially welcome additions and corrections to the documentation, wiki, and website to improve the user experience. Bug reports and fixes and additions to the Apache Geode code are welcome. Helping users learn best practices also earns good karma in our community.</p>
 		</div>
 	</div>
 </section>
@@ -592,7 +592,7 @@
         <div class="row">
           <center>
             <div id="copyright">
-                <img src="/img/egg-logo.png" /><br/><br/>
+                <a href="http://incubator.apache.org" target="_blank"><img src="/img/egg-logo.png" /></a><br/><br/>
                 <p class="credits">
                 Apache Geode is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.
                 </p>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c32a5b27/gemfire-site/content/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/content/index.html b/gemfire-site/content/index.html
index a450eea..a9d3425 100644
--- a/gemfire-site/content/index.html
+++ b/gemfire-site/content/index.html
@@ -258,7 +258,7 @@ Today Apache Geode is used by over 600 enterprise customers for high-scale busin
         <div class="row">
           <center>
             <div id="copyright">
-                <img src="/img/egg-logo.png" /><br/><br/>
+                <a href="http://incubator.apache.org" target="_blank"><img src="/img/egg-logo.png" /></a><br/><br/>
                 <p class="credits">
                 Apache Geode is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.
                 </p>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c32a5b27/gemfire-site/content/releases/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/content/releases/index.html b/gemfire-site/content/releases/index.html
index 25bd47c..fb986ff 100644
--- a/gemfire-site/content/releases/index.html
+++ b/gemfire-site/content/releases/index.html
@@ -202,7 +202,7 @@
         <div class="row">
           <center>
             <div id="copyright">
-                <img src="/img/egg-logo.png" /><br/><br/>
+                <a href="http://incubator.apache.org" target="_blank"><img src="/img/egg-logo.png" /></a><br/><br/>
                 <p class="credits">
                 Apache Geode is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.
                 </p>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c32a5b27/gemfire-site/website/content/community/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/website/content/community/index.html b/gemfire-site/website/content/community/index.html
index 7abb0a9..ec58600 100644
--- a/gemfire-site/website/content/community/index.html
+++ b/gemfire-site/website/content/community/index.html
@@ -8,7 +8,7 @@
 	    	<h2>Join Our Community of Contributors!</h2>
         <p>The Apache Geode team welcomes contributors who want to support the Geode technology. Our community builds everything from this website, from the Geode code to documentation and best practices information.</p>
 
-        <p>We especially welcome additions and corrections to the documentation, wiki, and website to improve the user experience. Bug reports and fixes and additions to the Apache Geode code are welcome. Helping users learn best practices also earns karma in our community.</p>
+        <p>We especially welcome additions and corrections to the documentation, wiki, and website to improve the user experience. Bug reports and fixes and additions to the Apache Geode code are welcome. Helping users learn best practices also earns good karma in our community.</p>
 		</div>
 	</div>
 </section>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c32a5b27/gemfire-site/website/content/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/website/content/index.html b/gemfire-site/website/content/index.html
index 17058e7..555d024 100644
--- a/gemfire-site/website/content/index.html
+++ b/gemfire-site/website/content/index.html
@@ -13,7 +13,7 @@ title: Performance is key. Consistency is a must.
                   Take advantage of Apache Geode's unique technology that blends advanced techniques for data replication, partitioning and distributed processing.
 
                   <br/><br/>
-                  Apache Geode provides a database-like consistency model, reliable transaction processing and a shared-nothing architecture to maintain very low latency performance with high concurrency processing.<br/></p>
+                  Apache Geode (incubating) provides a database-like consistency model, reliable transaction processing and a shared-nothing architecture to maintain very low latency performance with high concurrency processing.<br/></p>
             </div>
 
             <div class="btn-wrapper">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c32a5b27/gemfire-site/website/layouts/footer.html
----------------------------------------------------------------------
diff --git a/gemfire-site/website/layouts/footer.html b/gemfire-site/website/layouts/footer.html
index 158ae46..c024af6 100644
--- a/gemfire-site/website/layouts/footer.html
+++ b/gemfire-site/website/layouts/footer.html
@@ -64,7 +64,7 @@
         <div class="row">
           <center>
             <div id="copyright">
-                <img src="/img/egg-logo.png" /><br/><br/>
+                <a href="http://incubator.apache.org" target="_blank"><img src="/img/egg-logo.png" /></a><br/><br/>
                 <p class="credits">
                 Apache Geode is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.
                 </p>


[03/50] [abbrv] incubator-geode git commit: GEODE-617: Change xsd namespace for lucene to geode.apache.org

Posted by kl...@apache.org.
GEODE-617: Change xsd namespace for lucene to geode.apache.org


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/e414a493
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/e414a493
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/e414a493

Branch: refs/heads/feature/GEODE-291
Commit: e414a493325d9742772e647965e1bcd1f45650d1
Parents: dec83b4
Author: Dan Smith <up...@apache.org>
Authored: Tue Dec 1 16:42:08 2015 -0800
Committer: Dan Smith <up...@apache.org>
Committed: Mon Dec 7 10:10:52 2015 -0800

----------------------------------------------------------------------
 .../cache/xmlcache/GeodeEntityResolver.java     |  8 +--
 .../lucene/internal/xml/LuceneXmlConstants.java |  2 +-
 .../geode.apache.org/lucene/lucene-1.0.xsd      | 57 +++++++++++++++++++
 .../lucene/lucene-1.0.xsd                       | 58 --------------------
 ...erIntegrationJUnitTest.createIndex.cache.xml |  6 +-
 ...serIntegrationJUnitTest.parseIndex.cache.xml |  6 +-
 6 files changed, 68 insertions(+), 69 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e414a493/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/GeodeEntityResolver.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/GeodeEntityResolver.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/GeodeEntityResolver.java
index 559a1f8..67cda99 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/GeodeEntityResolver.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/xmlcache/GeodeEntityResolver.java
@@ -26,8 +26,8 @@ import org.xml.sax.ext.EntityResolver2;
 
 /**
  * Resolves entities for XSDs or DTDs with SYSTEM IDs rooted at
- * http://www.pivotal.io/xml/ns from the classpath at
- * /META-INF/schemas/schema.pivotal.io/.
+ * http://geode.apache.org/schema from the classpath at
+ * /META-INF/schemas/geode.apache.org/.
  * 
  * Loaded by {@link ServiceLoader} on {@link EntityResolver2} class. See file
  * <code>META-INF/services/org.xml.sax.ext.EntityResolver2</code>
@@ -38,9 +38,9 @@ import org.xml.sax.ext.EntityResolver2;
  */
 public final class GeodeEntityResolver extends DefaultEntityResolver2 {
 
-  private static final String SYSTEM_ID_ROOT = "http://geode.incubator.apache.org/schema";
+  private static final String SYSTEM_ID_ROOT = "http://geode.apache.org/schema";
 
-  private static final String CLASSPATH_ROOT = "/META-INF/schemas/geode.incubator.apache.org/";
+  private static final String CLASSPATH_ROOT = "/META-INF/schemas/geode.apache.org/";
 
   @Override
   public InputSource resolveEntity(final String name, final String publicId, final String baseURI, final String systemId) throws SAXException, IOException {
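
The hunk ends at the resolveEntity signature, so the method body is not shown here. Purely as a hedged illustration of the mapping the two renamed constants imply (this helper and its name are hypothetical, not the committed code), resolution of a bundled schema could look roughly like:

  // Hypothetical sketch, not the committed implementation.
  private InputSource resolveOnClasspath(final String systemId) {
    if (systemId == null || !systemId.startsWith(SYSTEM_ID_ROOT + "/")) {
      return null; // not a Geode schema id; fall back to default resolution
    }
    // e.g. http://geode.apache.org/schema/lucene/lucene-1.0.xsd
    //   -> /META-INF/schemas/geode.apache.org/lucene/lucene-1.0.xsd
    final String path = CLASSPATH_ROOT + systemId.substring(SYSTEM_ID_ROOT.length() + 1);
    final java.io.InputStream in = GeodeEntityResolver.class.getResourceAsStream(path);
    if (in == null) {
      return null; // schema not bundled on the classpath
    }
    final InputSource source = new InputSource(in);
    source.setSystemId(systemId);
    return source;
  }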

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e414a493/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlConstants.java
----------------------------------------------------------------------
diff --git a/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlConstants.java b/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlConstants.java
index 303424e..bc80180 100644
--- a/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlConstants.java
+++ b/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneXmlConstants.java
@@ -20,7 +20,7 @@
 package com.gemstone.gemfire.cache.lucene.internal.xml;
 
 public class LuceneXmlConstants {
-  public static final String NAMESPACE= "http://geode.incubator.apache.org/schema/lucene";
+  public static final String NAMESPACE= "http://geode.apache.org/schema/lucene";
   public static final String PREFIX = "lucene";
   public static final String SERVICE = "service";
   public static final String NAME = "name";

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e414a493/gemfire-lucene/src/main/resources/META-INF/schemas/geode.apache.org/lucene/lucene-1.0.xsd
----------------------------------------------------------------------
diff --git a/gemfire-lucene/src/main/resources/META-INF/schemas/geode.apache.org/lucene/lucene-1.0.xsd b/gemfire-lucene/src/main/resources/META-INF/schemas/geode.apache.org/lucene/lucene-1.0.xsd
new file mode 100644
index 0000000..bfe9f6c
--- /dev/null
+++ b/gemfire-lucene/src/main/resources/META-INF/schemas/geode.apache.org/lucene/lucene-1.0.xsd
@@ -0,0 +1,57 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<xsd:schema
+    targetNamespace="http://geode.apache.org/schema/lucene"
+    xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+    elementFormDefault="qualified"
+    attributeFormDefault="unqualified"
+    version="1.0">
+  
+  <xsd:import
+      namespace="http://schema.pivotal.io/gemfire/cache"
+      schemaLocation="http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd"/>
+  
+  <xsd:annotation>
+    <xsd:documentation><![CDATA[
+XML schema for Lucene indexes in Geode.
+
+  <cache
+    xmlns="http://schema.pivotal.io/gemfire/cache"
+    xmlns:lucene="http://geode.apache.org/schema/lucene"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://schema.pivotal.io/gemfire/cache
+        http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd
+        http://geode.apache.org/schema/lucene
+        http://geode.apache.org/schema/lucene/lucene-1.0.xsd"
+    version="9.0">
+    
+    ]]></xsd:documentation>
+  </xsd:annotation>
+  <xsd:element name="index">
+    <xsd:complexType>
+    	<xsd:sequence>
+    	  <xsd:element name="field" maxOccurs="unbounded">
+			<xsd:complexType>
+				<xsd:attribute name="name" type="xsd:string" />
+			</xsd:complexType>
+    	  </xsd:element>
+    	</xsd:sequence>
+    	<xsd:attribute name="name" type="xsd:string"/>
+    </xsd:complexType>
+  </xsd:element>
+</xsd:schema>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e414a493/gemfire-lucene/src/main/resources/META-INF/schemas/geode.incubator.apache.org/lucene/lucene-1.0.xsd
----------------------------------------------------------------------
diff --git a/gemfire-lucene/src/main/resources/META-INF/schemas/geode.incubator.apache.org/lucene/lucene-1.0.xsd b/gemfire-lucene/src/main/resources/META-INF/schemas/geode.incubator.apache.org/lucene/lucene-1.0.xsd
deleted file mode 100644
index b1eae03..0000000
--- a/gemfire-lucene/src/main/resources/META-INF/schemas/geode.incubator.apache.org/lucene/lucene-1.0.xsd
+++ /dev/null
@@ -1,58 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-<xsd:schema
-    targetNamespace="http://geode.incubator.apache.org/schema/lucene"
-    xmlns:gpdb="http://geode.incubator.apache.org/schema/lucene"
-    xmlns:xsd="http://www.w3.org/2001/XMLSchema"
-    elementFormDefault="qualified"
-    attributeFormDefault="unqualified"
-    version="1.0">
-  
-  <xsd:import
-      namespace="http://schema.pivotal.io/gemfire/cache"
-      schemaLocation="http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd"/>
-  
-  <xsd:annotation>
-    <xsd:documentation><![CDATA[
-XML schema for Lucene indexes in Geode.
-
-  <cache
-    xmlns="http://schema.pivotal.io/gemfire/cache"
-    xmlns:lucene="http://geode.incubator.apache.org/schema/lucene"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://schema.pivotal.io/gemfire/cache
-        http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd
-        http://geode.incubator.apache.org/schema/lucene
-        http://geode.incubator.apache.org/schema/lucene/lucene-1.0.xsd"
-    version="9.0">
-    
-    ]]></xsd:documentation>
-  </xsd:annotation>
-  <xsd:element name="index">
-    <xsd:complexType>
-    	<xsd:sequence>
-    	  <xsd:element name="field" maxOccurs="unbounded">
-			<xsd:complexType>
-				<xsd:attribute name="name" type="xsd:string" />
-			</xsd:complexType>
-    	  </xsd:element>
-    	</xsd:sequence>
-    	<xsd:attribute name="name" type="xsd:string"/>
-    </xsd:complexType>
-  </xsd:element>
-</xsd:schema>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e414a493/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml b/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
index 7f804e0..42e4e84 100644
--- a/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
+++ b/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.createIndex.cache.xml
@@ -1,12 +1,12 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
-    xmlns:lucene="http://geode.incubator.apache.org/schema/lucene"
+    xmlns:lucene="http://geode.apache.org/schema/lucene"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
     xsi:schemaLocation="http://schema.pivotal.io/gemfire/cache
         http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd
-        http://geode.incubator.apache.org/schema/lucene
-        http://geode.incubator.apache.org/schema/lucene/lucene-1.0.xsd"
+        http://geode.apache.org/schema/lucene
+        http://geode.apache.org/schema/lucene/lucene-1.0.xsd"
     version="9.0">
 
 	<region name="region" refid="PARTITION">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e414a493/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndex.cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndex.cache.xml b/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndex.cache.xml
index 7f804e0..42e4e84 100644
--- a/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndex.cache.xml
+++ b/gemfire-lucene/src/test/resources/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserIntegrationJUnitTest.parseIndex.cache.xml
@@ -1,12 +1,12 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
-    xmlns:lucene="http://geode.incubator.apache.org/schema/lucene"
+    xmlns:lucene="http://geode.apache.org/schema/lucene"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
     xsi:schemaLocation="http://schema.pivotal.io/gemfire/cache
         http://schema.pivotal.io/gemfire/cache/cache-9.0.xsd
-        http://geode.incubator.apache.org/schema/lucene
-        http://geode.incubator.apache.org/schema/lucene/lucene-1.0.xsd"
+        http://geode.apache.org/schema/lucene
+        http://geode.apache.org/schema/lucene/lucene-1.0.xsd"
     version="9.0">
 
 	<region name="region" refid="PARTITION">


[12/50] [abbrv] incubator-geode git commit: GEODE-563: Moving gfsh tests from closed

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest.java
new file mode 100644
index 0000000..1e234c5
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest.java
@@ -0,0 +1,371 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheLoader;
+import com.gemstone.gemfire.cache.CacheLoaderException;
+import com.gemstone.gemfire.cache.LoaderHelper;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.management.DistributedRegionMXBean;
+import com.gemstone.gemfire.management.ManagementService;
+import com.gemstone.gemfire.management.ManagerMXBean;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.HeadlessGfsh;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData;
+import com.gemstone.gemfire.management.internal.cli.result.ResultData;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * The GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest class is a test suite of test cases testing the Gfsh
+ * 'get' data command when a cache miss occurs on data in a Region with a CacheLoader defined.
+ * <p>
+ *
+ * @author John Blum
+ * @see com.gemstone.gemfire.management.internal.cli.commands.CliCommandTestBase
+ * @see com.gemstone.gemfire.management.internal.cli.commands.DataCommands
+ * @since 8.0
+ */
+@SuppressWarnings("unused")
+public class GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest extends CliCommandTestBase {
+
+  private static final String GEMFIRE_MANAGER_NAME = "GemManagerNode";
+  private static final String GEMFIRE_SERVER_NAME = "GemServerDataNode";
+  private static final String GEMFIRE_LOG_LEVEL = System.getProperty("logLevel", "config");
+  private static final String USERS_REGION_NAME = "Users";
+
+  protected static String getRegionPath(final String regionName) {
+    return (regionName.startsWith(Region.SEPARATOR) ? regionName : String.format("%1$s%2$s", Region.SEPARATOR,
+        regionName));
+  }
+
+  protected static String toString(final Result result) {
+    assert result != null : "The Result object from the command execution was null!";
+
+    StringBuilder buffer = new StringBuilder(System.getProperty("line.separator"));
+
+    while (result.hasNextLine()) {
+      buffer.append(result.nextLine());
+      buffer.append(System.getProperty("line.separator"));
+    }
+
+    return buffer.toString();
+  }
+
+  public GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest(final String testName) {
+    super(testName);
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+
+    Properties managerDistributedSystemProperties = createDistributedSystemProperties(GEMFIRE_MANAGER_NAME);
+    HeadlessGfsh gfsh = createDefaultSetup(managerDistributedSystemProperties);
+
+    assertNotNull(gfsh);
+    assertTrue(gfsh.isConnectedAndReady());
+
+    setupGemFire();
+    verifyGemFireSetup(createPeer(Host.getHost(0).getVM(0), managerDistributedSystemProperties));
+  }
+
+  private void setupGemFire() {
+    initializePeer(createPeer(Host.getHost(0).getVM(1), createDistributedSystemProperties(GEMFIRE_SERVER_NAME)));
+  }
+
+  protected Properties createDistributedSystemProperties(final String gemfireName) {
+    Properties distributedSystemProperties = new Properties();
+
+    distributedSystemProperties.setProperty(DistributionConfig.LOG_LEVEL_NAME, GEMFIRE_LOG_LEVEL);
+    distributedSystemProperties.setProperty(DistributionConfig.NAME_NAME, gemfireName);
+
+    return distributedSystemProperties;
+  }
+
+  protected Peer createPeer(final VM vm, final Properties distributedSystemProperties) {
+    return new Peer(vm, distributedSystemProperties);
+  }
+
+  protected void initializePeer(final Peer peer) {
+    peer.run(new SerializableRunnable(
+        String.format("Initializes the '%1$s' with the '%2$s' Region having a CacheLoader.", GEMFIRE_SERVER_NAME,
+            USERS_REGION_NAME)) {
+      @Override
+      public void run() {
+        // create the GemFire Distributed System with custom distribution configuration properties and settings
+        getSystem(peer.getConfiguration());
+
+        Cache cache = getCache();
+        RegionFactory<String, User> regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
+
+        regionFactory.setCacheLoader(new UserDataStoreCacheLoader());
+        regionFactory.setInitialCapacity(51);
+        regionFactory.setKeyConstraint(String.class);
+        regionFactory.setLoadFactor(0.75f);
+        regionFactory.setStatisticsEnabled(false);
+        regionFactory.setValueConstraint(User.class);
+
+        Region<String, User> users = regionFactory.create(USERS_REGION_NAME);
+
+        assertNotNull(users);
+        assertEquals("Users", users.getName());
+        assertEquals("/Users", users.getFullPath());
+        assertTrue(users.isEmpty());
+        assertNull(users.put("jonbloom", new User("jonbloom")));
+        assertFalse(users.isEmpty());
+        assertEquals(1, users.size());
+        assertEquals(new User("jonbloom"), users.get("jonbloom"));
+      }
+    });
+  }
+
+  private void verifyGemFireSetup(final Peer manager) {
+    manager.run(new SerializableRunnable("Verifies the GemFire Cluster was properly configured and initialized!") {
+      @Override
+      public void run() {
+        final ManagementService managementService = ManagementService.getExistingManagementService(getCache());
+
+        WaitCriterion waitOnManagerCriterion = new WaitCriterion() {
+          @Override
+          public boolean done() {
+            ManagerMXBean managerBean = managementService.getManagerMXBean();
+            DistributedRegionMXBean usersRegionBean = managementService.getDistributedRegionMXBean(
+                getRegionPath(USERS_REGION_NAME));
+
+            return !(managerBean == null || usersRegionBean == null);
+          }
+
+          @Override
+          public String description() {
+            return String.format("Probing for the GemFire Manager '%1$s' and '%2$s' Region MXBeans...",
+                manager.getName(), USERS_REGION_NAME);
+          }
+        };
+
+        DistributedTestCase.waitForCriterion(waitOnManagerCriterion, 30000, 2000, true);
+      }
+    });
+  }
+
+  protected void doHousekeeping() {
+    runCommand(CliStrings.LIST_MEMBER);
+
+    runCommand(new CommandStringBuilder(CliStrings.DESCRIBE_MEMBER).addOption(CliStrings.DESCRIBE_MEMBER__IDENTIFIER,
+        GEMFIRE_SERVER_NAME).toString());
+
+    runCommand(CliStrings.LIST_REGION);
+
+    runCommand(new CommandStringBuilder(CliStrings.DESCRIBE_REGION).addOption(CliStrings.DESCRIBE_REGION__NAME,
+        USERS_REGION_NAME).toString());
+  }
+
+  protected void log(final Result result) {
+    log("Result", toString(result));
+  }
+
+  protected void log(final String tag, final String message) {
+    //System.out.printf("%1$s (%2$s)%n", tag, message);
+    getLogWriter().info(String.format("%1$s (%2$s)%n", tag, message));
+  }
+
+  protected CommandResult runCommand(final String command) {
+    CommandResult result = executeCommand(command);
+
+    assertNotNull(result);
+    assertEquals(Result.Status.OK, result.getStatus());
+
+    log(result);
+
+    return result;
+  }
+
+  protected void assertResult(final boolean expectedResult, final CommandResult commandResult) {
+    if (ResultData.TYPE_COMPOSITE.equals(commandResult.getType())) {
+      boolean actualResult = (Boolean) ((CompositeResultData) commandResult.getResultData()).retrieveSectionByIndex(
+          0).retrieveObject("Result");
+      assertEquals(expectedResult, actualResult);
+    } else {
+      fail(String.format("Expected composite result data; but was '%1$s'!%n", commandResult.getType()));
+    }
+  }
+
+  public void testGetOnCacheMiss() {
+    doHousekeeping();
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.GET);
+    command.addOption(CliStrings.GET__REGIONNAME, USERS_REGION_NAME);
+    command.addOption(CliStrings.GET__KEY, "jonbloom");
+
+    assertResult(true, runCommand(command.toString()));
+
+    command = new CommandStringBuilder(CliStrings.GET);
+    command.addOption(CliStrings.GET__REGIONNAME, USERS_REGION_NAME);
+    command.addOption(CliStrings.GET__KEY, "jondoe");
+    command.addOption(CliStrings.GET__LOAD, "false");
+
+    assertResult(false, runCommand(command.toString()));
+
+    command = new CommandStringBuilder(CliStrings.GET);
+    command.addOption(CliStrings.GET__REGIONNAME, USERS_REGION_NAME);
+    command.addOption(CliStrings.GET__KEY, "jondoe");
+    command.addOption(CliStrings.GET__LOAD, "true");
+
+    assertResult(true, runCommand(command.toString()));
+
+    // NOTE test the unspecified default value for the --load-on-cache-miss
+    command = new CommandStringBuilder(CliStrings.GET);
+    command.addOption(CliStrings.GET__REGIONNAME, USERS_REGION_NAME);
+    command.addOption(CliStrings.GET__KEY, "janedoe");
+
+    assertResult(true, runCommand(command.toString()));
+
+    // NOTE now test an absolute cache miss, both in the Region and in the CacheLoader
+    command = new CommandStringBuilder(CliStrings.GET);
+    command.addOption(CliStrings.GET__REGIONNAME, USERS_REGION_NAME);
+    command.addOption(CliStrings.GET__KEY, "nonexistinguser");
+    command.addOption(CliStrings.GET__LOAD, "true");
+
+    assertResult(false, runCommand(command.toString()));
+  }
+
+  protected static final class Peer implements Serializable {
+
+    private final Properties distributedSystemProperties;
+    private final VM vm;
+
+    public Peer(final VM vm, final Properties distributedSystemProperties) {
+      assert distributedSystemProperties != null : "The GemFire Distributed System configuration properties and settings cannot be null!";
+      this.vm = vm;
+      this.distributedSystemProperties = distributedSystemProperties;
+    }
+
+    public Properties getConfiguration() {
+      return this.distributedSystemProperties;
+    }
+
+    public String getName() {
+      return getConfiguration().getProperty(DistributionConfig.NAME_NAME);
+    }
+
+    public VM getVm() {
+      return vm;
+    }
+
+    public void run(final Runnable runnable) {
+      if (getVm() == null) {
+        runnable.run();
+      } else {
+        getVm().invoke(runnable);
+      }
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder buffer = new StringBuilder(getClass().getSimpleName());
+
+      buffer.append(" {configuration = ").append(getConfiguration());
+      buffer.append(", name = ").append(getName());
+      buffer.append(", pid = ").append(getVm().getPid());
+      buffer.append("}");
+
+      return buffer.toString();
+    }
+  }
+
+  protected static class User implements Serializable {
+
+    private final String username;
+
+    public User(final String username) {
+      assert username != null : "The username cannot be null!";
+      this.username = username;
+    }
+
+    public String getUsername() {
+      return username;
+    }
+
+    @Override
+    public boolean equals(final Object obj) {
+      if (obj == this) {
+        return true;
+      }
+
+      if (!(obj instanceof User)) {
+        return false;
+      }
+
+      User that = (User) obj;
+
+      return this.getUsername().equals(that.getUsername());
+    }
+
+    @Override
+    public int hashCode() {
+      int hashValue = 17;
+      hashValue = 37 * hashValue + getUsername().hashCode();
+      return hashValue;
+    }
+
+    @Override
+    public String toString() {
+      return getUsername();
+    }
+  }
+
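+  // Simulates an external data source for load-on-cache-miss tests: keys present in the backing
+  // map are loaded on a cache miss, unknown keys yield null (no value loaded).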
+  protected static class UserDataStoreCacheLoader implements CacheLoader<String, User>, Serializable {
+
+    private static final Map<String, User> userDataStore = new HashMap<String, User>(5);
+
+    static {
+      userDataStore.put("jackhandy", createUser("jackhandy"));
+      userDataStore.put("janedoe", createUser("janedoe"));
+      userDataStore.put("jondoe", createUser("jondoe"));
+      userDataStore.put("piedoe", createUser("piedoe"));
+      userDataStore.put("supertool", createUser("supertool"));
+    }
+
+    protected static User createUser(final String username) {
+      return new User(username);
+    }
+
+    @Override
+    public User load(final LoaderHelper<String, User> helper) throws CacheLoaderException {
+      return userDataStore.get(helper.getKey());
+    }
+
+    @Override
+    public void close() {
+      userDataStore.clear();
+    }
+  }
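+
+  // A minimal sketch (not used by this test; the region name and shortcut are illustrative) of
+  // how such a loader is typically wired to a region so that gets fall through to it on a miss:
+  //   RegionFactory<String, User> factory = cache.createRegionFactory(RegionShortcut.REPLICATE);
+  //   factory.setCacheLoader(new UserDataStoreCacheLoader());
+  //   Region<String, User> users = factory.create("Users");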
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/IndexCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/IndexCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/IndexCommandsDUnitTest.java
new file mode 100644
index 0000000..bb99dc2
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/IndexCommandsDUnitTest.java
@@ -0,0 +1,817 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.DiskStoreFactory;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.query.Index;
+import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalLocator;
+import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
+import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.domain.Stock;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
+
+public class IndexCommandsDUnitTest extends CliCommandTestBase {
+
+  private static final long serialVersionUID = 1L;
+  private static final String VM1Name = "VM1";
+  private static final String group1 = "G1";
+  private static final String indexName = "Id1";
+  private static final String parRegPersName = "ParRegPers";
+  private static final String repRegPersName = "RepRegPer";
+
+  public IndexCommandsDUnitTest(String name) {
+    super(name);
+    // TODO Auto-generated constructor stub
+  }
+
+  Region<?, ?> createParReg(String regionName, Cache cache, Class keyConstraint, Class valueConstraint) {
+    RegionFactory regionFactory = cache.createRegionFactory();
+    regionFactory.setDataPolicy(DataPolicy.PARTITION);
+    regionFactory.setKeyConstraint(keyConstraint);
+    regionFactory.setValueConstraint(valueConstraint);
+    return regionFactory.create(regionName);
+  }
+
+  private Region<?, ?> createParRegWithPersistence(String regionName, String diskStoreName, String diskDirName) {
+    Cache cache = getCache();
+    File diskStoreDirFile = new File(diskDirName);
+    diskStoreDirFile.deleteOnExit();
+
+    if (!diskStoreDirFile.exists()) {
+      diskStoreDirFile.mkdirs();
+    }
+
+    DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+    diskStoreFactory.setDiskDirs(new File[]{diskStoreDirFile});
+    diskStoreFactory.setMaxOplogSize(1);
+    diskStoreFactory.setAllowForceCompaction(true);
+    diskStoreFactory.setAutoCompact(false);
+    diskStoreFactory.create(diskStoreName);
+
+    /****
+     * Eviction Attributes
+     */
+    EvictionAttributes ea = EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK);
+
+    RegionFactory regionFactory = cache.createRegionFactory();
+    regionFactory.setDiskStoreName(diskStoreName);
+    regionFactory.setDiskSynchronous(true);
+    regionFactory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+    regionFactory.setEvictionAttributes(ea);
+
+    return regionFactory.create(regionName);
+  }
+
+  private Region<?, ?> createRepRegWithPersistence(String regionName, String diskStoreName, String diskDirName) {
+    Cache cache = getCache();
+    File diskStoreDirFile = new File(diskDirName);
+    diskStoreDirFile.deleteOnExit();
+
+    if (!diskStoreDirFile.exists()) {
+      diskStoreDirFile.mkdirs();
+    }
+
+    DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+    diskStoreFactory.setDiskDirs(new File[]{diskStoreDirFile});
+    diskStoreFactory.setMaxOplogSize(1);
+    diskStoreFactory.setAllowForceCompaction(true);
+    diskStoreFactory.setAutoCompact(false);
+    diskStoreFactory.create(diskStoreName);
+
+    /****
+     * Eviction Attributes
+     */
+    EvictionAttributes ea = EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK);
+
+    RegionFactory regionFactory = cache.createRegionFactory();
+    regionFactory.setDiskStoreName(diskStoreName);
+    regionFactory.setDiskSynchronous(true);
+    regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
+    regionFactory.setEvictionAttributes(ea);
+
+    return regionFactory.create(regionName);
+  }
+
+  public void testCreateKeyIndexOnRegionWithPersistence() {
+    setupSystemPersist();
+
+    //Creating key indexes on Persistent Partitioned Region
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, "id1");
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "ty");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/" + parRegPersName);
+    csb.addOption(CliStrings.CREATE_INDEX__TYPE, "key");
+    String commandString = csb.toString();
+    writeToLog("Command String :\n ", commandString);
+    CommandResult commandResult = executeCommand(commandString);
+    String resultAsString = commandResultToString(commandResult);
+    writeToLog("Command Result :\n", resultAsString);
+    assertTrue(Status.OK.equals(commandResult.getStatus()));
+    //Creating key indexes on Persistent Replicated Regions
+    csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, "id2");
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "ee");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/" + repRegPersName);
+    csb.addOption(CliStrings.CREATE_INDEX__TYPE, "key");
+    commandString = csb.toString();
+    writeToLog("Command String :\n ", commandString);
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command Result :\n", resultAsString);
+    assertTrue(Status.OK.equals(commandResult.getStatus()));
+  }
+
+  public void testCreateAndDestroyIndex() {
+    setupSystem();
+    /***
+     * Basic Create and Destroy 
+     */
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+
+    String commandString = csb.toString();
+    writeToLog("Command String :\n ", commandString);
+    CommandResult commandResult = executeCommand(commandString);
+    String resultAsString = commandResultToString(commandResult);
+    writeToLog("testCreateAndDestroyIndex", resultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.LIST_INDEX);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(resultAsString.contains(indexName));
+
+    csb = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+    csb.addOption(CliStrings.DESTROY_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.DESTROY_INDEX__REGION, "/StocksParReg");
+    commandString = csb.toString();
+    writeToLog("Command String :\n ", commandString);
+
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("testCreateAndDestroyIndex", resultAsString);
+    assertEquals(commandResult.getStatus(), Status.OK);
+
+    commandResult = executeCommand(CliStrings.LIST_INDEX);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(commandResult.getStatus(), Status.OK);
+    assertFalse(resultAsString.contains(indexName));
+  }
+
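+  // For reference (not executed): assuming the CliStrings constants above carry the usual gfsh
+  // spellings, testCreateAndDestroyIndex round-trips commands along the lines of
+  //   create index --name=Id1 --expression=key --region=/StocksParReg
+  //   list indexes
+  //   destroy index --name=Id1 --region=/StocksParReg
+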
+  public void testCreateIndexMultipleIterators() {
+    setupSystem();
+
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "\"h.low\"");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "\"/StocksParReg s, s.history h\"");
+
+    String commandString = csb.toString();
+    writeToLog("Command String :\n ", commandString);
+    CommandResult commandResult = executeCommand(commandString);
+    String resultAsString = commandResultToString(commandResult);
+    writeToLog("testCreateIndexMultipleIterators", resultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.LIST_INDEX);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("testCreateIndexMultipleIterators", resultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(resultAsString.contains(indexName));
+  }
+
+  public void testCreateMultipleIndexes() {
+    setupSystem();
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.DEFINE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+
+    String commandString = csb.toString();
+    writeToLog("Command String :\n ", commandString);
+    CommandResult commandResult = executeCommand(commandString);
+    String resultAsString = commandResultToString(commandResult);
+    writeToLog("testCreateMultipleIndexes", resultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.DEFINE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName + "2");
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+
+    // Define the second index as well before creating the defined indexes.
+    commandResult = executeCommand(csb.toString());
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.CREATE_DEFINED_INDEXES);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.LIST_INDEX);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(resultAsString.contains(indexName));
+  }
+
+  public void testClearMultipleIndexes() {
+    setupSystem();
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.DEFINE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+
+    String commandString = csb.toString();
+    writeToLog("Command String :\n ", commandString);
+    CommandResult commandResult = executeCommand(commandString);
+    String resultAsString = commandResultToString(commandResult);
+    writeToLog("testClearMultipleIndexes", resultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.DEFINE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName + "2");
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+
+    // Define the second index as well before clearing the defined indexes.
+    commandResult = executeCommand(csb.toString());
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.CLEAR_DEFINED_INDEXES);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.LIST_INDEX);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(!resultAsString.contains(indexName));
+  }
+
+  public void testCreateAndDestroyIndexOnMember() {
+    setupSystem();
+    /***
+     * Basic Create and Destroy 
+     */
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+    csb.addOption(CliStrings.CREATE_INDEX__MEMBER, VM1Name);
+    csb.addOption(CliStrings.CREATE_INDEX__TYPE, "key");
+
+    String commandString = csb.toString();
+    CommandResult commandResult = executeCommand(commandString);
+    String resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexOnMember", resultAsString);
+
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.LIST_INDEX);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexOnMember", resultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(resultAsString.contains(indexName));
+    assertTrue(resultAsString.contains(VM1Name));
+
+    csb = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+    csb.addOption(CliStrings.DESTROY_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.DESTROY_INDEX__REGION, "/StocksParReg");
+    csb.addOption(CliStrings.DESTROY_INDEX__MEMBER, VM1Name);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexOnMember", resultAsString);
+    assertEquals(commandResult.getStatus(), Status.OK);
+
+    commandResult = executeCommand(CliStrings.LIST_INDEX);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexOnMember", resultAsString);
+    assertEquals(commandResult.getStatus(), Status.OK);
+    assertFalse(resultAsString.contains(VM1Name));
+  }
+
+  public void testCreateAndDestroyIndexOnGroup() {
+    setupSystem();
+    /***
+     * Basic Create and Destroy 
+     */
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+    csb.addOption(CliStrings.CREATE_INDEX__TYPE, "hash");
+    csb.addOption(CliStrings.CREATE_INDEX__GROUP, group1);
+
+    String commandString = csb.toString();
+    CommandResult commandResult = executeCommand(commandString);
+    String resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexOnGroup", resultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.LIST_INDEX);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertEquals(true, resultAsString.contains(indexName));
+    assertEquals(true, resultAsString.contains(VM1Name));
+
+    csb = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+    csb.addOption(CliStrings.DESTROY_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.DESTROY_INDEX__REGION, "/StocksParReg");
+    csb.addOption(CliStrings.DESTROY_INDEX__GROUP, group1);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexOnGroup", resultAsString);
+    assertEquals(commandResult.getStatus(), Status.OK);
+
+    commandResult = executeCommand(CliStrings.LIST_INDEX);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(commandResult.getStatus(), Status.OK);
+    assertFalse(resultAsString.contains(VM1Name));
+
+    /***
+     * In the case of a partitioned region, the index might get created on a
+     * member that hosts the region but is not a member of group1.
+     */
+    if (resultAsString.contains(indexName)) {
+      csb = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+      csb.addOption(CliStrings.DESTROY_INDEX__NAME, indexName);
+      csb.addOption(CliStrings.DESTROY_INDEX__REGION, "/StocksParReg");
+      commandString = csb.toString();
+      commandResult = executeCommand(commandString);
+      resultAsString = commandResultToString(commandResult);
+      assertEquals(commandResult.getStatus(), Status.OK);
+
+      commandResult = executeCommand(CliStrings.LIST_INDEX);
+      resultAsString = commandResultToString(commandResult);
+      writeToLog("Command String :\n ", commandString);
+      writeToLog("testCreateAndDestroyIndexOnGroup", resultAsString);
+
+      assertEquals(commandResult.getStatus(), Status.OK);
+      assertFalse(resultAsString.contains(indexName));
+      assertTrue(resultAsString.contains(CliStrings.LIST_INDEX__INDEXES_NOT_FOUND_MESSAGE));
+    }
+  }
+
+  public void testCreateAndDestroyIndexWithIncorrectInput() {
+    setupSystem();
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+    csb.addOption(CliStrings.CREATE_INDEX__TYPE, "hash");
+    String commandString = csb.toString();
+    CommandResult commandResult = executeCommand(commandString);
+    String resultAsString = commandResultToString(commandResult);
+
+    assertEquals(commandResult.getStatus(), Status.OK);
+
+    //CREATE the same index 
+    csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+    csb.addOption(CliStrings.CREATE_INDEX__TYPE, "hash");
+
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertTrue(commandResult.getStatus().equals(Status.ERROR));
+    //assertTrue(resultAsString.contains(CliStrings.format(CliStrings.CREATE_INDEX__NAME__CONFLICT, indexName)));
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexWithIncorrectInput", resultAsString);
+
+
+    //Create index on a wrong regionPath
+    csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocsParReg");
+    csb.addOption(CliStrings.CREATE_INDEX__TYPE, "hash");
+
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexWithIncorrectInput", resultAsString);
+    assertTrue(commandResult.getStatus().equals(Status.ERROR));
+    //assertTrue(resultAsString.contains(CliStrings.format(CliStrings.CREATE_INDEX__INVALID__REGIONPATH, "/StocsParReg")));
+
+    //Create index with wrong expression 
+    csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, "Id2");
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "rey");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+    csb.addOption(CliStrings.CREATE_INDEX__TYPE, "hash");
+
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexWithIncorrectInput", resultAsString);
+    assertTrue(commandResult.getStatus().equals(Status.ERROR));
+
+    //Create index with wrong type 
+    csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+    csb.addOption(CliStrings.CREATE_INDEX__TYPE, "bash");
+
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexWithIncorrectInput", resultAsString);
+    assertTrue(resultAsString.contains(CliStrings.CREATE_INDEX__INVALID__INDEX__TYPE__MESSAGE));
+    assertTrue(commandResult.getStatus().equals(Status.ERROR));
+
+    //Destroy index with incorrect indexName 
+    csb = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+    csb.addOption(CliStrings.DESTROY_INDEX__NAME, "Id2");
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexWithIncorrectInput", resultAsString);
+    assertTrue(commandResult.getStatus().equals(Status.ERROR));
+    assertTrue(resultAsString.contains(CliStrings.format(CliStrings.DESTROY_INDEX__INDEX__NOT__FOUND, "Id2")));
+
+    //Destroy index with incorrect region 
+    csb = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+    csb.addOption(CliStrings.DESTROY_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.DESTROY_INDEX__REGION, "Region");
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexWithIncorrectInput", resultAsString);
+    assertTrue(commandResult.getStatus().equals(Status.ERROR));
+    assertTrue(resultAsString.contains(CliStrings.format(CliStrings.DESTROY_INDEX__REGION__NOT__FOUND, "Region")));
+
+    //Destroy index with incorrect memberName
+    csb = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+    csb.addOption(CliStrings.DESTROY_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.DESTROY_INDEX__REGION, "Region");
+    csb.addOption(CliStrings.DESTROY_INDEX__MEMBER, "wrongOne");
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexWithIncorrectInput", resultAsString);
+    assertTrue(commandResult.getStatus().equals(Status.ERROR));
+
+    //Destroy index with no option
+    csb = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    writeToLog("Command String :\n ", commandString);
+    writeToLog("testCreateAndDestroyIndexWithIncorrectInput", resultAsString);
+    assertTrue(commandResult.getStatus().equals(Status.ERROR));
+  }
+
+  public void testDestroyIndexWithoutIndexName() {
+    setupSystem();
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+    csb.addOption(CliStrings.CREATE_INDEX__TYPE, "hash");
+    String commandString = csb.toString();
+    CommandResult commandResult = executeCommand(commandString);
+    String resultAsString = commandResultToString(commandResult);
+    assertEquals(commandResult.getStatus(), Status.OK);
+
+    csb = new CommandStringBuilder(CliStrings.LIST_INDEX);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertEquals(true, resultAsString.contains(indexName));
+    assertEquals(true, resultAsString.contains(VM1Name));
+
+    csb = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+    csb.addOption(CliStrings.DESTROY_INDEX__GROUP, group1);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.LIST_INDEX);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    csb.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    csb.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    csb.addOption(CliStrings.CREATE_INDEX__REGION, "/StocksParReg");
+    csb.addOption(CliStrings.CREATE_INDEX__TYPE, "hash");
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+    csb.addOption(CliStrings.DESTROY_INDEX__REGION, "StocksParReg");
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+
+    csb = new CommandStringBuilder(CliStrings.LIST_INDEX);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    resultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(resultAsString.contains(CliStrings.LIST_INDEX__INDEXES_NOT_FOUND_MESSAGE));
+  }
+
+  /**
+   * Asserts that creating and destroying indexes correctly updates the shared configuration.
+   */
+  public void testCreateDestroyUpdatesSharedConfig() {
+    disconnectAllFromDS();
+
+    final String regionName = "testIndexSharedConfigRegion";
+    final String groupName = "testIndexSharedConfigGroup";
+
+    // Start the Locator and wait for shared configuration to be available
+    final int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+
+        final File locatorLogFile = new File("locator-" + locatorPort + ".log");
+        final Properties locatorProps = new Properties();
+        locatorProps.setProperty(DistributionConfig.NAME_NAME, "Locator");
+        locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        locatorProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "fine");
+        locatorProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+        try {
+          final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
+              locatorProps);
+
+          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+            @Override
+            public boolean done() {
+              return locator.isSharedConfigurationRunning();
+            }
+
+            @Override
+            public String description() {
+              return "Waiting for shared configuration to be started";
+            }
+          };
+          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+        } catch (IOException ioex) {
+          fail("Unable to create a locator with a shared configuration");
+        }
+      }
+    });
+
+    // Start the default manager
+    Properties managerProps = new Properties();
+    managerProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    managerProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+    createDefaultSetup(managerProps);
+
+    // Create a cache in VM 1
+    VM vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        getSystem(localProps);
+        assertNotNull(getCache());
+
+        Region parReg = createParReg(regionName, getCache(), String.class, Stock.class);
+        parReg.put("VMW", new Stock("VMW", 98));
+      }
+    });
+
+    // Test creating the index
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_INDEX);
+    commandStringBuilder.addOption(CliStrings.CREATE_INDEX__EXPRESSION, "key");
+    commandStringBuilder.addOption(CliStrings.CREATE_INDEX__NAME, indexName);
+    commandStringBuilder.addOption(CliStrings.CREATE_INDEX__GROUP, groupName);
+    commandStringBuilder.addOption(CliStrings.CREATE_INDEX__REGION, "\"/" + regionName + " p\"");
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Make sure the index exists in the shared config
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        String xmlFromConfig;
+        try {
+          xmlFromConfig = sharedConfig.getConfiguration(groupName).getCacheXmlContent();
+          assertTrue(xmlFromConfig.contains(indexName));
+        } catch (Exception e) {
+          fail("Error occurred in cluster configuration service", e);
+        }
+      }
+    });
+
+    //Restart a member and make sure he gets the shared configuration
+    vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        getCache().close();
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        localProps.setProperty(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "true");
+        getSystem(localProps);
+        Cache cache = getCache();
+        assertNotNull(cache);
+        Region region = cache.getRegion(regionName);
+        assertNotNull(region);
+        Index index = cache.getQueryService().getIndex(region, indexName);
+        assertNotNull(index);
+      }
+    });
+
+    // Test destroying the index
+    commandStringBuilder = new CommandStringBuilder(CliStrings.DESTROY_INDEX);
+    commandStringBuilder.addOption(CliStrings.DESTROY_INDEX__NAME, indexName);
+    commandStringBuilder.addOption(CliStrings.DESTROY_INDEX__GROUP, groupName);
+    commandStringBuilder.addOption(CliStrings.DESTROY_INDEX__REGION, "/" + regionName);
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Make sure the index was removed from the shared config
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        String xmlFromConfig;
+        try {
+          xmlFromConfig = sharedConfig.getConfiguration(groupName).getCacheXmlContent();
+          assertFalse(xmlFromConfig.contains(indexName));
+        } catch (Exception e) {
+          fail("Error occurred in cluster configuration service", e);
+        }
+      }
+    });
+
+    //Restart the data member cache to make sure that the index is destroyed.
+    vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        getCache().close();
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        localProps.setProperty(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "true");
+        getSystem(localProps);
+        Cache cache = getCache();
+        assertNotNull(cache);
+        Region region = cache.getRegion(regionName);
+        assertNotNull(region);
+        Index index = cache.getQueryService().getIndex(region, indexName);
+        assertNull(index);
+      }
+    });
+  }
+
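+  // In gfsh terms (assuming the usual spellings behind the CliStrings constants), the round trip
+  // above creates the index with --group=testIndexSharedConfigGroup, verifies that the locator's
+  // cluster configuration XML now contains it, then destroys it and verifies the XML no longer
+  // does; restarting the data member shows whether the definition is still replayed.
+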
+  private void writeToLog(String text, String resultAsString) {
+    getLogWriter().info(testName + ": " + text + "\n");
+    getLogWriter().info(resultAsString);
+  }
+
+  private void setupSystem() {
+    disconnectAllFromDS();
+    createDefaultSetup(null);
+    final String parRegName = "StocksParReg";
+
+    final VM manager = Host.getHost(0).getVM(0);
+    final VM vm1 = Host.getHost(0).getVM(1);
+
+    manager.invoke(new SerializableCallable() {
+      public Object call() {
+        Region parReg = createParReg(parRegName, getCache(), String.class, Stock.class);
+        parReg.put("VMW", new Stock("VMW", 98));
+        return parReg.put("APPL", new Stock("APPL", 600));
+      }
+    });
+
+    vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Properties props = new Properties();
+        props.setProperty(DistributionConfig.NAME_NAME, VM1Name);
+        props.setProperty(DistributionConfig.GROUPS_NAME, group1);
+        getSystem(props);
+        Region parReg = createParReg(parRegName, getCache(), String.class, Stock.class);
+        parReg.put("MSFT", new Stock("MSFT", 27));
+        return parReg.put("GOOG", new Stock("GOOG", 540));
+      }
+    });
+  }
+
+  private void setupSystemPersist() {
+    disconnectAllFromDS();
+    createDefaultSetup(null);
+    final String parRegName = "StocksParReg";
+
+    final VM manager = Host.getHost(0).getVM(0);
+    final VM vm1 = Host.getHost(0).getVM(1);
+
+    manager.invoke(new SerializableCallable() {
+      public Object call() {
+        Region parReg = createParReg(parRegName, getCache(), String.class, Stock.class);
+        parReg.put("VMW", new Stock("VMW", 98));
+        Region parRegPers = createParRegWithPersistence(parRegPersName, "testCreateIndexDiskstore1",
+            "testCreateIndexDiskDir1");
+        Region repRegPers = createRepRegWithPersistence(repRegPersName, "testCreateIndexDiskstore1",
+            "testCreateIndexDiskDir1");
+        return parReg.put("APPL", new Stock("APPL", 600));
+      }
+    });
+
+    vm1.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        Properties props = new Properties();
+        props.setProperty(DistributionConfig.NAME_NAME, VM1Name);
+        props.setProperty(DistributionConfig.GROUPS_NAME, group1);
+        getSystem(props);
+        Region parReg = createParReg(parRegName, getCache(), String.class, Stock.class);
+        parReg.put("MSFT", new Stock("MSFT", 27));
+        Region parRegPers = createParRegWithPersistence(parRegPersName, "testCreateIndexDiskstore2",
+            "testCreateIndexDiskDir2");
+        Region repRegPers = createRepRegWithPersistence(repRegPersName, "testCreateIndexDiskstore2",
+            "testCreateIndexDiskDir2");
+        return parReg.put("GOOG", new Stock("GOOG", 540));
+      }
+    });
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    super.tearDown2();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeDiskStoreCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeDiskStoreCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeDiskStoreCommandsDUnitTest.java
new file mode 100644
index 0000000..e7bc575
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeDiskStoreCommandsDUnitTest.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.DiskStore;
+import com.gemstone.gemfire.cache.DiskStoreFactory;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.Serializable;
+import java.util.Properties;
+
+/**
+ * The ListAndDescribeDiskStoreCommandsDUnitTest class is a suite of functional test cases verifying the proper
+ * functioning of the 'list disk-store' and 'describe disk-store' commands. </p>
+ *
+ * @author John Blum
+ * @see com.gemstone.gemfire.management.internal.cli.commands.CliCommandTestBase
+ * @see com.gemstone.gemfire.management.internal.cli.commands.DiskStoreCommands
+ * @since 7.0
+ */
+public class ListAndDescribeDiskStoreCommandsDUnitTest extends CliCommandTestBase {
+
+  protected static String toString(final Result result) {
+    assert result != null : "The Result object from the command execution cannot be null!";
+
+    final StringBuilder buffer = new StringBuilder(System.getProperty("line.separator"));
+
+    while (result.hasNextLine()) {
+      buffer.append(result.nextLine());
+      buffer.append(System.getProperty("line.separator"));
+    }
+
+    return buffer.toString().trim();
+  }
+
+  public ListAndDescribeDiskStoreCommandsDUnitTest(final String testName) {
+    super(testName);
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    createDefaultSetup(null);
+    setupGemFire();
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    super.tearDown2();
+  }
+
+  protected Peer createPeer(final Properties distributedSystemConfiguration, final VM vm) {
+    return new Peer(distributedSystemConfiguration, vm);
+  }
+
+  protected void setupGemFire() throws Exception {
+    final Host host = Host.getHost(0);
+
+    final VM vm1 = host.getVM(1);
+    final VM vm2 = host.getVM(2);
+
+    final Peer peer1 = createPeer(createDistributedSystemProperties("consumerServer"), vm1);
+    final Peer peer2 = createPeer(createDistributedSystemProperties("producerServer"), vm2);
+
+    createPersistentRegion(peer1, "consumers", "consumerData");
+    createPersistentRegion(peer1, "observers", "observerData");
+    createPersistentRegion(peer2, "producer", "producerData");
+    createPersistentRegion(peer2, "producer-factory", "producerData");
+  }
+
+  protected Properties createDistributedSystemProperties(final String gemfireName) {
+    final Properties distributedSystemProperties = new Properties();
+
+    distributedSystemProperties.setProperty(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    distributedSystemProperties.setProperty(DistributionConfig.NAME_NAME, gemfireName);
+
+    return distributedSystemProperties;
+  }
+
+  protected void createPersistentRegion(final Peer peer, final String regionName, final String diskStoreName) {
+    peer.run(new SerializableRunnable("Creating Persistent Region for Member " + peer.getName()) {
+      @Override
+      public void run() {
+        getSystem(peer.getDistributedSystemConfiguration());
+
+        final Cache cache = getCache();
+
+        DiskStore diskStore = cache.findDiskStore(diskStoreName);
+
+        if (diskStore == null) {
+          final DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
+          diskStoreFactory.setDiskDirs(getDiskDirs());
+          diskStore = diskStoreFactory.create(diskStoreName);
+        }
+
+        final RegionFactory regionFactory = cache.createRegionFactory();
+
+        regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
+        regionFactory.setDiskStoreName(diskStore.getName());
+        regionFactory.setScope(Scope.DISTRIBUTED_NO_ACK);
+        regionFactory.create(regionName);
+      }
+    });
+  }
+
+  public void testListDiskStore() throws Exception {
+    final Result result = executeCommand(CliStrings.LIST_DISK_STORE);
+
+    assertNotNull(result);
+    getLogWriter().info(toString(result));
+    assertEquals(Result.Status.OK, result.getStatus());
+  }
+
+  public void testDescribeDiskStore() throws Exception {
+    final Result result = executeCommand(
+        CliStrings.DESCRIBE_DISK_STORE + " --member=producerServer --name=producerData");
+
+    assertNotNull(result);
+    getLogWriter().info(toString(result));
+    assertEquals(Result.Status.OK, result.getStatus());
+  }
+
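+  // Illustrative only: the command above corresponds to a gfsh invocation along the lines of
+  //   describe disk-store --member=producerServer --name=producerData
+  // where the option string is taken verbatim from the test and the command-name spelling is an
+  // assumption about what CliStrings.DESCRIBE_DISK_STORE resolves to.
+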
+  public void testDescribeDiskStoreWithInvalidMemberName() throws Exception {
+    final Result commandResult = executeCommand(
+        CliStrings.DESCRIBE_DISK_STORE + " --member=badMemberName --name=producerData");
+
+    assertNotNull(commandResult);
+    assertEquals(Result.Status.ERROR, commandResult.getStatus());
+    assertEquals(CliStrings.format(CliStrings.MEMBER_NOT_FOUND_ERROR_MESSAGE, "badMemberName"),
+        toString(commandResult));
+  }
+
+  public void testDescribeDiskStoreWithInvalidDiskStoreName() {
+    final Result commandResult = executeCommand(
+        CliStrings.DESCRIBE_DISK_STORE + " --member=producerServer --name=badDiskStoreName");
+
+    assertNotNull(commandResult);
+    assertEquals(Result.Status.ERROR, commandResult.getStatus());
+    assertEquals("A disk store with name (badDiskStoreName) was not found on member (producerServer).",
+        toString(commandResult));
+  }
+
+  protected static class Peer implements Serializable {
+
+    private final Properties distributedSystemConfiguration;
+    private final VM vm;
+
+    protected Peer(final Properties distributedSystemConfiguration, final VM vm) {
+      assert distributedSystemConfiguration != null : "The GemFire distributed system configuration properties cannot be null!";
+      this.distributedSystemConfiguration = distributedSystemConfiguration;
+      this.vm = vm;
+    }
+
+    public Properties getDistributedSystemConfiguration() {
+      return distributedSystemConfiguration;
+    }
+
+    public String getName() {
+      return getDistributedSystemConfiguration().getProperty(DistributionConfig.NAME_NAME);
+    }
+
+    public VM getVm() {
+      return vm;
+    }
+
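+    // Runs the task in the wrapped dunit VM, or inline in the current JVM when no VM was given.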
+    public void run(final Runnable runnable) {
+      if (getVm() == null) {
+        runnable.run();
+      } else {
+        getVm().invoke(runnable);
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeRegionDUnitTest.java
new file mode 100644
index 0000000..5c172cc
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeRegionDUnitTest.java
@@ -0,0 +1,320 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAttributes;
+import com.gemstone.gemfire.cache.FixedPartitionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
+import com.gemstone.gemfire.compression.SnappyCompressor;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.cache.RegionEntryContext;
+import com.gemstone.gemfire.management.cli.Result.Status;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import com.gemstone.gemfire.management.internal.cli.util.RegionAttributesNames;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.util.Properties;
+
+public class ListAndDescribeRegionDUnitTest extends CliCommandTestBase {
+
+  public ListAndDescribeRegionDUnitTest(String name) {
+    super(name);
+    // TODO Auto-generated constructor stub
+  }
+
+  private static final String REGION1 = "region1";
+  private static final String REGION2 = "region2";
+  private static final String REGION3 = "region3";
+  private static final String SUBREGION1A = "subregion1A";
+  private static final String SUBREGION1B = "subregion1B";
+  private static final String SUBREGION1C = "subregion1C";
+  private static final String PR1 = "PR1";
+  private static final String LOCALREGIONONMANAGER = "LocalRegionOnManager";
+
+
+  static class CacheListener2 extends CacheListenerAdapter {
+  }
+
+  static class CacheListener1 extends CacheListenerAdapter {
+  }
+
+  private Properties createProperties(String name, String groups) {
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "info");
+    props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
+    props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
+    props.setProperty(DistributionConfig.NAME_NAME, name);
+    props.setProperty(DistributionConfig.GROUPS_NAME, groups);
+    return props;
+  }
+
+  private void createPartitionedRegion1() {
+    final Cache cache = getCache();
+    // Create the data region
+    RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+    dataRegionFactory.create(PR1);
+  }
+
+  private void setupSystem() {
+    final Properties managerProps = createProperties("Manager", "G1");
+    createDefaultSetup(managerProps);
+
+    final Properties server1Props = createProperties("Server1", "G2");
+    final Host host = Host.getHost(0);
+    final VM[] servers = {host.getVM(0), host.getVM(1)};
+
+    // The manager VM
+    servers[0].invoke(new SerializableRunnable() {
+      public void run() {
+        final Cache cache = getCache();
+        RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        dataRegionFactory.setConcurrencyLevel(4);
+        EvictionAttributes ea = EvictionAttributes.createLIFOEntryAttributes(100, EvictionAction.LOCAL_DESTROY);
+        dataRegionFactory.setEvictionAttributes(ea);
+        dataRegionFactory.setEnableAsyncConflation(true);
+
+        FixedPartitionAttributes fpa = FixedPartitionAttributes.createFixedPartition("Par1", true);
+        PartitionAttributes pa = new PartitionAttributesFactory().setLocalMaxMemory(100).setRecoveryDelay(
+            2).setTotalMaxMemory(200).setRedundantCopies(1).addFixedPartitionAttributes(fpa).create();
+        dataRegionFactory.setPartitionAttributes(pa);
+
+        dataRegionFactory.create(PR1);
+        createLocalRegion(LOCALREGIONONMANAGER);
+      }
+    });
+
+    servers[1].invoke(new SerializableRunnable() {
+      public void run() {
+        getSystem(server1Props);
+        final Cache cache = getCache();
+        RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+        dataRegionFactory.setConcurrencyLevel(4);
+        EvictionAttributes ea = EvictionAttributes.createLIFOEntryAttributes(100, EvictionAction.LOCAL_DESTROY);
+        dataRegionFactory.setEvictionAttributes(ea);
+        dataRegionFactory.setEnableAsyncConflation(true);
+
+        FixedPartitionAttributes fpa = FixedPartitionAttributes.createFixedPartition("Par2", 4);
+        PartitionAttributes pa = new PartitionAttributesFactory().setLocalMaxMemory(150).setRecoveryDelay(
+            4).setTotalMaxMemory(200).setRedundantCopies(1).addFixedPartitionAttributes(fpa).create();
+        dataRegionFactory.setPartitionAttributes(pa);
+
+        dataRegionFactory.create(PR1);
+        createRegionsWithSubRegions();
+      }
+    });
+  }
+
+  private void createPartitionedRegion(String regionName) {
+
+    final Cache cache = getCache();
+    // Create the data region
+    RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+    dataRegionFactory.setConcurrencyLevel(4);
+    EvictionAttributes ea = EvictionAttributes.createLIFOEntryAttributes(100, EvictionAction.LOCAL_DESTROY);
+    dataRegionFactory.setEvictionAttributes(ea);
+    dataRegionFactory.setEnableAsyncConflation(true);
+
+    FixedPartitionAttributes fpa = FixedPartitionAttributes.createFixedPartition("Par1", true);
+    PartitionAttributes pa = new PartitionAttributesFactory().setLocalMaxMemory(100).setRecoveryDelay(
+        2).setTotalMaxMemory(200).setRedundantCopies(1).addFixedPartitionAttributes(fpa).create();
+    dataRegionFactory.setPartitionAttributes(pa);
+    dataRegionFactory.addCacheListener(new CacheListener1());
+    dataRegionFactory.addCacheListener(new CacheListener2());
+    dataRegionFactory.create(regionName);
+  }
+
+
+  private void createLocalRegion(final String regionName) {
+    final Cache cache = getCache();
+    // Create the data region
+    RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.LOCAL);
+    dataRegionFactory.create(regionName);
+  }
+
+  /**
+   * Creates a region that uses compression on region entry values.
+   *
+   * @param regionName a unique region name.
+   */
+  private void createCompressedRegion(final String regionName) {
+    final Cache cache = getCache();
+
+    RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
+    dataRegionFactory.setCompressor(SnappyCompressor.getDefaultInstance());
+    dataRegionFactory.create(regionName);
+  }
+
+  @SuppressWarnings("deprecation")
+  private void createRegionsWithSubRegions() {
+    final Cache cache = getCache();
+
+    RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
+    dataRegionFactory.setConcurrencyLevel(3);
+    Region<String, Integer> region1 = dataRegionFactory.create(REGION1);
+    region1.createSubregion(SUBREGION1C, region1.getAttributes());
+    Region<String, Integer> subregion2 = region1.createSubregion(SUBREGION1A, region1.getAttributes());
+
+    subregion2.createSubregion(SUBREGION1B, subregion2.getAttributes());
+    dataRegionFactory.create(REGION2);
+    dataRegionFactory.create(REGION3);
+  }
+
+
+  public void testListRegion() {
+    setupSystem();
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.LIST_REGION);
+    String commandString = csb.toString();
+    CommandResult commandResult = executeCommand(commandString);
+    String commandResultAsString = commandResultToString(commandResult);
+    getLogWriter().info("Command String : " + commandString);
+    getLogWriter().info("Output : \n" + commandResultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(commandResultAsString.contains(PR1));
+    assertTrue(commandResultAsString.contains(LOCALREGIONONMANAGER));
+    assertTrue(commandResultAsString.contains(REGION1));
+    assertTrue(commandResultAsString.contains(REGION2));
+    assertTrue(commandResultAsString.contains(REGION3));
+
+
+    csb = new CommandStringBuilder(CliStrings.LIST_REGION);
+    csb.addOption(CliStrings.LIST_REGION__MEMBER, "Manager");
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    commandResultAsString = commandResultToString(commandResult);
+    getLogWriter().info("Command String : " + commandString);
+    getLogWriter().info("Output : \n" + commandResultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(commandResultAsString.contains(PR1));
+    assertTrue(commandResultAsString.contains(LOCALREGIONONMANAGER));
+
+    csb = new CommandStringBuilder(CliStrings.LIST_REGION);
+    csb.addOption(CliStrings.LIST_REGION__MEMBER, "Server1");
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    commandResultAsString = commandResultToString(commandResult);
+    getLogWriter().info("Command String : " + commandString);
+    getLogWriter().info("Output : \n" + commandResultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(commandResultAsString.contains(PR1));
+    assertTrue(commandResultAsString.contains(REGION1));
+    assertTrue(commandResultAsString.contains(REGION2));
+    assertTrue(commandResultAsString.contains(REGION3));
+    assertTrue(commandResultAsString.contains(SUBREGION1A));
+
+    csb = new CommandStringBuilder(CliStrings.LIST_REGION);
+    csb.addOption(CliStrings.LIST_REGION__GROUP, "G1");
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    commandResultAsString = commandResultToString(commandResult);
+    getLogWriter().info("Command String : " + commandString);
+    getLogWriter().info("Output : \n" + commandResultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(commandResultAsString.contains(PR1));
+    assertTrue(commandResultAsString.contains(LOCALREGIONONMANAGER));
+
+    csb = new CommandStringBuilder(CliStrings.LIST_REGION);
+    csb.addOption(CliStrings.LIST_REGION__GROUP, "G2");
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    commandResultAsString = commandResultToString(commandResult);
+    getLogWriter().info("Command String : " + commandString);
+    getLogWriter().info("Output : \n" + commandResultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(commandResultAsString.contains(PR1));
+    assertTrue(commandResultAsString.contains(REGION1));
+    assertTrue(commandResultAsString.contains(REGION2));
+    assertTrue(commandResultAsString.contains(REGION3));
+    assertTrue(commandResultAsString.contains(SUBREGION1A));
+  }
+
+  public void testDescribeRegion() {
+    setupSystem();
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.DESCRIBE_REGION);
+    csb.addOption(CliStrings.DESCRIBE_REGION__NAME, PR1);
+    String commandString = csb.toString();
+    CommandResult commandResult = executeCommand(commandString);
+    String commandResultAsString = commandResultToString(commandResult);
+    getLogWriter().info("Command String : " + commandString);
+    getLogWriter().info("Output : \n" + commandResultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(commandResultAsString.contains(PR1));
+    assertTrue(commandResultAsString.contains("Server1"));
+
+    csb = new CommandStringBuilder(CliStrings.DESCRIBE_REGION);
+    csb.addOption(CliStrings.DESCRIBE_REGION__NAME, LOCALREGIONONMANAGER);
+    commandString = csb.toString();
+    commandResult = executeCommand(commandString);
+    commandResultAsString = commandResultToString(commandResult);
+    getLogWriter().info("Command String : " + commandString);
+    getLogWriter().info("Output : \n" + commandResultAsString);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(commandResultAsString.contains(LOCALREGIONONMANAGER));
+    assertTrue(commandResultAsString.contains("Manager"));
+  }
+
+  /**
+   * Asserts that a describe region command issued on a region with compression returns the correct non-default region
+   * attribute for compression and the correct codec value.
+   */
+  public void testDescribeRegionWithCompressionCodec() {
+    final String regionName = "compressedRegion";
+    VM vm = Host.getHost(0).getVM(1);
+
+    setupSystem();
+
+    // Create compressed region
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        createCompressedRegion(regionName);
+      }
+    });
+
+    // Test the describe command; look for compression
+    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.DESCRIBE_REGION);
+    csb.addOption(CliStrings.DESCRIBE_REGION__NAME, regionName);
+    String commandString = csb.toString();
+    CommandResult commandResult = executeCommand(commandString);
+    String commandResultAsString = commandResultToString(commandResult);
+    assertEquals(Status.OK, commandResult.getStatus());
+    assertTrue(commandResultAsString.contains(regionName));
+    assertTrue(commandResultAsString.contains(RegionAttributesNames.COMPRESSOR));
+    assertTrue(commandResultAsString.contains(RegionEntryContext.DEFAULT_COMPRESSION_PROVIDER));
+
+    // Destroy compressed region
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Region region = getCache().getRegion(regionName);
+        assertNotNull(region);
+        region.destroyRegion();
+      }
+    });
+  }
+}
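
The diff above exercises one small pattern over and over: build a gfsh command string from CliStrings
constants with CommandStringBuilder, run it through the CliCommandTestBase harness, and assert on the
textual result. A minimal sketch of that pattern, written as a helper that could live inside a
CliCommandTestBase subclass; it assumes the same base-class methods shown in the diff (executeCommand,
commandResultToString) and the JUnit-style asserts, and the helper itself is hypothetical, not part of
the commit:

  import com.gemstone.gemfire.management.cli.Result.Status;
  import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
  import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
  import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;

  // Hypothetical helper showing the build-execute-assert cycle used by testListRegion above.
  private void assertRegionListedOnMember(final String memberName, final String regionName) {
    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.LIST_REGION);
    csb.addOption(CliStrings.LIST_REGION__MEMBER, memberName);    // restrict the listing to one member
    CommandResult commandResult = executeCommand(csb.toString()); // run the command through the test shell
    String output = commandResultToString(commandResult);         // flatten the result table to plain text
    assertEquals(Status.OK, commandResult.getStatus());           // the command itself must succeed
    assertTrue(output.contains(regionName));                      // and the region must appear in the output
  }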


[08/50] [abbrv] incubator-geode git commit: GEODE-53 - Applying latest feedback from mailing list:

Posted by kl...@apache.org.
GEODE-53 - Applying latest feedback from mailing list:

 - Moved the mailing lists to the top-level menu and higher up on the community page
 - Removed the logos and the Tools section
 - Changed the mailing-list archive links from the ASF archives to MarkMail (better UI)
 - Merged fixes from PR #47


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/d16e78d1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/d16e78d1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/d16e78d1

Branch: refs/heads/feature/GEODE-291
Commit: d16e78d1db74af2de0e03502078650850c5b48aa
Parents: 31e85e1
Author: William Markito <wm...@pivotal.io>
Authored: Mon Dec 7 16:05:52 2015 -0800
Committer: William Markito <wm...@pivotal.io>
Committed: Mon Dec 7 16:05:52 2015 -0800

----------------------------------------------------------------------
 gemfire-site/content/community/index.html       | 68 ++++++++++----------
 gemfire-site/content/index.html                 |  9 +--
 gemfire-site/content/releases/index.html        |  3 +-
 .../website/content/community/index.html        | 65 +++++++++----------
 gemfire-site/website/content/index.html         |  6 +-
 gemfire-site/website/layouts/header.html        |  3 +-
 6 files changed, 78 insertions(+), 76 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d16e78d1/gemfire-site/content/community/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/content/community/index.html b/gemfire-site/content/community/index.html
index caf98e2..b1d0783 100644
--- a/gemfire-site/content/community/index.html
+++ b/gemfire-site/content/community/index.html
@@ -223,7 +223,8 @@
                 <li><a href="https://issues.apache.org/jira/browse/GEODE/"
                        target="_blank"><span class="icns icon-bug"></span></a></li>
                 <li><a href="http://stackoverflow.com/search?q=Apache%20Geode" target="_blank"><span class="icns icon-stackexchange"></span></a></li>
-                <li><a href="/community/#live"><span class="icns icon-comments"></span></a></li>
+                <li><a href="/community/#mailing-lists"><span class="icns icon-envelope"></span></a></li>
+                <!-- <li><a href="/community/#live"><span class="icns icon-comments"></span></a></li> -->
                 <li><a href="https://twitter.com/apachegeode" target="_blank"><span class="icns icon-twitter"></span></a></li>
                 <li><a href="https://cwiki.apache.org/confluence/display/geode/" target="_blank"><span class="icns icon-edit"></span></a></li>
                 <li><a href="/releases/"><span class="icns icon-releases"></span></a></li>
@@ -250,6 +251,35 @@
 	</div>
 </section>
 
+<section class="bf-community">
+    <div class="container">
+    	<div class="row">
+    	    <div class="col-md-12">
+    	    	<h2 class="icns-envelope" id="mailing-lists"><span>Mailing-lists</span></h2>
+			</div>
+		</div>
+		<div class="row">
+	    	<div class="col-md-4">
+	    		<h3>Users</h3>
+	    		<p><em>Perfect if you build apps against Apache Geode or deploy Apache Geode.</em></p>
+	    		<p>To subscribe, send a blank email to<br/><a href="mailto:user-subscribe@geode.incubator.apache.org">user-subscribe@geode.incubator.apache.org</a>.</p>
+	    		<p>You can also <a href="http://markmail.org/search/?q=list%3Aorg.apache.geode.user+order%3Adate-backward">read the archives</a>.</p>
+			</div>
+	    	<div class="col-md-4">
+	    		<h3>Dev</h3>
+	    		<p><em>If you are building contributions & modifications to Apache Geode this is the list for you.</em><p>
+	    		<p>To subscribe, send a blank email to<br/><a href="mailto:dev-subscribe@geode.incubator.apache.org">dev-subscribe@geode.incubator.apache.org</a>.</p>
+	    		<p>You can also <a href="http://markmail.org/search/?q=list%3Aorg.apache.geode.dev+order%3Adate-backward">read the archives</a>.</p>
+			</div>
+	    	<div class="col-md-4">
+	    		<h3>Commits</h3>
+	    		<p><em>This list receives an email whenever new code is contributed to Apache Geode.</em><p>
+	    		<p>To subscribe, send a blank email to<br/><a href="mailto:commits-subscribe@geode.incubator.apache.org">commits-subscribe@geode.incubator.apache.org</a>.</p>
+	    		<p>You can also <a href="http://markmail.org/search/?q=list%3Aorg.apache.geode.commits+order%3Adate-backward">read the archives</a>.</p>
+			</div>
+		</div>
+	</div>
+</section>
 
 <section class="bf-community">
     <div class="container">
@@ -341,36 +371,6 @@
     <div class="container">
     	<div class="row">
     	    <div class="col-md-12">
-    	    	<h2 class="icns-envelope" id="mailing-lists"><span>Mailing-lists</span></h2>
-			</div>
-		</div>
-		<div class="row">
-	    	<div class="col-md-4">
-	    		<h3>Users</h3>
-	    		<p><em>Perfect if you build apps against Apache Geode or deploy Apache Geode.</em></p>
-	    		<p>To subscribe, send a blank email to<br/><a href="mailto:user-subscribe@geode.incubator.apache.org">user-subscribe@geode.incubator.apache.org</a>.</p>
-	    		<p>You can also <a href="https://mail-archives.apache.org/mod_mbox/incubator-geode-user/">read the archives</a>.</p>
-			</div>
-	    	<div class="col-md-4">
-	    		<h3>Dev</h3>
-	    		<p><em>If you are building contributions & modifications to Apache Geode this is the list for you.</em><p>
-	    		<p>To subscribe, send a blank email to<br/><a href="mailto:dev-subscribe@geode.incubator.apache.org">dev-subscribe@geode.incubator.apache.org</a>.</p>
-	    		<p>You can also <a href="https://mail-archives.apache.org/mod_mbox/incubator-geode-dev/">read the archives</a>.</p>
-			</div>
-	    	<div class="col-md-4">
-	    		<h3>Commits</h3>
-	    		<p><em>This list receives an email whenever new code is contributed to Apache Geode.</em><p>
-	    		<p>To subscribe, send a blank email to<br/><a href="mailto:commits-subscribe@geode.incubator.apache.org">commits-subscribe@geode.incubator.apache.org</a>.</p>
-	    		<p>You can also <a href="https://mail-archives.apache.org/mod_mbox/incubator-geode-commits/">read the archives</a>.</p>
-			</div>
-		</div>
-	</div>
-</section>
-
-<section class="bf-community">
-    <div class="container">
-    	<div class="row">
-    	    <div class="col-md-12">
     	    	<h2 class="icns-briefcase" id="deployments"><span>Deployments</span></h2>
 			</div>
 		</div>
@@ -502,14 +502,14 @@
 	</div>
 </section>
 
-<section class="bf-community tools">
+<!-- <section class="bf-community tools">
     <div class="container">
     	<div class="row">
     	    <div class="col-md-12">
     	    	<h2 class="icns-tools" id="tools"><span>Tools</span></h2>
 			</div>
 		</div>
-		<div class="row">
+		 <div class="row">
 			<div class="col-md-2 text-center">
 				<a href="http://www.jetbrains.com/idea/" target="_blank"><img src="/img/intellij.png" /></a>
 				<p>IntelliJ IDEA</p>
@@ -521,7 +521,7 @@
 		</div>
 		These great companies have provided free product licenses to the Apache Geode team. We use these tools and love them.  Thank you!
 	</div>
-</section>
+</section> -->
 
 
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d16e78d1/gemfire-site/content/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/content/index.html b/gemfire-site/content/index.html
index de8327d..a450eea 100644
--- a/gemfire-site/content/index.html
+++ b/gemfire-site/content/index.html
@@ -54,7 +54,8 @@
                 <li><a href="https://issues.apache.org/jira/browse/GEODE/"
                        target="_blank"><span class="icns icon-bug"></span></a></li>
                 <li><a href="http://stackoverflow.com/search?q=Apache%20Geode" target="_blank"><span class="icns icon-stackexchange"></span></a></li>
-                <li><a href="/community/#live"><span class="icns icon-comments"></span></a></li>
+                <li><a href="/community/#mailing-lists"><span class="icns icon-envelope"></span></a></li>
+                <!-- <li><a href="/community/#live"><span class="icns icon-comments"></span></a></li> -->
                 <li><a href="https://twitter.com/apachegeode" target="_blank"><span class="icns icon-twitter"></span></a></li>
                 <li><a href="https://cwiki.apache.org/confluence/display/geode/" target="_blank"><span class="icns icon-edit"></span></a></li>
                 <li><a href="/releases/"><span class="icns icon-releases"></span></a></li>
@@ -154,7 +155,7 @@
 <section class="bf-questions">
     <div class="container">
             <div class="col-md-12 text-center cta">
-                And much more... Interested ? You can check our <a href="https://cwiki.apache.org/confluence/display/GEODE/Index#Index-Geodein5minutesGeodein5minutes" target="_blank" class="btn btn-inverse btn-lg">Geode in 5 minutes tutorial</a> <span class="avoidwrap">, ask a question on the <a href="/community/" class="btn btn-inverse btn-lg">Mailing lists</a> or <a href="http://stackoverflow.com/search?q=Apache%20Geode" class="btn btn-inverse btn-lg">StackOverflow</a></span>
+                And much more... Interested? You can check our <a href="https://cwiki.apache.org/confluence/display/GEODE/Index#Index-Geodein5minutesGeodein5minutes" target="_blank" class="btn btn-inverse btn-lg">Geode in 5 minutes tutorial</a> <span class="avoidwrap">, ask a question on the <a href="/community/" class="btn btn-inverse btn-lg">Mailing lists</a> or <a href="http://stackoverflow.com/search?q=Apache%20Geode" class="btn btn-inverse btn-lg">StackOverflow</a></span>
             </div>
     </div>
 </section
@@ -167,14 +168,14 @@
                 <h2>About the Project</h2>
                 <p>Apache Geode is a data management platform that provides real-time, consistent access to data-intensive applications throughout widely distributed cloud architectures.</p>
 
-                <p>By pooling memory, CPU, network resources, and optionally local disk across multiple processes to manage application objects and behavior, it uses dynamic replication and data partitioning techniques to implement high availability, improved performance, scalability, and fault tolerance. In addition to being a distributed data container, Apache Geode is an in-memory data management system that provides reliable asynchronous event notifications and guaranteed message delivery.</p>
+                <p>By pooling memory, CPU, network resources, and (optionally) local disk across multiple processes to manage application objects and behavior, it uses dynamic replication and data partitioning techniques to implement high availability, improved performance, scalability, and fault tolerance. In addition to being a distributed data container, Apache Geode is an in-memory data management system that provides reliable asynchronous event notifications and guaranteed message delivery.</p>
 
                 <p>Apache Geode is a mature, robust technology originally developed by GemStone Systems in Beaverton, Oregon.
Commercially available as GemFire™, the technology was first widely deployed in the financial sector as the transactional, low-latency data engine used
 in Wall Street trading platforms.
 Today Apache Geode is used by over 600 enterprise customers for high-scale business applications that must meet low latency and 24x7 availability requirements.</p>
 
-                <p>This project is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
+                <p>This project is undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
             </div>
             <!--
             <div class="col-md-4 text-left">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d16e78d1/gemfire-site/content/releases/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/content/releases/index.html b/gemfire-site/content/releases/index.html
index 513fb25..25bd47c 100644
--- a/gemfire-site/content/releases/index.html
+++ b/gemfire-site/content/releases/index.html
@@ -54,7 +54,8 @@
                 <li><a href="https://issues.apache.org/jira/browse/GEODE/"
                        target="_blank"><span class="icns icon-bug"></span></a></li>
                 <li><a href="http://stackoverflow.com/search?q=Apache%20Geode" target="_blank"><span class="icns icon-stackexchange"></span></a></li>
-                <li><a href="/community/#live"><span class="icns icon-comments"></span></a></li>
+                <li><a href="/community/#mailing-lists"><span class="icns icon-envelope"></span></a></li>
+                <!-- <li><a href="/community/#live"><span class="icns icon-comments"></span></a></li> -->
                 <li><a href="https://twitter.com/apachegeode" target="_blank"><span class="icns icon-twitter"></span></a></li>
                 <li><a href="https://cwiki.apache.org/confluence/display/geode/" target="_blank"><span class="icns icon-edit"></span></a></li>
                 <li><a href="/releases/"><span class="icns icon-releases"></span></a></li>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d16e78d1/gemfire-site/website/content/community/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/website/content/community/index.html b/gemfire-site/website/content/community/index.html
index 43c9cba..7abb0a9 100644
--- a/gemfire-site/website/content/community/index.html
+++ b/gemfire-site/website/content/community/index.html
@@ -13,6 +13,35 @@
 	</div>
 </section>
 
+<section class="bf-community">
+    <div class="container">
+    	<div class="row">
+    	    <div class="col-md-12">
+    	    	<h2 class="icns-envelope" id="mailing-lists"><span>Mailing-lists</span></h2>
+			</div>
+		</div>
+		<div class="row">
+	    	<div class="col-md-4">
+	    		<h3>Users</h3>
+	    		<p><em>Perfect if you build apps against Apache Geode or deploy Apache Geode.</em></p>
+	    		<p>To subscribe, send a blank email to<br/><a href="mailto:user-subscribe@geode.incubator.apache.org">user-subscribe@geode.incubator.apache.org</a>.</p>
+	    		<p>You can also <a href="http://markmail.org/search/?q=list%3Aorg.apache.geode.user+order%3Adate-backward">read the archives</a>.</p>
+			</div>
+	    	<div class="col-md-4">
+	    		<h3>Dev</h3>
+	    		<p><em>If you are building contributions & modifications to Apache Geode this is the list for you.</em><p>
+	    		<p>To subscribe, send a blank email to<br/><a href="mailto:dev-subscribe@geode.incubator.apache.org">dev-subscribe@geode.incubator.apache.org</a>.</p>
+	    		<p>You can also <a href="http://markmail.org/search/?q=list%3Aorg.apache.geode.dev+order%3Adate-backward">read the archives</a>.</p>
+			</div>
+	    	<div class="col-md-4">
+	    		<h3>Commits</h3>
+	    		<p><em>This list receives an email whenever new code is contributed to Apache Geode.</em><p>
+	    		<p>To subscribe, send a blank email to<br/><a href="mailto:commits-subscribe@geode.incubator.apache.org">commits-subscribe@geode.incubator.apache.org</a>.</p>
+	    		<p>You can also <a href="http://markmail.org/search/?q=list%3Aorg.apache.geode.commits+order%3Adate-backward">read the archives</a>.</p>
+			</div>
+		</div>
+	</div>
+</section>
 
 <section class="bf-community">
     <div class="container">
@@ -104,36 +133,6 @@
     <div class="container">
     	<div class="row">
     	    <div class="col-md-12">
-    	    	<h2 class="icns-envelope" id="mailing-lists"><span>Mailing-lists</span></h2>
-			</div>
-		</div>
-		<div class="row">
-	    	<div class="col-md-4">
-	    		<h3>Users</h3>
-	    		<p><em>Perfect if you build apps against Apache Geode or deploy Apache Geode.</em></p>
-	    		<p>To subscribe, send a blank email to<br/><a href="mailto:user-subscribe@geode.incubator.apache.org">user-subscribe@geode.incubator.apache.org</a>.</p>
-	    		<p>You can also <a href="https://mail-archives.apache.org/mod_mbox/incubator-geode-user/">read the archives</a>.</p>
-			</div>
-	    	<div class="col-md-4">
-	    		<h3>Dev</h3>
-	    		<p><em>If you are building contributions & modifications to Apache Geode this is the list for you.</em><p>
-	    		<p>To subscribe, send a blank email to<br/><a href="mailto:dev-subscribe@geode.incubator.apache.org">dev-subscribe@geode.incubator.apache.org</a>.</p>
-	    		<p>You can also <a href="https://mail-archives.apache.org/mod_mbox/incubator-geode-dev/">read the archives</a>.</p>
-			</div>
-	    	<div class="col-md-4">
-	    		<h3>Commits</h3>
-	    		<p><em>This list receives an email whenever new code is contributed to Apache Geode.</em><p>
-	    		<p>To subscribe, send a blank email to<br/><a href="mailto:commits-subscribe@geode.incubator.apache.org">commits-subscribe@geode.incubator.apache.org</a>.</p>
-	    		<p>You can also <a href="https://mail-archives.apache.org/mod_mbox/incubator-geode-commits/">read the archives</a>.</p>
-			</div>
-		</div>
-	</div>
-</section>
-
-<section class="bf-community">
-    <div class="container">
-    	<div class="row">
-    	    <div class="col-md-12">
     	    	<h2 class="icns-briefcase" id="deployments"><span>Deployments</span></h2>
 			</div>
 		</div>
@@ -265,14 +264,14 @@
 	</div>
 </section>
 
-<section class="bf-community tools">
+<!-- <section class="bf-community tools">
     <div class="container">
     	<div class="row">
     	    <div class="col-md-12">
     	    	<h2 class="icns-tools" id="tools"><span>Tools</span></h2>
 			</div>
 		</div>
-		<div class="row">
+		 <div class="row">
 			<div class="col-md-2 text-center">
 				<a href="http://www.jetbrains.com/idea/" target="_blank"><img src="/img/intellij.png" /></a>
 				<p>IntelliJ IDEA</p>
@@ -284,4 +283,4 @@
 		</div>
 		These great companies have provided free product licenses to the Apache Geode team. We use these tools and love them.  Thank you!
 	</div>
-</section>
+</section> -->

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d16e78d1/gemfire-site/website/content/index.html
----------------------------------------------------------------------
diff --git a/gemfire-site/website/content/index.html b/gemfire-site/website/content/index.html
index 1925150..17058e7 100644
--- a/gemfire-site/website/content/index.html
+++ b/gemfire-site/website/content/index.html
@@ -89,7 +89,7 @@ title: Performance is key. Consistency is a must.
 <section class="bf-questions">
     <div class="container">
             <div class="col-md-12 text-center cta">
-                And much more... Interested ? You can check our <a href="https://cwiki.apache.org/confluence/display/GEODE/Index#Index-Geodein5minutesGeodein5minutes" target="_blank" class="btn btn-inverse btn-lg">Geode in 5 minutes tutorial</a> <span class="avoidwrap">, ask a question on the <a href="/community/" class="btn btn-inverse btn-lg">Mailing lists</a> or <a href="http://stackoverflow.com/search?q=Apache%20Geode" class="btn btn-inverse btn-lg">StackOverflow</a></span>
+                And much more... Interested? You can check our <a href="https://cwiki.apache.org/confluence/display/GEODE/Index#Index-Geodein5minutesGeodein5minutes" target="_blank" class="btn btn-inverse btn-lg">Geode in 5 minutes tutorial</a> <span class="avoidwrap">, ask a question on the <a href="/community/" class="btn btn-inverse btn-lg">Mailing lists</a> or <a href="http://stackoverflow.com/search?q=Apache%20Geode" class="btn btn-inverse btn-lg">StackOverflow</a></span>
             </div>
     </div>
 </section
@@ -102,14 +102,14 @@ title: Performance is key. Consistency is a must.
                 <h2>About the Project</h2>
                 <p>Apache Geode is a data management platform that provides real-time, consistent access to data-intensive applications throughout widely distributed cloud architectures.</p>
 
-                <p>By pooling memory, CPU, network resources, and optionally local disk across multiple processes to manage application objects and behavior, it uses dynamic replication and data partitioning techniques to implement high availability, improved performance, scalability, and fault tolerance. In addition to being a distributed data container, Apache Geode is an in-memory data management system that provides reliable asynchronous event notifications and guaranteed message delivery.</p>
+                <p>By pooling memory, CPU, network resources, and (optionally) local disk across multiple processes to manage application objects and behavior, it uses dynamic replication and data partitioning techniques to implement high availability, improved performance, scalability, and fault tolerance. In addition to being a distributed data container, Apache Geode is an in-memory data management system that provides reliable asynchronous event notifications and guaranteed message delivery.</p>
 
                 <p>Apache Geode is a mature, robust technology originally developed by GemStone Systems in Beaverton, Oregon.
Commercially available as GemFire™, the technology was first widely deployed in the financial sector as the transactional, low-latency data engine used
 in Wall Street trading platforms.
 Today Apache Geode is used by over 600 enterprise customers for high-scale business applications that must meet low latency and 24x7 availability requirements.</p>
 
-                <p>This project is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
+                <p>This project is undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
             </div>
             <!--
             <div class="col-md-4 text-left">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d16e78d1/gemfire-site/website/layouts/header.html
----------------------------------------------------------------------
diff --git a/gemfire-site/website/layouts/header.html b/gemfire-site/website/layouts/header.html
index 1e7f18a..8ea572a 100644
--- a/gemfire-site/website/layouts/header.html
+++ b/gemfire-site/website/layouts/header.html
@@ -220,7 +220,8 @@
                 <li><a href="https://issues.apache.org/jira/browse/GEODE/"
                        target="_blank"><span class="icns icon-bug"></span></a></li>
                 <li><a href="http://stackoverflow.com/search?q=Apache%20Geode" target="_blank"><span class="icns icon-stackexchange"></span></a></li>
-                <li><a href="/community/#live"><span class="icns icon-comments"></span></a></li>
+                <li><a href="/community/#mailing-lists"><span class="icns icon-envelope"></span></a></li>
+                <!-- <li><a href="/community/#live"><span class="icns icon-comments"></span></a></li> -->
                 <li><a href="https://twitter.com/apachegeode" target="_blank"><span class="icns icon-twitter"></span></a></li>
                 <li><a href="https://cwiki.apache.org/confluence/display/geode/" target="_blank"><span class="icns icon-edit"></span></a></li>
                 <li><a href="/releases/"><span class="icns icon-releases"></span></a></li>


[34/50] [abbrv] incubator-geode git commit: GEODE-608: Refactor rat configuration into a separate script

Posted by kl...@apache.org.
GEODE-608: Refactor rat configuration into a separate script

Split out the rat configuration and update the excludes list.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/3da5bccb
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/3da5bccb
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/3da5bccb

Branch: refs/heads/feature/GEODE-291
Commit: 3da5bccb5b63185f2d503772fab9733aa08da035
Parents: e45539a
Author: Anthony Baker <ab...@pivotal.io>
Authored: Sat Dec 5 10:47:43 2015 -0800
Committer: Anthony Baker <ab...@pivotal.io>
Committed: Thu Dec 10 09:44:57 2015 -0800

----------------------------------------------------------------------
 build.gradle      | 112 +-----------------------------------------
 gradle/rat.gradle | 129 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 130 insertions(+), 111 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3da5bccb/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 6045b72..9042976 100755
--- a/build.gradle
+++ b/build.gradle
@@ -10,118 +10,8 @@ buildscript {
   }
 }
 
+apply from: 'gradle/rat.gradle'
 apply plugin: 'wrapper'
-apply plugin: "org.nosphere.apache.rat"
-
-rat {
-  excludes = [
-    // git
-    '.git/**',
-    '**/.gitignore',
-    
-    // gradle
-    '**/.gradle/**',
-    'gradlew',
-    'gradlew.bat',
-    'gradle/wrapper/gradle-wrapper.properties',
-    '**/build/**',
-    
-    // IDE
-    'etc/eclipseFormatterProfile.xml',
-    'etc/intellijIdeaCodeStyle.xml',
-    '**/.project',
-    '**/.classpath',
-    '**/.settings/**',
-    '**/build-eclipse/**',
-    '*.iml',
-    '.idea/**',
-
-    // text files
-    '**/*.fig',
-    '**/*.txt',
-    '**/*.md',
-    '**/*.json',
-    '**/*.tx0',
-    '**/*.txo',
-    
-    // binary files
-    '**/*.cer',
-    '**/*.gfs',
-    '**/keystore',
-    '**/*.ser',
-    '**/*.xls',
-    
-    // other text files
-    'gemfire-spark-connector/project/plugins.sbt',
-    'gemfire-spark-connector/project/build.properties',
-    
-    // ANTLR generated files
-    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexer.java',
-    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexerTokenTypes.java',
-    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLParser.java',
-    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexerTokenTypes.txt',
-    
-    // Service META-INF
-    '**/META-INF/services/org.xml.sax.ext.EntityResolver2',
-    '**/META-INF/services/com.gemstone.gemfire.internal.cache.CacheService',
-    '**/META-INF/services/com.gemstone.gemfire.internal.cache.xmlcache.XmlParser',
-    '**/META-INF/services/org.springframework.shell.core.CommandMarker',
-
-    // --- Other Licenses ---
-    
-    // Copied from other ASF projects 
-    'gemfire-core/src/main/resources/com/gemstone/gemfire/admin/jmx/internal/doc-files/mbeans-descriptors.dtd',
-    'gemfire-core/src/main/resources/com/gemstone/gemfire/admin/jmx/mbeans-descriptors.xml',
-
-    // Public Domain http://meyerweb.com/eric/tools/css/reset/
-    'gemfire-web-api/src/main/webapp/docs/css/reset.css',
-
-    // JSON License - permissive, used for Good, not Evil
-    'gemfire-json/src/main/java/org/json/CDL.java',
-    'gemfire-json/src/main/java/org/json/Cookie.java',
-    'gemfire-json/src/main/java/org/json/CookieList.java',
-    'gemfire-json/src/main/java/org/json/CDL.java',
-    'gemfire-json/src/main/java/org/json/Cookie.java',
-    'gemfire-json/src/main/java/org/json/CookieList.java',
-    'gemfire-json/src/main/java/org/json/HTTP.java',
-    'gemfire-json/src/main/java/org/json/HTTPTokener.java',
-    'gemfire-json/src/main/java/org/json/JSONArray.java',
-    'gemfire-json/src/main/java/org/json/JSONException.java',
-    'gemfire-json/src/main/java/org/json/JSONML.java',
-    'gemfire-json/src/main/java/org/json/JSONObject.java',
-    'gemfire-json/src/main/java/org/json/JSONString.java',
-    'gemfire-json/src/main/java/org/json/JSONStringer.java',
-    'gemfire-json/src/main/java/org/json/JSONTokener.java',
-    'gemfire-json/src/main/java/org/json/JSONWriter.java',
-    'gemfire-json/src/main/java/org/json/XML.java',
-    'gemfire-json/src/main/java/org/json/XMLTokener.java',
-
-    // MIT License
-    'gemfire-web-api/src/main/webapp/docs/lib/backbone-min.js',
-    'gemfire-web-api/src/main/webapp/docs/lib/jquery-1.8.0.min.js',
-    'gemfire-web-api/src/main/webapp/docs/lib/jquery.ba-bbq.min.js',
-    'gemfire-web-api/src/main/webapp/docs/lib/jquery.slideto.min.js',
-    'gemfire-web-api/src/main/webapp/docs/lib/jquery.wiggle.min.js',
-    'gemfire-web-api/src/main/webapp/docs/lib/underscore-min.js',
-    'gemfire-site/src/jbake/**',
-
-    // MIT or ISC
-    'gemfire-web-api/src/main/webapp/docs/lib/shred.bundle.js',
-    'gemfire-web-api/src/main/webapp/docs/lib/shred/content.js',
-
-    // BSD License
-    'gemfire-web-api/src/main/webapp/docs/lib/highlight.7.3.pack.js',
-
-    // Apache License
-    'gemfire-web-api/src/main/webapp/docs/o2c.html',
-    'gemfire-web-api/src/main/webapp/docs/index.html',
-    'gemfire-web-api/src/main/webapp/docs/lib/swagger-oauth.js',
-    'gemfire-web-api/src/main/webapp/docs/lib/swagger.js',
-    'gemfire-web-api/src/main/webapp/docs/css/screen.css',
-    'gemfire-web-api/src/main/webapp/docs/swagger-ui.js',
-    'gemfire-web-api/src/main/webapp/docs/swagger-ui.min.js'
-  ]
-}
 
 // Load all properties in dependency-version.properties as project properties, so all projects can read them
 Properties dependencyVersions = new Properties()

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3da5bccb/gradle/rat.gradle
----------------------------------------------------------------------
diff --git a/gradle/rat.gradle b/gradle/rat.gradle
new file mode 100644
index 0000000..96d1944
--- /dev/null
+++ b/gradle/rat.gradle
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+apply plugin: "org.nosphere.apache.rat"
+
+rat {
+  excludes = [
+    // git
+    '.git/**',
+    '**/.gitignore',
+    
+    // gradle
+    '**/.gradle/**',
+    'gradlew',
+    'gradlew.bat',
+    'gradle/wrapper/gradle-wrapper.properties',
+    '**/build/**',
+    
+    // IDE
+    'etc/eclipseFormatterProfile.xml',
+    'etc/intellijIdeaCodeStyle.xml',
+    '**/.project',
+    '**/.classpath',
+    '**/.settings/**',
+    '**/build-eclipse/**',
+    '*.iml',
+    '.idea/**',
+
+    // text files
+    '**/*.fig',
+    '**/*.txt',
+    '**/*.md',
+    '**/*.json',
+    '**/*.tx0',
+    '**/*.txo',
+    
+    // binary files
+    '**/*.cer',
+    '**/*.gfs',
+    '**/keystore',
+    '**/*.ser',
+    '**/*.xls',
+    
+    // other text files
+    'gemfire-spark-connector/project/plugins.sbt',
+    'gemfire-spark-connector/project/build.properties',
+    '**/log4j2*.xml',
+ 
+    // ANTLR generated files
+    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexer.java',
+    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexerTokenTypes.java',
+    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLParser.java',
+    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexerTokenTypes.txt',
+    
+    // Service META-INF
+    '**/META-INF/services/org.xml.sax.ext.EntityResolver2',
+    '**/META-INF/services/com.gemstone.gemfire.internal.cache.CacheService',
+    '**/META-INF/services/com.gemstone.gemfire.internal.cache.xmlcache.XmlParser',
+    '**/META-INF/services/org.springframework.shell.core.CommandMarker',
+
+    // --- Other Licenses ---
+    
+    // Copied from other ASF projects 
+    'gemfire-core/src/main/resources/com/gemstone/gemfire/admin/jmx/internal/doc-files/mbeans-descriptors.dtd',
+    'gemfire-core/src/main/resources/com/gemstone/gemfire/admin/jmx/mbeans-descriptors.xml',
+
+    // Public Domain http://meyerweb.com/eric/tools/css/reset/
+    'gemfire-web-api/src/main/webapp/docs/css/reset.css',
+
+    // JSON License - permissive, used for Good, not Evil
+    'gemfire-json/src/main/java/org/json/CDL.java',
+    'gemfire-json/src/main/java/org/json/Cookie.java',
+    'gemfire-json/src/main/java/org/json/CookieList.java',
+    'gemfire-json/src/main/java/org/json/CDL.java',
+    'gemfire-json/src/main/java/org/json/Cookie.java',
+    'gemfire-json/src/main/java/org/json/CookieList.java',
+    'gemfire-json/src/main/java/org/json/HTTP.java',
+    'gemfire-json/src/main/java/org/json/HTTPTokener.java',
+    'gemfire-json/src/main/java/org/json/JSONArray.java',
+    'gemfire-json/src/main/java/org/json/JSONException.java',
+    'gemfire-json/src/main/java/org/json/JSONML.java',
+    'gemfire-json/src/main/java/org/json/JSONObject.java',
+    'gemfire-json/src/main/java/org/json/JSONString.java',
+    'gemfire-json/src/main/java/org/json/JSONStringer.java',
+    'gemfire-json/src/main/java/org/json/JSONTokener.java',
+    'gemfire-json/src/main/java/org/json/JSONWriter.java',
+    'gemfire-json/src/main/java/org/json/XML.java',
+    'gemfire-json/src/main/java/org/json/XMLTokener.java',
+
+    // MIT License
+    'gemfire-web-api/src/main/webapp/docs/lib/backbone-min.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/jquery-1.8.0.min.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/jquery.ba-bbq.min.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/jquery.slideto.min.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/jquery.wiggle.min.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/underscore-min.js',
+    'gemfire-site/src/jbake/**',
+
+    // MIT or ISC
+    'gemfire-web-api/src/main/webapp/docs/lib/shred.bundle.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/shred/content.js',
+
+    // BSD License
+    'gemfire-web-api/src/main/webapp/docs/lib/highlight.7.3.pack.js',
+
+    // Apache License
+    'gemfire-web-api/src/main/webapp/docs/o2c.html',
+    'gemfire-web-api/src/main/webapp/docs/index.html',
+    'gemfire-web-api/src/main/webapp/docs/lib/swagger-oauth.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/swagger.js',
+    'gemfire-web-api/src/main/webapp/docs/css/screen.css',
+    'gemfire-web-api/src/main/webapp/docs/swagger-ui.js',
+    'gemfire-web-api/src/main/webapp/docs/swagger-ui.min.js'
+  ]
+}
+


[17/50] [abbrv] incubator-geode git commit: GEODE-563: Moving gfsh tests from closed

Posted by kl...@apache.org.
GEODE-563: Moving gfsh tests from closed


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/eddef322
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/eddef322
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/eddef322

Branch: refs/heads/feature/GEODE-291
Commit: eddef322defedea8396697e65c989b85c3d9c433
Parents: 1e93c6f
Author: Jens Deppe <jd...@pivotal.io>
Authored: Thu Dec 3 10:21:59 2015 -0800
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Tue Dec 8 09:23:28 2015 -0800

----------------------------------------------------------------------
 gemfire-assembly/build.gradle                   |   13 +
 .../LauncherLifecycleCommandsDUnitTest.java     | 1005 +++++++++
 .../LauncherLifecycleCommandsJUnitTest.java     |  625 ++++++
 .../SharedConfigurationEndToEndDUnitTest.java   |  434 ++++
 .../management/internal/cli/HeadlessGfsh.java   |  376 ++++
 .../internal/cli/HeadlessGfshJUnitTest.java     |   87 +
 .../management/internal/cli/ResultHandler.java  |   23 +
 .../internal/cli/TableBuilderJUnitTest.java     |  183 ++
 .../cli/commands/CliCommandTestBase.java        |  560 +++++
 .../cli/commands/ConfigCommandsDUnitTest.java   |  497 +++++
 ...eateAlterDestroyRegionCommandsDUnitTest.java | 1148 ++++++++++
 .../cli/commands/DeployCommandsDUnitTest.java   |  480 ++++
 .../commands/DiskStoreCommandsDUnitTest.java    | 1154 ++++++++++
 .../cli/commands/FunctionCommandsDUnitTest.java |  593 +++++
 .../commands/GemfireDataCommandsDUnitTest.java  | 2087 ++++++++++++++++++
 ...WithCacheLoaderDuringCacheMissDUnitTest.java |  371 ++++
 .../cli/commands/IndexCommandsDUnitTest.java    |  817 +++++++
 ...stAndDescribeDiskStoreCommandsDUnitTest.java |  197 ++
 .../ListAndDescribeRegionDUnitTest.java         |  320 +++
 .../cli/commands/ListIndexCommandDUnitTest.java |  672 ++++++
 .../cli/commands/MemberCommandsDUnitTest.java   |  286 +++
 .../MiscellaneousCommandsDUnitTest.java         |  492 +++++
 ...laneousCommandsExportLogsPart1DUnitTest.java |  139 ++
 ...laneousCommandsExportLogsPart2DUnitTest.java |  148 ++
 ...laneousCommandsExportLogsPart3DUnitTest.java |  150 ++
 ...laneousCommandsExportLogsPart4DUnitTest.java |  141 ++
 .../cli/commands/QueueCommandsDUnitTest.java    |  385 ++++
 .../SharedConfigurationCommandsDUnitTest.java   |  338 +++
 .../cli/commands/ShellCommandsDUnitTest.java    |  365 +++
 .../cli/commands/ShowDeadlockDUnitTest.java     |  271 +++
 .../cli/commands/ShowMetricsDUnitTest.java      |  347 +++
 .../cli/commands/ShowStackTraceDUnitTest.java   |  149 ++
 .../cli/commands/UserCommandsDUnitTest.java     |  164 ++
 33 files changed, 15017 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-assembly/build.gradle
----------------------------------------------------------------------
diff --git a/gemfire-assembly/build.gradle b/gemfire-assembly/build.gradle
index 8de6d4e..514b4a1 100755
--- a/gemfire-assembly/build.gradle
+++ b/gemfire-assembly/build.gradle
@@ -31,6 +31,8 @@ dependencies {
 
   testCompile project(path: ':gemfire-junit', configuration: 'testOutput')
   testCompile project(path: ':gemfire-core', configuration: 'testOutput')
+
+  testRuntime files("${System.getProperty('java.home')}/../lib/tools.jar")
 }
 
 sourceSets {
@@ -287,6 +289,17 @@ afterEvaluate {
   } 
 }
 
+// Create a configuration closure to configure test targets with the install directory
+def dependOnInstalledProduct = {
+  dependsOn installDist
+  def install = file("$buildDir/install/${distributions.main.baseName}")
+  environment ('GEMFIRE', install)
+}
+
+// Add the configuration closure to the test targets so they depend on the install directory
+test dependOnInstalledProduct
+distributedTest dependOnInstalledProduct
+
 artifacts {
   archives depsJar, gfshDepsJar
 }
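
The dependOnInstalledProduct closure above wires the assembly's install directory into the test JVMs
through a GEMFIRE environment variable. How the moved tests consume that variable is not shown in this
hunk; the following is a hedged, hypothetical sketch of that consumption under the assumption that the
tests resolve the installed product from GEMFIRE (the helper name is illustrative only):

  import java.io.File;

  // Hypothetical helper: resolve the product install directory exported by dependOnInstalledProduct.
  static File resolveGeodeHome() {
    String geodeHome = System.getenv("GEMFIRE");            // set on the test task by the Gradle closure above
    File installDir = (geodeHome == null) ? null : new File(geodeHome);
    if (installDir == null || !installDir.isDirectory()) {
      throw new IllegalStateException("GEMFIRE should point at the assembled product install");
    }
    return installDir;
  }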

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
new file mode 100644
index 0000000..afb2770
--- /dev/null
+++ b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
@@ -0,0 +1,1005 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.client.ClientCache;
+import com.gemstone.gemfire.cache.client.ClientCacheFactory;
+import com.gemstone.gemfire.cache.client.ClientRegionFactory;
+import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
+import com.gemstone.gemfire.cache.client.Pool;
+import com.gemstone.gemfire.cache.client.PoolFactory;
+import com.gemstone.gemfire.cache.client.PoolManager;
+import com.gemstone.gemfire.distributed.AbstractLauncher.ServiceState;
+import com.gemstone.gemfire.distributed.AbstractLauncher.Status;
+import com.gemstone.gemfire.distributed.LocatorLauncher;
+import com.gemstone.gemfire.distributed.LocatorLauncher.Builder;
+import com.gemstone.gemfire.distributed.LocatorLauncher.Command;
+import com.gemstone.gemfire.distributed.LocatorLauncher.LocatorState;
+import com.gemstone.gemfire.distributed.ServerLauncher;
+import com.gemstone.gemfire.distributed.ServerLauncher.ServerState;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.lang.ObjectUtils;
+import com.gemstone.gemfire.internal.lang.StringUtils;
+import com.gemstone.gemfire.internal.lang.SystemUtils;
+import com.gemstone.gemfire.internal.process.ProcessType;
+import com.gemstone.gemfire.internal.util.IOUtils;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import org.junit.FixMethodOrder;
+import org.junit.runners.MethodSorters;
+
+import javax.management.MBeanServerConnection;
+import javax.management.ObjectName;
+import javax.management.Query;
+import javax.management.QueryExp;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.lang.management.ManagementFactory;
+import java.net.InetAddress;
+import java.nio.charset.Charset;
+import java.text.DateFormat;
+import java.text.MessageFormat;
+import java.text.SimpleDateFormat;
+import java.util.Calendar;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.ConcurrentLinkedDeque;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * The LauncherLifecycleCommandsDUnitTest class is a suite of integration tests verifying the contract and
+ * functionality of the GemFire launcher lifecycle commands inside Gfsh.
+ *
+ * @author John Blum
+ * @see javax.management.MBeanServerConnection
+ * @see javax.management.remote.JMXConnector
+ * @see com.gemstone.gemfire.distributed.AbstractLauncher
+ * @see com.gemstone.gemfire.distributed.LocatorLauncher
+ * @see com.gemstone.gemfire.distributed.ServerLauncher
+ * @see com.gemstone.gemfire.internal.AvailablePortHelper
+ * @see com.gemstone.gemfire.management.internal.cli.shell.Gfsh
+ * @see com.gemstone.gemfire.management.internal.cli.commands.CliCommandTestBase
+ * @see com.gemstone.gemfire.management.internal.cli.commands.LauncherLifecycleCommands
+ * @see com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder
+ * @since 7.0
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
+
+  protected static final long COMMAND_EXECUTION_TIMEOUT = TimeUnit.MINUTES.toSeconds(2);
+
+  protected static final DateFormat TIMESTAMP = new SimpleDateFormat("yyyyMMddHHmmssSSS");
+
+  private final Queue<Integer> processIds = new ConcurrentLinkedDeque<>();
+
+  public LauncherLifecycleCommandsDUnitTest(final String testName) {
+    super(testName);
+  }
+
+  protected static String getMemberId(final int jmxManagerPort, final String memberName) throws Exception {
+    return getMemberId(InetAddress.getLocalHost().getHostName(), jmxManagerPort, memberName);
+  }
+
+  protected static String getMemberId(final String jmxManagerHost, final int jmxManagerPort,
+      final String memberName) throws Exception {
+    JMXConnector connector = null;
+
+    try {
+      connector = JMXConnectorFactory.connect(new JMXServiceURL(
+          String.format("service:jmx:rmi://%1$s/jndi/rmi://%1$s:%2$d/jmxrmi", jmxManagerHost, jmxManagerPort)));
+
+      MBeanServerConnection connection = connector.getMBeanServerConnection();
+
+      ObjectName objectNamePattern = ObjectName.getInstance("GemFire:type=Member,*");
+
+      QueryExp query = Query.eq(Query.attr("Name"), Query.value(memberName));
+
+      Set<ObjectName> objectNames = connection.queryNames(objectNamePattern, query);
+
+      assertNotNull(objectNames);
+      assertFalse(objectNames.isEmpty());
+      assertEquals(1, objectNames.size());
+
+      //final ObjectName objectName = ObjectName.getInstance("GemFire:type=Member,Name=" + memberName);
+      ObjectName objectName = objectNames.iterator().next();
+
+      //System.err.printf("ObjectName for Member with Name (%1$s) is %2$s%n", memberName, objectName);
+
+      return ObjectUtils.toString(connection.getAttribute(objectName, "Id"));
+    } finally {
+      IOUtils.close(connector);
+    }
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+
+  @Override
+  protected void tearDownAfter() throws Exception {
+    super.tearDownAfter();
+
+    LauncherLifecycleCommands launcherLifecycleCommands = new LauncherLifecycleCommands();
+    Integer pid;
+
+    while ((pid = processIds.poll()) != null) {
+      if (launcherLifecycleCommands.isVmWithProcessIdRunning(pid)) {
+        try {
+          String killCommand = String.format("%1$s %2$d", SystemUtils.isWindows() ? "taskkill /F /PID" : "kill -9",
+              pid);
+          Runtime.getRuntime().exec(killCommand);
+        } catch (Throwable ignore) {
+        }
+      }
+    }
+  }
+
+  @SuppressWarnings("unused")
+  protected void assertStatus(final LocatorState expectedStatus, final LocatorState actualStatus) {
+    assertEquals(expectedStatus.getStatus(), actualStatus.getStatus());
+    assertEquals(expectedStatus.getTimestamp(), actualStatus.getTimestamp());
+    assertEquals(expectedStatus.getServiceLocation(), actualStatus.getServiceLocation());
+    assertTrue(ObjectUtils.equalsIgnoreNull(expectedStatus.getPid(), actualStatus.getPid()));
+    assertEquals(expectedStatus.getUptime(), actualStatus.getUptime());
+    assertEquals(expectedStatus.getWorkingDirectory(), actualStatus.getWorkingDirectory());
+    assertEquals(expectedStatus.getJvmArguments(), actualStatus.getJvmArguments());
+    assertEquals(expectedStatus.getClasspath(), actualStatus.getClasspath());
+    assertEquals(expectedStatus.getGemFireVersion(), actualStatus.getGemFireVersion());
+    assertEquals(expectedStatus.getJavaVersion(), actualStatus.getJavaVersion());
+  }
+
+  protected Integer readPid(final File workingDirectory) throws IOException {
+    assertTrue(String.format("The working directory (%1$s) must exist!", workingDirectory),
+        workingDirectory != null && workingDirectory.isDirectory());
+
+    File[] files = workingDirectory.listFiles(new FileFilter() {
+      @Override
+      public boolean accept(final File pathname) {
+        return (pathname != null && pathname.isFile() && pathname.getAbsolutePath().endsWith(".pid"));
+      }
+    });
+
+    assertNotNull(files);
+    assertTrue(files.length > 0);
+
+    File pidFile = files[0];
+
+    BufferedReader fileReader = null;
+
+    try {
+      fileReader = new BufferedReader(new FileReader(pidFile), 1024);
+      return Integer.parseInt(fileReader.readLine().trim());
+    } catch (Exception ignore) {
+      return null;
+    } finally {
+      IOUtils.close(fileReader);
+    }
+  }
+
+  protected String serviceStateStatusStringNormalized(final ServiceState serviceState) {
+    return serviceStateStatusStringNormalized(serviceState.toString());
+  }
+
+  protected String serviceStateStatusStringNormalized(final String serviceStateStatus) {
+    assertNotNull(serviceStateStatus);
+    assertTrue("serviceStateStatus is missing 'Uptime': " + serviceStateStatus, serviceStateStatus.contains("Uptime"));
+    assertTrue("serviceStateStatus is missing 'JVM Arguments': " + serviceStateStatus,
+        serviceStateStatus.contains("JVM Arguments"));
+
+    return serviceStateStatus.substring(0, serviceStateStatus.indexOf("Uptime")).concat(
+        serviceStateStatus.substring(serviceStateStatus.indexOf("JVM Arguments")));
+  }
+
+  protected Status stopLocator(final File workingDirectory) {
+    return stopLocator(IOUtils.tryGetCanonicalPathElseGetAbsolutePath(workingDirectory));
+  }
+
+  protected Status stopLocator(final String workingDirectory) {
+    return waitForGemFireProcessToStop(
+        new Builder().setCommand(Command.STOP).setWorkingDirectory(workingDirectory).build().stop(), workingDirectory);
+  }
+
+  protected Status stopServer(final File workingDirectory) {
+    return stopServer(IOUtils.tryGetCanonicalPathElseGetAbsolutePath(workingDirectory));
+  }
+
+  protected Status stopServer(final String workingDirectory) {
+    return waitForGemFireProcessToStop(
+        new ServerLauncher.Builder().setCommand(ServerLauncher.Command.STOP).setWorkingDirectory(
+            workingDirectory).build().stop(), workingDirectory);
+  }
+
+  protected String toString(final Result result) {
+    assert result != null : "The Result object from the command execution cannot be null!";
+
+    StringBuilder buffer = new StringBuilder(StringUtils.LINE_SEPARATOR);
+
+    while (result.hasNextLine()) {
+      buffer.append(result.nextLine());
+      buffer.append(StringUtils.LINE_SEPARATOR);
+    }
+
+    return buffer.toString();
+  }
+
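+  // If the stop request does not report STOPPED, waits up to 15 seconds for the launched JVM to exit;
+  // any process still running after that is queued in processIds for a forced kill in tearDownAfter().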
+  protected Status waitForGemFireProcessToStop(final ServiceState serviceState, final String workingDirectory) {
+    if (!Status.STOPPED.equals(serviceState.getStatus())) {
+      try {
+        final Integer pid = readPid(new File(workingDirectory));
+
+        if (pid != null) {
+          WaitCriterion waitCriteria = new WaitCriterion() {
+            private LauncherLifecycleCommands launcherLifecycleCommands = new LauncherLifecycleCommands();
+
+            @Override
+            public boolean done() {
+              return !launcherLifecycleCommands.isVmWithProcessIdRunning(pid);
+            }
+
+            @Override
+            public String description() {
+              return String.format("Waiting for GemFire Process with PID (%1$d) to stop.", pid);
+            }
+          };
+
+          waitForCriterion(waitCriteria, TimeUnit.SECONDS.toMillis(15), TimeUnit.SECONDS.toMillis(5), false);
+
+          if (!waitCriteria.done()) {
+            processIds.offer(pid);
+          }
+        }
+      } catch (IOException ignore) {
+      }
+    }
+
+    return serviceState.getStatus();
+  }
+
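+  // Overwrites the existing PID file with the given process ID followed by a line separator.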
+  protected void writePid(final File pidFile, final int pid) throws IOException {
+    assertTrue("The PID file must actually exist!", pidFile != null && pidFile.isFile());
+
+    FileWriter writer = null;
+
+    try {
+      writer = new FileWriter(pidFile, false);
+      writer.write(String.valueOf(pid));
+      writer.write(System.getProperty("line.separator"));
+      writer.flush();
+    } finally {
+      IOUtils.close(writer);
+    }
+  }
+
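+  // Starting a Locator in a working directory that already contains a Locator PID file should fail, and
+  // the Exception thrown by the forked Locator JVM should be captured in the command output.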
+  public void test000StartLocatorCapturesOutputOnError() throws IOException {
+    final int locatorPort = AvailablePortHelper.getRandomAvailableTCPPort();
+
+    String pathname = (getClass().getSimpleName() + "_" + testName);
+    File workingDirectory = new File(pathname);
+
+    assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
+
+    File pidFile = new File(workingDirectory, ProcessType.LOCATOR.getPidFileName());
+
+    assertTrue(pidFile.createNewFile());
+
+    writePid(pidFile, getPidOrOne());
+    pidFile.deleteOnExit();
+
+    assertTrue(pidFile.isFile());
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_LOCATOR);
+
+    command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, pathname);
+    command.addOption(CliStrings.START_LOCATOR__DIR, pathname);
+    command.addOption(CliStrings.START_LOCATOR__PORT, String.valueOf(locatorPort));
+    command.addOption(CliStrings.START_LOCATOR__ENABLE__SHARED__CONFIGURATION, Boolean.FALSE.toString());
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.http-service-port=0");
+    command.addOption(CliStrings.START_LOCATOR__J,
+        "-Dgemfire.jmx-manager-port=" + AvailablePortHelper.getRandomAvailableTCPPort());
+
+    CommandResult result = executeCommand(command.toString());
+
+    assertNotNull(result);
+    assertEquals(Result.Status.ERROR, result.getStatus());
+
+    String resultString = toString(result);
+
+    assertTrue(resultString, resultString.contains(
+        "Exception in thread \"main\" java.lang.RuntimeException: A PID file already exists and a Locator may be running in " + IOUtils.tryGetCanonicalFileElseGetAbsoluteFile(
+            workingDirectory)));
+    assertTrue(resultString, resultString.contains(
+        "Caused by: com.gemstone.gemfire.internal.process.FileAlreadyExistsException: Pid file already exists: " + IOUtils.tryGetCanonicalFileElseGetAbsoluteFile(
+            pidFile)));
+  }
+
+  /*
+   * Makes a best effort to determine the PID of this running process by parsing the RuntimeMXBean name,
+   * which on most JVMs has the form "<pid>@<hostname>". If the PID cannot be determined, it simply returns 1.
+   */
+  private int getPidOrOne() {
+    int pid = 1;
+    String[] name = ManagementFactory.getRuntimeMXBean().getName().split("@");
+    if (name.length > 1) {
+      try {
+        pid = Integer.parseInt(name[0]);
+      } catch (NumberFormatException nex) {
+        // Ignored
+      }
+    }
+
+    return pid;
+  }
+
+  public void test001StartLocatorFailsFastOnMissingGemFirePropertiesFile() {
+    String gemfirePropertiesPathname = "/path/to/missing/gemfire.properties";
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_LOCATOR);
+
+    command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_LOCATOR__PORT, "0");
+    command.addOption(CliStrings.START_LOCATOR__PROPERTIES, gemfirePropertiesPathname);
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.http-service-port=0");
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.jmx-manager=false");
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.jmx-manager-port=0");
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.jmx-manager-start=false");
+
+    CommandResult result = executeCommand(command.toString());
+
+    assertNotNull(result);
+    assertEquals(Result.Status.ERROR, result.getStatus());
+
+    String resultString = toString(result);
+
+    assertTrue(resultString, resultString.contains(
+        MessageFormat.format(CliStrings.GEMFIRE_0_PROPERTIES_1_NOT_FOUND_MESSAGE, StringUtils.EMPTY_STRING,
+            gemfirePropertiesPathname)));
+  }
+
+  public void test002StartLocatorFailsFastOnMissingGemFireSecurityPropertiesFile() {
+    String gemfireSecurityPropertiesPathname = "/path/to/missing/gemfire-security.properties";
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_LOCATOR);
+
+    command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_LOCATOR__PORT, "0");
+    command.addOption(CliStrings.START_LOCATOR__SECURITY_PROPERTIES, gemfireSecurityPropertiesPathname);
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.http-service-port=0");
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.jmx-manager=false");
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.jmx-manager-port=0");
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.jmx-manager-start=false");
+
+    CommandResult result = executeCommand(command.toString());
+
+    assertNotNull(result);
+    assertEquals(Result.Status.ERROR, result.getStatus());
+
+    String resultString = toString(result);
+
+    assertTrue(resultString, resultString.contains(
+        MessageFormat.format(CliStrings.GEMFIRE_0_PROPERTIES_1_NOT_FOUND_MESSAGE, "Security ",
+            gemfireSecurityPropertiesPathname)));
+  }
+
+  public void test003StartServerFailsFastOnMissingCacheXmlFile() {
+    String cacheXmlPathname = "/path/to/missing/cache.xml";
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
+
+    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_SERVER__CACHE_XML_FILE, cacheXmlPathname);
+
+    CommandResult result = executeCommand(command.toString());
+
+    assertNotNull(result);
+    assertEquals(Result.Status.ERROR, result.getStatus());
+
+    String resultString = toString(result);
+
+    assertTrue(resultString,
+        resultString.contains(MessageFormat.format(CliStrings.CACHE_XML_NOT_FOUND_MESSAGE, cacheXmlPathname)));
+  }
+
+  public void test004StartServerFailsFastOnMissingGemFirePropertiesFile() {
+    String gemfirePropertiesFile = "/path/to/missing/gemfire.properties";
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
+
+    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_SERVER__PROPERTIES, gemfirePropertiesFile);
+
+    CommandResult result = executeCommand(command.toString());
+
+    assertNotNull(result);
+    assertEquals(Result.Status.ERROR, result.getStatus());
+
+    String resultString = toString(result);
+
+    assertTrue(resultString, resultString.contains(
+        MessageFormat.format(CliStrings.GEMFIRE_0_PROPERTIES_1_NOT_FOUND_MESSAGE, StringUtils.EMPTY_STRING,
+            gemfirePropertiesFile)));
+  }
+
+  public void test005StartServerFailsFastOnMissingGemFireSecurityPropertiesFile() {
+    String gemfireSecuritiesPropertiesFile = "/path/to/missing/gemfire-securities.properties";
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
+
+    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_SERVER__SECURITY_PROPERTIES, gemfireSecuritiesPropertiesFile);
+
+    CommandResult result = executeCommand(command.toString());
+
+    assertNotNull(result);
+    assertEquals(Result.Status.ERROR, result.getStatus());
+
+    String resultString = toString(result);
+
+    assertTrue(resultString, resultString.contains(
+        MessageFormat.format(CliStrings.GEMFIRE_0_PROPERTIES_1_NOT_FOUND_MESSAGE, "Security ",
+            gemfireSecuritiesPropertiesFile)));
+  }
+
+  public void test006StartLocatorInRelativeDirectory() {
+    final int locatorPort = AvailablePortHelper.getRandomAvailableTCPPort();
+
+    String pathname = (getClass().getSimpleName() + "_" + testName);
+    File workingDirectory = new File(pathname);
+
+    assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
+
+    try {
+      CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_LOCATOR);
+
+      command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, pathname);
+      command.addOption(CliStrings.START_LOCATOR__CONNECT, Boolean.FALSE.toString());
+      command.addOption(CliStrings.START_LOCATOR__DIR, pathname);
+      command.addOption(CliStrings.START_LOCATOR__PORT, String.valueOf(locatorPort));
+      command.addOption(CliStrings.START_LOCATOR__ENABLE__SHARED__CONFIGURATION, Boolean.FALSE.toString());
+      command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.http-service-port=0");
+      command.addOption(CliStrings.START_LOCATOR__J,
+          "-Dgemfire.jmx-manager-port=" + AvailablePortHelper.getRandomAvailableTCPPort());
+
+      CommandResult result = executeCommand(command.toString());
+
+      assertNotNull(result);
+      assertEquals(Result.Status.OK, result.getStatus());
+
+      String locatorOutput = toString(result);
+
+      assertNotNull(locatorOutput);
+      assertTrue("Locator output was: " + locatorOutput,
+          locatorOutput.contains("Locator in " + IOUtils.tryGetCanonicalFileElseGetAbsoluteFile(workingDirectory)));
+    } finally {
+      stopLocator(workingDirectory);
+    }
+  }
+
+  public void test007StatusLocatorUsingMemberNameIDWhenGfshIsNotConnected() {
+    CommandResult result = executeCommand(CliStrings.STATUS_LOCATOR + " --name=" + testName);
+
+    assertNotNull(result);
+    assertEquals(Result.Status.ERROR, result.getStatus());
+    assertEquals(CliStrings.format(CliStrings.STATUS_SERVICE__GFSH_NOT_CONNECTED_ERROR_MESSAGE, "Locator"),
+        StringUtils.trim(toString(result)));
+  }
+
+  public void test008StatusLocatorUsingMemberName() {
+    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+
+    final int jmxManagerPort = ports[0];
+    final int locatorPort = ports[1];
+
+    String pathname = (getClass().getSimpleName() + "_" + testName);
+    File workingDirectory = new File(pathname);
+
+    assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
+
+    try {
+      CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_LOCATOR);
+
+      command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, pathname);
+      command.addOption(CliStrings.START_LOCATOR__CONNECT, Boolean.FALSE.toString());
+      command.addOption(CliStrings.START_LOCATOR__DIR, pathname);
+      command.addOption(CliStrings.START_LOCATOR__PORT, String.valueOf(locatorPort));
+      command.addOption(CliStrings.START_LOCATOR__ENABLE__SHARED__CONFIGURATION, Boolean.FALSE.toString());
+      command.addOption(CliStrings.START_LOCATOR__FORCE, Boolean.TRUE.toString());
+      command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.http-service-port=0");
+      command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.jmx-manager-port=" + jmxManagerPort);
+
+      CommandResult result = executeCommand(command.toString());
+
+      assertNotNull(result);
+      assertEquals(Result.Status.OK, result.getStatus());
+
+      LocatorLauncher locatorLauncher = new LocatorLauncher.Builder().setCommand(
+          LocatorLauncher.Command.STATUS).setBindAddress(null).setPort(locatorPort).setWorkingDirectory(
+          workingDirectory.getPath()).build();
+
+      assertNotNull(locatorLauncher);
+
+      LocatorState expectedLocatorState = locatorLauncher.waitOnStatusResponse(60, 10, TimeUnit.SECONDS);
+
+      assertNotNull(expectedLocatorState);
+      assertEquals(Status.ONLINE, expectedLocatorState.getStatus());
+
+      result = executeCommand(String.format("%1$s --locator=localhost[%2$d]", CliStrings.CONNECT, locatorPort));
+
+      assertNotNull(result);
+      assertEquals(Result.Status.OK, result.getStatus());
+
+      result = executeCommand(String.format("%1$s --name=invalidLocatorMemberName", CliStrings.STATUS_LOCATOR));
+
+      assertNotNull(result);
+      assertEquals(Result.Status.ERROR, result.getStatus());
+      assertEquals(CliStrings.format(CliStrings.STATUS_LOCATOR__NO_LOCATOR_FOUND_FOR_MEMBER_ERROR_MESSAGE,
+          "invalidLocatorMemberName"), StringUtils.trim(toString(result)));
+
+      result = executeCommand(String.format("%1$s --name=%2$s", CliStrings.STATUS_LOCATOR, pathname));
+
+      assertNotNull(result);
+      assertEquals(Result.Status.OK, result.getStatus());
+      assertTrue(serviceStateStatusStringNormalized(toString(result)).contains(
+          serviceStateStatusStringNormalized(expectedLocatorState)));
+    } finally {
+      stopLocator(workingDirectory);
+    }
+  }
+
+  public void test009StatusLocatorUsingMemberId() throws Exception {
+    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+
+    final int jmxManagerPort = ports[0];
+    final int locatorPort = ports[1];
+
+    String pathname = (getClass().getSimpleName() + "_" + testName);
+    File workingDirectory = new File(pathname);
+
+    assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
+
+    try {
+      CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_LOCATOR);
+
+      command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, pathname);
+      command.addOption(CliStrings.START_LOCATOR__CONNECT, Boolean.FALSE.toString());
+      command.addOption(CliStrings.START_LOCATOR__DIR, pathname);
+      command.addOption(CliStrings.START_LOCATOR__PORT, String.valueOf(locatorPort));
+      command.addOption(CliStrings.START_LOCATOR__ENABLE__SHARED__CONFIGURATION, Boolean.FALSE.toString());
+      command.addOption(CliStrings.START_LOCATOR__FORCE, Boolean.TRUE.toString());
+      command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.http-service-port=0");
+      command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.jmx-manager-port=" + jmxManagerPort);
+
+      CommandResult result = executeCommand(command.toString());
+
+      assertNotNull(result);
+      assertEquals(Result.Status.OK, result.getStatus());
+
+      LocatorLauncher locatorLauncher = new LocatorLauncher.Builder().setCommand(
+          LocatorLauncher.Command.STATUS).setBindAddress(null).setPort(locatorPort).setWorkingDirectory(
+          workingDirectory.getPath()).build();
+
+      assertNotNull(locatorLauncher);
+
+      LocatorState expectedLocatorState = locatorLauncher.waitOnStatusResponse(60, 10, TimeUnit.SECONDS);
+
+      assertNotNull(expectedLocatorState);
+      assertEquals(Status.ONLINE, expectedLocatorState.getStatus());
+
+      result = executeCommand(String.format("%1$s --locator=localhost[%2$d]", CliStrings.CONNECT, locatorPort));
+
+      assertNotNull(result);
+      assertEquals(Result.Status.OK, result.getStatus());
+
+      result = executeCommand(
+          String.format("%1$s --name=%2$s", CliStrings.STATUS_LOCATOR, getMemberId(jmxManagerPort, pathname)));
+
+      assertNotNull(result);
+      assertEquals(Result.Status.OK, result.getStatus());
+      assertTrue(serviceStateStatusStringNormalized(toString(result)).contains(
+          serviceStateStatusStringNormalized(expectedLocatorState)));
+    } finally {
+      stopLocator(workingDirectory);
+    }
+  }
+
+  public void test010StopLocatorUsingMemberNameIDWhenGfshIsNotConnected() {
+    CommandResult result = executeCommand(CliStrings.STOP_LOCATOR + " --name=" + testName);
+
+    assertNotNull(result);
+    assertEquals(Result.Status.ERROR, result.getStatus());
+    assertEquals(CliStrings.format(CliStrings.STOP_SERVICE__GFSH_NOT_CONNECTED_ERROR_MESSAGE, "Locator"),
+        StringUtils.trim(toString(result)));
+  }
+
+  public void test011StopLocatorUsingMemberName() {
+    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+
+    final int jmxManagerPort = ports[0];
+    final int locatorPort = ports[1];
+
+    String pathname = (getClass().getSimpleName() + "_" + testName);
+    File workingDirectory = new File(pathname);
+
+    assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_LOCATOR);
+
+    command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, pathname);
+    command.addOption(CliStrings.START_LOCATOR__CONNECT, Boolean.FALSE.toString());
+    command.addOption(CliStrings.START_LOCATOR__DIR, pathname);
+    command.addOption(CliStrings.START_LOCATOR__PORT, String.valueOf(locatorPort));
+    command.addOption(CliStrings.START_LOCATOR__ENABLE__SHARED__CONFIGURATION, Boolean.FALSE.toString());
+    command.addOption(CliStrings.START_LOCATOR__FORCE, Boolean.TRUE.toString());
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.http-service-port=0");
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.jmx-manager-port=" + jmxManagerPort);
+
+    CommandResult result = executeCommand(command.toString());
+
+    assertNotNull(result);
+    assertEquals(Result.Status.OK, result.getStatus());
+
+    final LocatorLauncher locatorLauncher = new LocatorLauncher.Builder().setCommand(
+        LocatorLauncher.Command.STOP).setBindAddress(null).setPort(locatorPort).setWorkingDirectory(
+        workingDirectory.getPath()).build();
+
+    assertNotNull(locatorLauncher);
+
+    LocatorState locatorStatus = locatorLauncher.waitOnStatusResponse(60, 10, TimeUnit.SECONDS);
+
+    assertNotNull(locatorStatus);
+    assertEquals(Status.ONLINE, locatorStatus.getStatus());
+
+    result = executeCommand(String.format("%1$s --locator=localhost[%2$d]", CliStrings.CONNECT, locatorPort));
+
+    assertNotNull(result);
+    assertEquals(Result.Status.OK, result.getStatus());
+
+    result = executeCommand(String.format("%1$s --name=invalidLocatorMemberName", CliStrings.STOP_LOCATOR));
+
+    assertNotNull(result);
+    assertEquals(Result.Status.ERROR, result.getStatus());
+    assertEquals(CliStrings.format(CliStrings.STOP_LOCATOR__NO_LOCATOR_FOUND_FOR_MEMBER_ERROR_MESSAGE,
+        "invalidLocatorMemberName"), StringUtils.trim(toString(result)));
+
+    locatorStatus = locatorLauncher.status();
+
+    assertNotNull(locatorStatus);
+    assertEquals(Status.ONLINE, locatorStatus.getStatus());
+
+    result = executeCommand(String.format("%1$s --name=%2$s", CliStrings.STOP_LOCATOR, pathname));
+
+    assertNotNull(result);
+    assertEquals(Result.Status.OK, result.getStatus());
+
+    // TODO figure out what output to assert and validate on now that 'stop locator' uses Gfsh's logger
+    // and standard err/out...
+    //assertEquals(CliStrings.format(CliStrings.STOP_LOCATOR__SHUTDOWN_MEMBER_MESSAGE, pathname),
+    //  StringUtils.trim(toString(result)));
+
+    WaitCriterion waitCriteria = new WaitCriterion() {
+      @Override
+      public boolean done() {
+        final LocatorState locatorStatus = locatorLauncher.status();
+        return (locatorStatus != null && Status.NOT_RESPONDING.equals(locatorStatus.getStatus()));
+      }
+
+      @Override
+      public String description() {
+        return "wait for the Locator to stop; the Locator will no longer respond after it stops";
+      }
+    };
+
+    waitForCriterion(waitCriteria, 15 * 1000, 5000, true);
+
+    locatorStatus = locatorLauncher.status();
+
+    assertNotNull(locatorStatus);
+    assertEquals(Status.NOT_RESPONDING, locatorStatus.getStatus());
+  }
+
+  // @see Trac Bug # 46760
+  public void test012StopLocatorUsingMemberId() throws Exception {
+    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+
+    final int jmxManagerPort = ports[0];
+    final int locatorPort = ports[1];
+
+    String pathname = (getClass().getSimpleName() + "_" + testName);
+    File workingDirectory = new File(pathname);
+
+    assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_LOCATOR);
+
+    command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, pathname);
+    command.addOption(CliStrings.START_LOCATOR__CONNECT, Boolean.FALSE.toString());
+    command.addOption(CliStrings.START_LOCATOR__DIR, pathname);
+    command.addOption(CliStrings.START_LOCATOR__PORT, String.valueOf(locatorPort));
+    command.addOption(CliStrings.START_LOCATOR__ENABLE__SHARED__CONFIGURATION, Boolean.FALSE.toString());
+    command.addOption(CliStrings.START_LOCATOR__FORCE, Boolean.TRUE.toString());
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.http-service-port=0");
+    command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.jmx-manager-port=" + jmxManagerPort);
+
+    CommandResult result = executeCommand(command.toString());
+
+    assertNotNull(result);
+    assertEquals(Result.Status.OK, result.getStatus());
+
+    final LocatorLauncher locatorLauncher = new LocatorLauncher.Builder().setCommand(
+        LocatorLauncher.Command.STOP).setBindAddress(null).setPort(locatorPort).setWorkingDirectory(
+        workingDirectory.getPath()).build();
+
+    assertNotNull(locatorLauncher);
+
+    LocatorState locatorState = locatorLauncher.waitOnStatusResponse(60, 10, TimeUnit.SECONDS);
+
+    assertNotNull(locatorState);
+    assertEquals(Status.ONLINE, locatorState.getStatus());
+
+    result = executeCommand(String.format("%1$s --locator=localhost[%2$d]", CliStrings.CONNECT, locatorPort));
+
+    assertNotNull(result);
+    assertEquals(Result.Status.OK, result.getStatus());
+
+    String memberId = getMemberId(jmxManagerPort, pathname);
+
+    result = executeCommand(String.format("%1$s --name=%2$s", CliStrings.STOP_LOCATOR, memberId));
+
+    assertNotNull(result);
+    assertEquals(Result.Status.OK, result.getStatus());
+
+    // TODO figure out what output to assert and validate on now that 'stop locator' uses Gfsh's logger
+    // and standard err/out...
+    //assertEquals(CliStrings.format(CliStrings.STOP_LOCATOR__SHUTDOWN_MEMBER_MESSAGE, memberId),
+    //  StringUtils.trim(toString(result)));
+
+    WaitCriterion waitCriteria = new WaitCriterion() {
+      @Override
+      public boolean done() {
+        LocatorState locatorState = locatorLauncher.status();
+        return (locatorState != null && Status.NOT_RESPONDING.equals(locatorState.getStatus()));
+      }
+
+      @Override
+      public String description() {
+        return "wait for the Locator to stop; the Locator will no longer respond after it stops";
+      }
+    };
+
+    waitForCriterion(waitCriteria, 15 * 1000, 5000, true);
+
+    locatorState = locatorLauncher.status();
+
+    assertNotNull(locatorState);
+    assertEquals(Status.NOT_RESPONDING, locatorState.getStatus());
+  }
+
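+  // Starts a GemFire Server bootstrapped with a Spring application context, verifies it reports ONLINE,
+  // then stops it and waits until it no longer responds.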
+  public void test013StartServerWithSpring() {
+    String pathname = (getClass().getSimpleName() + "_" + testName);
+    File workingDirectory = new File(pathname);
+
+    assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
+
+    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_SERVER__USE_CLUSTER_CONFIGURATION, Boolean.FALSE.toString());
+    command.addOption(CliStrings.START_SERVER__LOG_LEVEL, "config");
+    command.addOption(CliStrings.START_SERVER__INCLUDE_SYSTEM_CLASSPATH);
+    command.addOption(CliStrings.START_SERVER__DISABLE_DEFAULT_SERVER);
+    command.addOption(CliStrings.START_SERVER__DIR, pathname);
+    command.addOption(CliStrings.START_SERVER__SPRING_XML_LOCATION, "spring/spring-gemfire-context.xml");
+
+    CommandResult result = executeCommand(command.toString());
+
+    assertNotNull(result);
+    assertEquals(Result.Status.OK, result.getStatus());
+
+    final ServerLauncher springGemFireServer = new ServerLauncher.Builder().setCommand(
+        ServerLauncher.Command.STATUS).setWorkingDirectory(
+        IOUtils.tryGetCanonicalPathElseGetAbsolutePath(workingDirectory)).build();
+
+    assertNotNull(springGemFireServer);
+
+    ServerState serverState = springGemFireServer.status();
+
+    assertNotNull(serverState);
+    assertEquals(Status.ONLINE, serverState.getStatus());
+
+    // Now that the GemFire Server bootstrapped with Spring started up OK, stop it!
+    stopServer(springGemFireServer.getWorkingDirectory());
+
+    WaitCriterion waitCriteria = new WaitCriterion() {
+      @Override
+      public boolean done() {
+        ServerState serverState = springGemFireServer.status();
+        return (serverState != null && Status.NOT_RESPONDING.equals(serverState.getStatus()));
+      }
+
+      @Override
+      public String description() {
+        return "wait for the Locator to stop; the Locator will no longer respond after it stops";
+      }
+    };
+
+    waitForCriterion(waitCriteria, TimeUnit.SECONDS.toMillis(15), TimeUnit.SECONDS.toMillis(5), true);
+
+    serverState = springGemFireServer.status();
+
+    assertNotNull(serverState);
+    assertEquals(Status.NOT_RESPONDING, serverState.getStatus());
+  }
+
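+  // Starts a GemFire Server with a small (10M) max heap, fills the replicated "Example" Region from a
+  // client until the server JVM runs out of memory, and verifies that the server process terminates.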
+  public void test014GemFireServerJvmProcessTerminatesOnOutOfMemoryError() throws Exception {
+    int ports[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+    final int serverPort = ports[0];
+    final int locatorPort = ports[1];
+
+    String pathname = getClass().getSimpleName().concat("_").concat(testName);
+    File workingDirectory = new File(pathname);
+
+    assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
+
+    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
+
+    command.addOption(CliStrings.START_SERVER__NAME, pathname + TIMESTAMP.format(Calendar.getInstance().getTime()));
+    command.addOption(CliStrings.START_SERVER__SERVER_PORT, String.valueOf(serverPort));
+    command.addOption(CliStrings.START_SERVER__USE_CLUSTER_CONFIGURATION, Boolean.FALSE.toString());
+    command.addOption(CliStrings.START_SERVER__MAXHEAP, "10M");
+    command.addOption(CliStrings.START_SERVER__LOG_LEVEL, "config");
+    command.addOption(CliStrings.START_SERVER__DIR, pathname);
+    command.addOption(CliStrings.START_SERVER__CACHE_XML_FILE,
+        IOUtils.tryGetCanonicalPathElseGetAbsolutePath(writeAndGetCacheXmlFile(workingDirectory)));
+    command.addOption(CliStrings.START_SERVER__INCLUDE_SYSTEM_CLASSPATH);
+    command.addOption(CliStrings.START_SERVER__J,
+        "-Dgemfire." + DistributionConfig.START_LOCATOR_NAME + "=localhost[" + locatorPort + "]");
+
+    CommandResult result = executeCommand(command.toString());
+    System.out.println("result=" + result);
+
+    assertNotNull(result);
+    assertEquals(Result.Status.OK, result.getStatus());
+
+    ServerLauncher serverLauncher = new ServerLauncher.Builder().setCommand(
+        ServerLauncher.Command.STATUS).setWorkingDirectory(
+        IOUtils.tryGetCanonicalPathElseGetAbsolutePath(workingDirectory)).build();
+
+    assertNotNull(serverLauncher);
+
+    ServerState serverState = serverLauncher.status();
+
+    assertNotNull(serverState);
+    assertEquals(Status.ONLINE, serverState.getStatus());
+
+    // Verify our GemFire Server JVM process is running!
+    assertTrue(new LauncherLifecycleCommands().isVmWithProcessIdRunning(serverState.getPid()));
+
+    ClientCache clientCache = setupClientCache(pathname + String.valueOf(serverPort), serverPort);
+
+    assertNotNull(clientCache);
+
+    try {
+      Region<Long, String> exampleRegion = clientCache.getRegion("/Example");
+      // drive the GemFire Server out of memory until the JVM throws an OutOfMemoryError
+      for (long index = 0; index < Long.MAX_VALUE; index++) {
+        exampleRegion.put(index, String.valueOf(index));
+      }
+    } catch (Exception ignore) {
+      System.err.printf("%1$s: %2$s%n", ignore.getClass().getName(), ignore.getMessage());
+    } finally {
+      clientCache.close();
+
+      final int serverPid = serverState.getPid();
+
+      WaitCriterion waitCriteria = new WaitCriterion() {
+        private LauncherLifecycleCommands launcherLifecycleCommands = new LauncherLifecycleCommands();
+
+        @Override
+        public boolean done() {
+          return !launcherLifecycleCommands.isVmWithProcessIdRunning(serverPid);
+        }
+
+        @Override
+        public String description() {
+          return "Wait for the GemFire Server JVM process that ran out-of-memory to exit.";
+        }
+      };
+
+      waitForCriterion(waitCriteria, TimeUnit.SECONDS.toMillis(30), TimeUnit.SECONDS.toMillis(10), true);
+
+      // Verify our GemFire Server JVM process was terminated!
+      assertFalse(new LauncherLifecycleCommands().isVmWithProcessIdRunning(serverState.getPid()));
+
+      serverState = serverLauncher.status();
+
+      assertNotNull(serverState);
+      assertEquals(Status.NOT_RESPONDING, serverState.getStatus());
+    }
+  }
+
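+  // Writes a minimal cache.xml declaring a single REPLICATE Region named "Example" into the working
+  // directory and returns the file.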
+  private File writeAndGetCacheXmlFile(final File workingDirectory) throws IOException {
+    File cacheXml = new File(workingDirectory, "cache.xml");
+    StringBuilder buffer = new StringBuilder("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+
+    buffer.append(StringUtils.LINE_SEPARATOR);
+    buffer.append("<!DOCTYPE cache PUBLIC  \"-//GemStone Systems, Inc.//GemFire Declarative Caching 7.0//EN\"");
+    buffer.append(StringUtils.LINE_SEPARATOR);
+    buffer.append("  \"http://www.gemstone.com/dtd/cache7_0.dtd\">");
+    buffer.append(StringUtils.LINE_SEPARATOR);
+    buffer.append("<cache>");
+    buffer.append(StringUtils.LINE_SEPARATOR);
+    buffer.append("  <region name=\"Example\" refid=\"REPLICATE\"/>");
+    buffer.append(StringUtils.LINE_SEPARATOR);
+    buffer.append("</cache>");
+
+    BufferedWriter fileWriter = null;
+
+    try {
+      fileWriter = new BufferedWriter(
+          new OutputStreamWriter(new FileOutputStream(cacheXml, false), Charset.forName("UTF-8").newEncoder()));
+      fileWriter.write(buffer.toString());
+      fileWriter.flush();
+    } finally {
+      IOUtils.close(fileWriter);
+    }
+
+    return cacheXml;
+  }
+
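+  // Creates a durable ClientCache with a connection Pool to the server on localhost and a PROXY client
+  // Region named "Example" with Long keys and String values.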
+  private ClientCache setupClientCache(final String durableClientId, final int serverPort) {
+    ClientCache clientCache = new ClientCacheFactory().set("durable-client-id", durableClientId).create();
+
+    PoolFactory poolFactory = PoolManager.createFactory();
+
+    poolFactory.setMaxConnections(10);
+    poolFactory.setMinConnections(1);
+    poolFactory.setReadTimeout(5000);
+    poolFactory.addServer("localhost", serverPort);
+
+    Pool pool = poolFactory.create("serverConnectionPool");
+
+    assertNotNull("The 'serverConnectionPool' was not properly configured and initialized!", pool);
+
+    ClientRegionFactory<Long, String> regionFactory = clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY);
+
+    regionFactory.setPoolName(pool.getName());
+    regionFactory.setKeyConstraint(Long.class);
+    regionFactory.setValueConstraint(String.class);
+
+    Region<Long, String> exampleProxy = regionFactory.create("Example");
+
+    assertNotNull("The 'Example' Client Region was not properly configured and initialized", exampleProxy);
+
+    return clientCache;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsJUnitTest.java b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsJUnitTest.java
new file mode 100755
index 0000000..d7e7970
--- /dev/null
+++ b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsJUnitTest.java
@@ -0,0 +1,625 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.GemFireException;
+import com.gemstone.gemfire.cache.server.CacheServer;
+import com.gemstone.gemfire.distributed.ServerLauncher;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.DistributionLocator;
+import com.gemstone.gemfire.internal.lang.StringUtils;
+import com.gemstone.gemfire.internal.lang.SystemUtils;
+import com.gemstone.gemfire.internal.util.IOUtils;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.test.junit.categories.UnitTest;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import java.util.Stack;
+import java.util.jar.Attributes;
+import java.util.jar.Attributes.Name;
+import java.util.jar.JarFile;
+import java.util.jar.Manifest;
+
+import static org.junit.Assert.*;
+
+/**
+ * The LauncherLifecycleCommandsJUnitTest class is a test suite of test cases testing the contract and functionality
+ * of the GemFire shell (Gfsh) launcher lifecycle commands.
+ *
+ * @author John Blum
+ * @see com.gemstone.gemfire.management.internal.cli.commands.LauncherLifecycleCommands
+ * @see org.junit.Assert
+ * @see org.junit.Test
+ * @since 7.0
+ */
+@SuppressWarnings("unused")
+@Category(UnitTest.class)
+public class LauncherLifecycleCommandsJUnitTest {
+
+  private static final String GFSH_DEPENDENCIES_JAR_PATHNAME = IOUtils.appendToPath(System.getenv("GEMFIRE"), "lib",
+      "gfsh-dependencies.jar");
+
+  private LauncherLifecycleCommands launcherCommands;
+
+  @Before
+  public void setup() {
+    launcherCommands = new LauncherLifecycleCommands();
+  }
+
+  @After
+  public void tearDown() {
+    launcherCommands = null;
+  }
+
+  protected LauncherLifecycleCommands getLauncherLifecycleCommands() {
+    return launcherCommands;
+  }
+
+  protected void writePid(final File pidFile, final int pid) throws IOException {
+    FileWriter fileWriter = null;
+
+    try {
+      fileWriter = new FileWriter(pidFile, false);
+      fileWriter.write(String.valueOf(pid));
+      fileWriter.write("\n");
+      fileWriter.flush();
+    } finally {
+      // ensure the writer is closed even if the write fails
+      IOUtils.close(fileWriter);
+    }
+  }
+
+  @Test
+  public void testAddGemFirePropertyFileToCommandLine() {
+    final List<String> commandLine = new ArrayList<>();
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addGemFirePropertyFile(commandLine, null);
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addGemFirePropertyFile(commandLine, StringUtils.EMPTY_STRING);
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addGemFirePropertyFile(commandLine, " ");
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addGemFirePropertyFile(commandLine, "/path/to/gemfire.properties");
+
+    assertFalse(commandLine.isEmpty());
+    assertTrue(commandLine.contains("-DgemfirePropertyFile=/path/to/gemfire.properties"));
+  }
+
+  @Test
+  public void testAddGemFireSystemPropertiesToCommandLine() {
+    final List<String> commandLine = new ArrayList<>();
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addGemFireSystemProperties(commandLine, new Properties());
+
+    assertTrue(commandLine.isEmpty());
+
+    final Properties gemfireProperties = new Properties();
+
+    gemfireProperties.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[11235]");
+    gemfireProperties.setProperty(DistributionConfig.LOG_LEVEL_NAME, "config");
+    gemfireProperties.setProperty(DistributionConfig.LOG_FILE_NAME, StringUtils.EMPTY_STRING);
+    gemfireProperties.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    gemfireProperties.setProperty(DistributionConfig.NAME_NAME, "tidepool");
+
+    getLauncherLifecycleCommands().addGemFireSystemProperties(commandLine, gemfireProperties);
+
+    assertFalse(commandLine.isEmpty());
+    assertEquals(4, commandLine.size());
+
+    for (final String propertyName : gemfireProperties.stringPropertyNames()) {
+      final String propertyValue = gemfireProperties.getProperty(propertyName);
+      if (StringUtils.isBlank(propertyValue)) {
+        for (final String systemProperty : commandLine) {
+          assertFalse(systemProperty.startsWith("-Dgemfire.".concat(propertyName).concat("=")));
+        }
+      } else {
+        assertTrue(commandLine.contains("-Dgemfire.".concat(propertyName).concat("=").concat(propertyValue)));
+      }
+    }
+  }
+
+  @Test
+  public void testAddInitialHeapToCommandLine() {
+    final List<String> commandLine = new ArrayList<>();
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addInitialHeap(commandLine, null);
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addInitialHeap(commandLine, StringUtils.EMPTY_STRING);
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addInitialHeap(commandLine, " ");
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addInitialHeap(commandLine, "512M");
+
+    assertFalse(commandLine.isEmpty());
+    assertEquals("-Xms512M", commandLine.get(0));
+  }
+
+  @Test
+  public void testAddJvmArgumentsAndOptionsToCommandLine() {
+    final List<String> commandLine = new ArrayList<>();
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addJvmArgumentsAndOptions(commandLine, null);
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addJvmArgumentsAndOptions(commandLine, new String[]{});
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addJvmArgumentsAndOptions(commandLine,
+        new String[]{"-DmyProp=myVal", "-d64", "-server", "-Xprof"});
+
+    assertFalse(commandLine.isEmpty());
+    assertEquals(4, commandLine.size());
+    assertEquals("-DmyProp=myVal", commandLine.get(0));
+    assertEquals("-d64", commandLine.get(1));
+    assertEquals("-server", commandLine.get(2));
+    assertEquals("-Xprof", commandLine.get(3));
+  }
+
+  // Fix for Bug #47192 - "Making GemFire (JVM) to exit in case of OutOfMemory"
+  @Test
+  public void testAddJvmOptionsForOutOfMemoryErrors() {
+    final List<String> jvmOptions = new ArrayList<>(1);
+
+    getLauncherLifecycleCommands().addJvmOptionsForOutOfMemoryErrors(jvmOptions);
+
+    if (SystemUtils.isHotSpotVM()) {
+      if (SystemUtils.isWindows()) {
+        assertTrue(jvmOptions.contains("-XX:OnOutOfMemoryError=taskkill /F /PID %p"));
+      } else {
+        assertTrue(jvmOptions.contains("-XX:OnOutOfMemoryError=kill -KILL %p"));
+      }
+    } else if (SystemUtils.isJ9VM()) {
+      assertEquals(1, jvmOptions.size());
+      assertTrue(jvmOptions.contains("-Xcheck:memory"));
+    } else if (SystemUtils.isJRockitVM()) {
+      assertEquals(1, jvmOptions.size());
+      assertTrue(jvmOptions.contains("-XXexitOnOutOfMemory"));
+    } else {
+      assertTrue(jvmOptions.isEmpty());
+    }
+  }
+
+  @Test
+  public void testAddMaxHeapToCommandLine() {
+    final List<String> commandLine = new ArrayList<>();
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addMaxHeap(commandLine, null);
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addMaxHeap(commandLine, StringUtils.EMPTY_STRING);
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addMaxHeap(commandLine, "  ");
+
+    assertTrue(commandLine.isEmpty());
+
+    getLauncherLifecycleCommands().addMaxHeap(commandLine, "1024M");
+
+    assertFalse(commandLine.isEmpty());
+    assertEquals(3, commandLine.size());
+    assertEquals("-Xmx1024M", commandLine.get(0));
+    assertEquals("-XX:+UseConcMarkSweepGC", commandLine.get(1));
+    assertEquals("-XX:CMSInitiatingOccupancyFraction=" + LauncherLifecycleCommands.CMS_INITIAL_OCCUPANCY_FRACTION,
+        commandLine.get(2));
+  }
+
+  @Test(expected = AssertionError.class)
+  public void testReadPidWithNull() {
+    try {
+      getLauncherLifecycleCommands().readPid(null);
+    } catch (AssertionError expected) {
+      assertEquals("The file from which to read the process ID (pid) cannot be null!", expected.getMessage());
+      throw expected;
+    }
+  }
+
+  @Test
+  public void testReadPidWithNonExistingFile() {
+    assertEquals(LauncherLifecycleCommands.INVALID_PID,
+        getLauncherLifecycleCommands().readPid(new File("/path/to/non_existing/pid.file")));
+  }
+
+  @Test
+  public void testReadPid() throws IOException {
+    final int expectedPid = 12345;
+
+    File pidFile = new File(getClass().getSimpleName().concat("_testReadPid.pid"));
+
+    assertTrue(pidFile.createNewFile());
+
+    pidFile.deleteOnExit();
+    writePid(pidFile, expectedPid);
+
+    final int actualPid = getLauncherLifecycleCommands().readPid(pidFile);
+
+    assertEquals(expectedPid, actualPid);
+  }
+
+  @Test
+  @SuppressWarnings("deprecation")
+  public void testGetClasspath() {
+    assertEquals(System.getProperty("java.class.path"), getLauncherLifecycleCommands().getClasspath(null));
+  }
+
+  @Test
+  @SuppressWarnings("deprecation")
+  public void testGetClasspathWithUserDefinedClasspath() {
+    assertEquals(System.getProperty("java.class.path") + File.pathSeparator + "/path/to/user/classes",
+        getLauncherLifecycleCommands().getClasspath("/path/to/user/classes"));
+  }
+
+  @Test
+  public void testGemFireCoreClasspath() throws IOException {
+    File coreDependenciesJar = new File(LauncherLifecycleCommands.CORE_DEPENDENCIES_JAR_PATHNAME);
+
+    assertNotNull(coreDependenciesJar);
+    assertTrue(coreDependenciesJar + " is not a file", coreDependenciesJar.isFile());
+
+    Collection<String> expectedJarDependencies = Arrays.asList("antlr", "commons-io", "commons-lang", "commons-logging",
+        "gemfire", "jackson-annotations", "jackson-core", "jackson-databind", "jansi", "jline", "snappy-java",
+        "spring-core", "spring-shell", "jetty-server", "jetty-servlet", "jetty-webapp", "jetty-util", "jetty-http",
+        "servlet-api", "jetty-io", "jetty-security", "jetty-xml"
+
+    );
+
+    assertJarFileManifestClassPath(coreDependenciesJar, expectedJarDependencies);
+  }
+
+  @Test
+  public void testGetSpringJars() {
+    List<String> actualSpringJarPathnames = new LauncherLifecycleCommands().getSpringJars();
+
+    assertNotNull(actualSpringJarPathnames);
+    assertEquals(LauncherLifecycleCommands.SPRING_JAR_NAME_PREFIXES.size(), actualSpringJarPathnames.size());
+
+    int springCoreVersion = -1;
+    int springDataCommonsVersion = -1;
+    int springDataGemFireVersion = -1;
+
+    Set<String> expectedSpringJarNames = new HashSet<>(LauncherLifecycleCommands.SPRING_JAR_NAME_PREFIXES);
+
+    assertFalse(expectedSpringJarNames.isEmpty());
+
+    for (String springJarPathname : actualSpringJarPathnames) {
+      String springJarName = springJarPathname.substring(springJarPathname.lastIndexOf(File.separator) + 1);
+      String springJarNamePrefix = springJarName.substring(0, springJarName.lastIndexOf("-"));
+
+      switch (springJarNamePrefix) {
+        case LauncherLifecycleCommands.SPRING_BEANS_JAR_NAME_PREFIX:
+          springCoreVersion = Integer.parseInt(StringUtils.getDigitsOnly(springJarName));
+          break;
+        case LauncherLifecycleCommands.SPRING_DATA_COMMONS_JAR_NAME_PREFIX:
+          springDataCommonsVersion = Integer.parseInt(StringUtils.getDigitsOnly(springJarName));
+          break;
+        case LauncherLifecycleCommands.SPRING_DATA_GEMFIRE_JAR_NAME_PREFIX:
+          springDataGemFireVersion = Integer.parseInt(StringUtils.getDigitsOnly(springJarName));
+          break;
+      }
+
+      expectedSpringJarNames.remove(springJarNamePrefix);
+    }
+
+    assertTrue(String.format("Expected empty; but was (%1$s)", expectedSpringJarNames),
+        expectedSpringJarNames.isEmpty());
+    assertEquals(3212, springCoreVersion);
+    assertEquals(191, springDataCommonsVersion);
+    assertEquals(151, springDataGemFireVersion);
+  }
+
+  @Test
+  public void testGetSystemClasspath() {
+    assertEquals(System.getProperty("java.class.path"), getLauncherLifecycleCommands().getSystemClasspath());
+  }
+
+  @Test
+  public void testLocatorClasspathOrder() {
+    String userClasspath = "/path/to/user/lib/app.jar:/path/to/user/classes";
+
+    String expectedClasspath = launcherCommands.getGemFireJarPath().concat(File.pathSeparator).concat(
+        userClasspath).concat(File.pathSeparator).concat(System.getProperty("java.class.path")).concat(
+        File.pathSeparator).concat(LauncherLifecycleCommands.CORE_DEPENDENCIES_JAR_PATHNAME);
+
+    String actualClasspath = launcherCommands.getLocatorClasspath(true, userClasspath);
+
+    assertEquals(expectedClasspath, actualClasspath);
+  }
+
+  @Test
+  public void testServerClasspathOrder() {
+    String userClasspath = "/path/to/user/lib/app.jar:/path/to/user/classes";
+
+    String expectedClasspath = launcherCommands.getGemFireJarPath().concat(File.pathSeparator).concat(
+        userClasspath).concat(File.pathSeparator).concat(
+        LauncherLifecycleCommands.CORE_DEPENDENCIES_JAR_PATHNAME).concat(File.pathSeparator).concat(
+        toPath(launcherCommands.getSpringJars().toArray()));
+
+    String actualClasspath = launcherCommands.getServerClasspath(false, true, userClasspath);
+
+    assertEquals(expectedClasspath, actualClasspath);
+  }
+
+  private String toPath(Object... pathElements) {
+    String path = "";
+
+    for (Object pathElement : pathElements) {
+      path += (path.isEmpty() ? StringUtils.EMPTY_STRING : File.pathSeparator);
+      path += pathElement;
+    }
+
+    return path;
+  }
+
+  @Test
+  public void testToClasspath() {
+    final boolean EXCLUDE_SYSTEM_CLASSPATH = false;
+    final boolean INCLUDE_SYSTEM_CLASSPATH = true;
+
+    String[] jarFilePathnames = {"/path/to/user/libs/A.jar", "/path/to/user/libs/B.jar", "/path/to/user/libs/C.jar"};
+
+    String[] userClasspaths = {"/path/to/classes:/path/to/libs/1.jar:/path/to/libs/2.jar", "/path/to/ext/libs/1.jar:/path/to/ext/classes:/path/to/ext/lib/10.jar"};
+
+    String expectedClasspath = LauncherLifecycleCommands.GEMFIRE_JAR_PATHNAME.concat(File.pathSeparator).concat(
+        toClasspath(userClasspaths)).concat(File.pathSeparator).concat(toClasspath(jarFilePathnames));
+
+    assertEquals(expectedClasspath,
+        getLauncherLifecycleCommands().toClasspath(EXCLUDE_SYSTEM_CLASSPATH, jarFilePathnames, userClasspaths));
+
+    expectedClasspath = LauncherLifecycleCommands.GEMFIRE_JAR_PATHNAME.concat(File.pathSeparator).concat(
+        toClasspath(userClasspaths)).concat(File.pathSeparator).concat(System.getProperty("java.class.path")).concat(
+        File.pathSeparator).concat(toClasspath(jarFilePathnames));
+
+    assertEquals(expectedClasspath,
+        getLauncherLifecycleCommands().toClasspath(INCLUDE_SYSTEM_CLASSPATH, jarFilePathnames, userClasspaths));
+
+    expectedClasspath = LauncherLifecycleCommands.GEMFIRE_JAR_PATHNAME.concat(File.pathSeparator).concat(
+        System.getProperty("java.class.path"));
+
+    assertEquals(expectedClasspath,
+        getLauncherLifecycleCommands().toClasspath(INCLUDE_SYSTEM_CLASSPATH, null, (String[]) null));
+
+    assertEquals(LauncherLifecycleCommands.GEMFIRE_JAR_PATHNAME,
+        getLauncherLifecycleCommands().toClasspath(EXCLUDE_SYSTEM_CLASSPATH, null, (String[]) null));
+
+    assertEquals(LauncherLifecycleCommands.GEMFIRE_JAR_PATHNAME,
+        getLauncherLifecycleCommands().toClasspath(EXCLUDE_SYSTEM_CLASSPATH, new String[0], ""));
+  }
+
+  @Test
+  public void testToClassPathOrder() {
+    String userClasspathOne = "/path/to/user/lib/a.jar:/path/to/user/classes";
+    String userClasspathTwo = "/path/to/user/lib/x.jar:/path/to/user/lib/y.jar:/path/to/user/lib/z.jar";
+
+    String expectedClasspath = launcherCommands.getGemFireJarPath().concat(File.pathSeparator).concat(
+        userClasspathOne).concat(File.pathSeparator).concat(userClasspathTwo).concat(File.pathSeparator).concat(
+        System.getProperty("java.class.path")).concat(File.pathSeparator).concat(
+        LauncherLifecycleCommands.CORE_DEPENDENCIES_JAR_PATHNAME).concat(File.pathSeparator).concat(
+        LauncherLifecycleCommands.CORE_DEPENDENCIES_JAR_PATHNAME);
+
+    String actualClasspath = launcherCommands.toClasspath(true,
+        new String[]{LauncherLifecycleCommands.CORE_DEPENDENCIES_JAR_PATHNAME, LauncherLifecycleCommands.CORE_DEPENDENCIES_JAR_PATHNAME},
+        userClasspathOne, userClasspathTwo);
+
+    assertEquals(expectedClasspath, actualClasspath);
+  }
+
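+  // Asserts that every expected dependency name appears (case-insensitively) somewhere in the dependencies
+  // JAR's Manifest Class-Path attribute, reporting any expected dependencies that are missing.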
+  private void assertJarFileManifestClassPath(final File dependenciesJar,
+      final Collection<String> expectedJarDependencies) throws IOException {
+    JarFile dependenciesJarFile = new JarFile(dependenciesJar);
+    Manifest manifest = dependenciesJarFile.getManifest();
+
+    assertNotNull(manifest);
+
+    Attributes attributes = manifest.getMainAttributes();
+
+    assertNotNull(attributes);
+    assertTrue(attributes.containsKey(Name.CLASS_PATH));
+
+    String[] actualJarDependencies = attributes.getValue(Name.CLASS_PATH).split(" ");
+
+    assertNotNull(actualJarDependencies);
+    assertTrue(String.format("Expected the actual number of JAR dependencies to be (%1$d); but was (%2$d)!",
+        expectedJarDependencies.size(), actualJarDependencies.length),
+        actualJarDependencies.length >= expectedJarDependencies.size());
+    //assertTrue(Arrays.asList(actualJarDependencies).containsAll(expectedJarDependencies));
+
+    List<String> actualJarDependenciesList = new ArrayList<>(Arrays.asList(actualJarDependencies));
+    List<String> missingExpectedJarDependenciesList = new ArrayList<>(expectedJarDependencies.size());
+
+    for (String expectedJarDependency : expectedJarDependencies) {
+      boolean containsExpectedJar = false;
+
+      for (int index = 0, size = actualJarDependenciesList.size(); index < size; index++) {
+        if (actualJarDependenciesList.get(index).toLowerCase().contains(expectedJarDependency.toLowerCase())) {
+          actualJarDependenciesList.remove(index);
+          containsExpectedJar = true;
+          break;
+        }
+      }
+
+      if (!containsExpectedJar) {
+        missingExpectedJarDependenciesList.add(expectedJarDependency);
+      }
+    }
+
+    assertTrue(String.format(
+        "GemFire dependencies JAR file (%1$s) does not contain the expected dependencies (%2$s) in the Manifest Class-Path attribute (%3$s)!",
+        dependenciesJar, missingExpectedJarDependenciesList, attributes.getValue(Name.CLASS_PATH)),
+        missingExpectedJarDependenciesList.isEmpty());
+  }
+
+  private String toClasspath(final String... jarFilePathnames) {
+    String classpath = StringUtils.EMPTY_STRING;
+
+    if (jarFilePathnames != null) {
+      for (final String jarFilePathname : jarFilePathnames) {
+        classpath += (classpath.isEmpty() ? StringUtils.EMPTY_STRING : File.pathSeparator);
+        classpath += jarFilePathname;
+      }
+    }
+
+    return classpath;
+  }
+
+  @Test
+  public void testGetJavaPathname() {
+    assertEquals(IOUtils.appendToPath(System.getProperty("java.home"), "bin",
+        "java" + LauncherLifecycleCommands.getExecutableSuffix()),
+        getLauncherLifecycleCommands().getJdkToolPathname("java" + LauncherLifecycleCommands.getExecutableSuffix(),
+            new GemFireException() {
+            }));
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testGetJdkToolPathnameWithNullPathnames() {
+    try {
+      getLauncherLifecycleCommands().getJdkToolPathname((Stack<String>) null, new GemFireException() {
+      });
+    } catch (NullPointerException expected) {
+      assertEquals("The JDK tool executable pathnames cannot be null!", expected.getMessage());
+      throw expected;
+    }
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testGetJdkToolPathnameWithNullGemFireException() {
+    try {
+      getLauncherLifecycleCommands().getJdkToolPathname(new Stack<String>(), null);
+    } catch (NullPointerException expected) {
+      assertEquals("The GemFireException cannot be null!", expected.getMessage());
+      throw expected;
+    }
+  }
+
+  @Test
+  public void testGetJdkToolPathnameForNonExistingTool() {
+    try {
+      final GemFireException expected = new GemFireException() {
+        @Override
+        public String getMessage() {
+          return "expected";
+        }
+      };
+
+      getLauncherLifecycleCommands().getJdkToolPathname("nonExistingTool.exe", expected);
+    } catch (GemFireException expected) {
+      assertEquals("expected", expected.getMessage());
+    }
+  }
+
+  @Test
+  public void testGetLocatorId() {
+    assertEquals("tidepool[11235]", getLauncherLifecycleCommands().getLocatorId("tidepool", 11235));
+    assertEquals("tidepool.gemstone.com[11235]",
+        getLauncherLifecycleCommands().getLocatorId("tidepool.gemstone.com", 11235));
+    assertEquals("tidepool[" + DistributionLocator.DEFAULT_LOCATOR_PORT + "]",
+        getLauncherLifecycleCommands().getLocatorId("tidepool", null));
+  }
+
+  @Test
+  public void testGetServerId() {
+    assertEquals("tidepool[12480]", getLauncherLifecycleCommands().getServerId("tidepool", 12480));
+    assertEquals("tidepool.vmware.com[12480]",
+        getLauncherLifecycleCommands().getServerId("tidepool.vmware.com", 12480));
+    assertEquals("tidepool[" + CacheServer.DEFAULT_PORT + "]",
+        getLauncherLifecycleCommands().getServerId("tidepool", null));
+  }
+
+  @Test
+  public void testCreateJmxServerUrlWithMemberName() {
+    assertEquals("service:jmx:rmi://localhost:8192/jndi/rmi://localhost:8192/jmxrmi",
+        getLauncherLifecycleCommands().getJmxServiceUrlAsString("localhost[8192]"));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testCreateJmxServiceUrlWithInvalidMemberName() {
+    try {
+      System.err.println(getLauncherLifecycleCommands().getJmxServiceUrlAsString("memberOne[]"));
+    } catch (IllegalArgumentException expected) {
+      assertEquals(CliStrings.START_JCONSOLE__CONNECT_BY_MEMBER_NAME_ID_ERROR_MESSAGE, expected.getMessage());
+      throw expected;
+    }
+  }
+
+  @Test
+  public void testCreateServerCommandLine() throws Exception {
+    ServerLauncher serverLauncher = new ServerLauncher.Builder().setCommand(
+        ServerLauncher.Command.START).setDisableDefaultServer(true).setMemberName(
+        "testCreateServerCommandLine").setRebalance(true)
+        //.setServerBindAddress("localhost")
+        .setServerPort(41214).setCriticalHeapPercentage(95.5f).setEvictionHeapPercentage(85.0f).build();
+
+    String[] commandLineElements = launcherCommands.createStartServerCommandLine(serverLauncher, null, null,
+        new Properties(), null, false, new String[0], false, null, null);
+
+    assertNotNull(commandLineElements);
+    assertTrue(commandLineElements.length > 0);
+
+    Set<String> expectedCommandLineElements = new HashSet<>(6);
+
+    expectedCommandLineElements.add(serverLauncher.getCommand().getName());
+    expectedCommandLineElements.add("--disable-default-server");
+    expectedCommandLineElements.add(serverLauncher.getMemberName().toLowerCase());
+    expectedCommandLineElements.add("--rebalance");
+    //expectedCommandLineElements.add(String.format("--server-bind-address=%1$s", serverLauncher.getServerBindAddress().getHostName()));
+    expectedCommandLineElements.add(String.format("--server-port=%1$d", serverLauncher.getServerPort()));
+    expectedCommandLineElements.add(
+        String.format("--critical-heap-percentage=%1$s", serverLauncher.getCriticalHeapPercentage()));
+    expectedCommandLineElements.add(
+        String.format("--eviction-heap-percentage=%1$s", serverLauncher.getEvictionHeapPercentage()));
+
+    for (String commandLineElement : commandLineElements) {
+      expectedCommandLineElements.remove(commandLineElement.toLowerCase());
+    }
+
+    assertTrue(String.format("Expected ([]); but was (%1$s)", expectedCommandLineElements),
+        expectedCommandLineElements.isEmpty());
+  }
+
+}


[32/50] [abbrv] incubator-geode git commit: GEODE-608: Initial cut at adding RAT to build

Posted by kl...@apache.org.
GEODE-608: Initial cut at adding RAT to build

Add the RAT plugin to check for license headers in source files.  This
is a first pass that excludes generated files, typical IDE files, etc.
Create the report using `gradle rat`.  As GEODE-18 continues, we can
update the excludes list appropriately.  Eventually this task should
pass.
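
For reference, the header RAT checks for is the standard ASF source header.
A hedged example follows, written as a Java block comment (illustrative only;
the exact wrapping in individual Geode source files may differ slightly):

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */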


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f133ff1c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f133ff1c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f133ff1c

Branch: refs/heads/feature/GEODE-291
Commit: f133ff1c86be467bd4ecda5f8d24b7fe969531b7
Parents: a5906e5
Author: Anthony Baker <ab...@pivotal.io>
Authored: Sat Nov 28 08:07:31 2015 -0800
Committer: Anthony Baker <ab...@pivotal.io>
Committed: Thu Dec 10 09:44:56 2015 -0800

----------------------------------------------------------------------
 build.gradle | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f133ff1c/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 0c94573..6f5c2a3 100755
--- a/build.gradle
+++ b/build.gradle
@@ -5,11 +5,31 @@ buildscript {
     }
   }
   dependencies {
+    classpath "gradle.plugin.org.nosphere.apache:creadur-rat-gradle:0.2.0"
     classpath "org.ajoberstar:gradle-git:1.3.2"
   }
 }
 
 apply plugin: 'wrapper'
+apply plugin: "org.nosphere.apache.rat"
+
+rat {
+  excludes = [
+    '.git/**',
+    '**/.gitignore',
+    '**/.gradle/**',
+    '.gradle',
+    '**/build/**',
+    '**/.project',
+    '**/.classpath',
+    '**/.settings/**',
+    '**/build-eclipse/**',
+    '*.iml',
+    '.idea/**',
+
+    '**/doc-files/*.fig'
+  ]
+}
 
 // Load all properties in dependency-version.properties as project properties, so all projects can read them
 Properties dependencyVersions = new Properties()


[46/50] [abbrv] incubator-geode git commit: adding a unit test for initiation of suspect processing when shared/unordered

Posted by kl...@apache.org.
adding a unit test for initiation of suspect processing when shared/unordered

As part of this work I changed the internal membership listener interfaces to
propagate the reason for initiating suspect processing.  The test gathers
these reasons and asserts that at least one is initiated by TCPConduit.
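
A minimal sketch of such a listener, in the spirit of the MyMembershipListener
change to LocatorDUnitTest further down in this diff (the class name and the
helper method are illustrative, not part of the commit):

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

import com.gemstone.gemfire.distributed.internal.MembershipListener;
import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
import com.gemstone.gemfire.internal.tcp.Connection;

// Records every reason passed to memberSuspect so a test can later assert
// that at least one suspicion was initiated by TCPConduit.
class SuspectReasonRecorder implements MembershipListener {
  final List<String> suspectReasons = new ArrayList<>();

  public void memberJoined(InternalDistributedMember id) { }

  public void memberDeparted(InternalDistributedMember id, boolean crashed) { }

  public void memberSuspect(InternalDistributedMember id,
      InternalDistributedMember whoSuspected, String reason) {
    suspectReasons.add(reason);  // the new third parameter carries the reason
  }

  public void quorumLost(Set<InternalDistributedMember> failures,
      List<InternalDistributedMember> remaining) { }

  boolean sawTcpConduitSuspicion() {
    // INITIATING_SUSPECT_PROCESSING is the constant added to Connection below
    return suspectReasons.contains(Connection.INITIATING_SUSPECT_PROCESSING);
  }
}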


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/1c423796
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/1c423796
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/1c423796

Branch: refs/heads/feature/GEODE-291
Commit: 1c423796bc6dd9b0ce0f5557b6f4b31e1466f23d
Parents: 24c170a
Author: Bruce Schuchardt <bs...@pivotal.io>
Authored: Thu Dec 10 14:52:36 2015 -0800
Committer: Bruce Schuchardt <bs...@pivotal.io>
Committed: Thu Dec 10 14:53:50 2015 -0800

----------------------------------------------------------------------
 .../DistributedSystemHealthEvaluator.java       |  2 +-
 .../internal/DistributionAdvisor.java           |  2 +-
 .../internal/DistributionManager.java           | 24 +++++++++++++-------
 .../internal/MembershipListener.java            |  3 ++-
 .../distributed/internal/ProductUseLog.java     |  2 +-
 .../distributed/internal/ReplyProcessor21.java  |  2 +-
 .../internal/locks/DLockGrantor.java            |  2 +-
 .../DistributedMembershipListener.java          |  3 ++-
 .../internal/membership/gms/Services.java       | 12 +++++-----
 .../internal/membership/gms/SuspectMember.java  |  9 ++++++--
 .../membership/gms/auth/GMSAuthenticator.java   |  2 +-
 .../membership/gms/fd/GMSHealthMonitor.java     |  4 ++--
 .../membership/gms/interfaces/Service.java      |  3 ++-
 .../membership/gms/membership/GMSJoinLeave.java |  2 +-
 .../gms/messenger/JGroupsMessenger.java         |  2 +-
 .../gms/mgr/GMSMembershipManager.java           |  6 ++---
 .../admin/remote/RemoteGfManagerAgent.java      |  2 +-
 .../internal/cache/DistributedRegion.java       |  2 +-
 .../internal/cache/InitialImageFlowControl.java |  2 +-
 .../internal/cache/PRHARedundancyProvider.java  |  4 ++--
 .../internal/cache/PartitionedRegion.java       |  2 +-
 .../internal/cache/PartitionedRegionHelper.java |  2 +-
 .../cache/SearchLoadAndWriteProcessor.java      |  2 +-
 .../gemfire/internal/cache/TXCommitMessage.java |  2 +-
 .../internal/cache/TXFarSideCMTracker.java      |  2 +-
 .../gemfire/internal/cache/TXManagerImpl.java   |  2 +-
 .../PartitionedRegionRebalanceOp.java           |  2 +-
 .../cache/persistence/BackupManager.java        |  2 +-
 .../persistence/PersistenceAdvisorImpl.java     |  2 +-
 .../persistence/PersistentMemberManager.java    |  2 +-
 .../cache/versions/RegionVersionVector.java     |  2 +-
 .../gemfire/internal/tcp/Connection.java        |  6 +++--
 .../management/internal/FederatingManager.java  |  5 ++--
 .../internal/ManagementMembershipListener.java  |  4 ++--
 .../internal/SystemManagementService.java       |  4 ++--
 .../internal/beans/MBeanAggregator.java         |  2 +-
 .../gemfire/distributed/LocatorDUnitTest.java   |  9 ++++++--
 .../gms/mgr/GMSMembershipManagerJUnitTest.java  |  8 +++----
 38 files changed, 87 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/admin/internal/DistributedSystemHealthEvaluator.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/admin/internal/DistributedSystemHealthEvaluator.java b/gemfire-core/src/main/java/com/gemstone/gemfire/admin/internal/DistributedSystemHealthEvaluator.java
index 534f559..511edba 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/admin/internal/DistributedSystemHealthEvaluator.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/admin/internal/DistributedSystemHealthEvaluator.java
@@ -166,7 +166,7 @@ class DistributedSystemHealthEvaluator
   }
 
   public void memberSuspect(InternalDistributedMember id,
-      InternalDistributedMember whoSuspected) {
+      InternalDistributedMember whoSuspected, String reason) {
   }
   
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionAdvisor.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionAdvisor.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionAdvisor.java
index dc76d6b..538fa96 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionAdvisor.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionAdvisor.java
@@ -244,7 +244,7 @@ public class DistributionAdvisor  {
       }
 
       public void memberSuspect(InternalDistributedMember id,
-          InternalDistributedMember whoSuspected) {
+          InternalDistributedMember whoSuspected, String reason) {
       }
       
    };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionManager.java
index 7a9f7c0..964845c 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionManager.java
@@ -3333,9 +3333,10 @@ public class DistributionManager
   }
 
   /**
+   * @param reason TODO
    */
   public void handleManagerSuspect(InternalDistributedMember suspect, 
-      InternalDistributedMember whoSuspected) {
+      InternalDistributedMember whoSuspected, String reason) {
     if (!isCurrentMember(suspect)) {
       return; // fault tolerance
     }
@@ -3345,7 +3346,7 @@ public class DistributionManager
       return;
     }
 
-    addMemberEvent(new MemberSuspectEvent(suspect, whoSuspected));
+    addMemberEvent(new MemberSuspectEvent(suspect, whoSuspected, reason));
   }
   
   public void handleViewInstalled(NetView view) {
@@ -3756,7 +3757,7 @@ public class DistributionManager
                   }
                 }
                 public void memberSuspect(InternalDistributedMember id,
-                    InternalDistributedMember whoSuspected) {
+                    InternalDistributedMember whoSuspected, String reason) {
                 }
                 public void viewInstalled(NetView view) {
                 }
@@ -4424,8 +4425,8 @@ public class DistributionManager
       dm.handleManagerDeparture(theId, crashed, reason);
     }
     
-    public void memberSuspect(InternalDistributedMember suspect, InternalDistributedMember whoSuspected) {
-      dm.handleManagerSuspect(suspect, whoSuspected);
+    public void memberSuspect(InternalDistributedMember suspect, InternalDistributedMember whoSuspected, String reason) {
+      dm.handleManagerSuspect(suspect, whoSuspected, reason);
     }
     
     public void viewInstalled(NetView view) {
@@ -4579,20 +4580,27 @@ public class DistributionManager
    */
   private static final class MemberSuspectEvent extends MemberEvent {
     InternalDistributedMember whoSuspected;
-    MemberSuspectEvent(InternalDistributedMember suspect, InternalDistributedMember whoSuspected) {
+    String reason;
+    MemberSuspectEvent(InternalDistributedMember suspect, InternalDistributedMember whoSuspected, String reason) {
       super(suspect);
       this.whoSuspected = whoSuspected;
+      this.reason = reason;
     }
     public InternalDistributedMember whoSuspected() {
       return this.whoSuspected;
     }
+    
+    public String getReason() {
+      return this.reason;
+    }
+    
     @Override
     public String toString() {
-      return "member " + getId() + " suspected by: " + this.whoSuspected;
+      return "member " + getId() + " suspected by: " + this.whoSuspected + " reason: " + reason;
     }
     @Override
     protected void handleEvent(MembershipListener listener) {
-      listener.memberSuspect(getId(), whoSuspected());	
+      listener.memberSuspect(getId(), whoSuspected(), reason);	
     }
   }
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/MembershipListener.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/MembershipListener.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/MembershipListener.java
index 78ed51b..b545653 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/MembershipListener.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/MembershipListener.java
@@ -60,9 +60,10 @@ public interface MembershipListener {
    * removed from the membership view
    * @param id the suspected member
    * @param whoSuspected the member that initiated suspect processing
+   * @param reason TODO
    */
   public void memberSuspect(InternalDistributedMember id,
-      InternalDistributedMember whoSuspected);
+      InternalDistributedMember whoSuspected, String reason);
   
   /**
    * This is notification that more than 50% of member weight has been

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ProductUseLog.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ProductUseLog.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ProductUseLog.java
index 823b3d0..95d538c 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ProductUseLog.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ProductUseLog.java
@@ -136,7 +136,7 @@ public final class ProductUseLog implements MembershipListener {
 
   @Override
   public void memberSuspect(InternalDistributedMember id,
-      InternalDistributedMember whoSuspected) {
+      InternalDistributedMember whoSuspected, String reason) {
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ReplyProcessor21.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ReplyProcessor21.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ReplyProcessor21.java
index aa5f66c..b219630 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ReplyProcessor21.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ReplyProcessor21.java
@@ -508,7 +508,7 @@ public class ReplyProcessor21
   }
 
   public void memberSuspect(InternalDistributedMember id,
-      InternalDistributedMember whoSuspected) {
+      InternalDistributedMember whoSuspected, String reason) {
     if (isSevereAlertProcessingEnabled()) {
       // if we're waiting for the member that initiated suspicion, we don't
       // want to be hasty about kicking it out of the distributed system

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/locks/DLockGrantor.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/locks/DLockGrantor.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/locks/DLockGrantor.java
index 92cea62..b4e646c 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/locks/DLockGrantor.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/locks/DLockGrantor.java
@@ -3731,7 +3731,7 @@ public class DLockGrantor {
     public void quorumLost(Set<InternalDistributedMember> failures, List<InternalDistributedMember> remaining) {
     }
     public void memberSuspect(InternalDistributedMember id,
-        InternalDistributedMember whoSuspected) {
+        InternalDistributedMember whoSuspected, String reason) {
     }
     public void memberDeparted(final InternalDistributedMember id, final boolean crashed) {
       final DLockGrantor me = DLockGrantor.this;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/DistributedMembershipListener.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/DistributedMembershipListener.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/DistributedMembershipListener.java
index acfd6ba..d970b6b 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/DistributedMembershipListener.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/DistributedMembershipListener.java
@@ -49,8 +49,9 @@ public interface DistributedMembershipListener extends DirectChannelListener {
   /**
    * Event indicating that a member is suspected of having departed but
    * is still in the membership view
+   * @param reason TODO
    */
-  public void memberSuspect(InternalDistributedMember suspect, InternalDistributedMember whoSuspected);
+  public void memberSuspect(InternalDistributedMember suspect, InternalDistributedMember whoSuspected, String reason);
 
   /**
    * Event indicating a message has been delivered that we need to process.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/Services.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/Services.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/Services.java
index 799f95d..4484c00 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/Services.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/Services.java
@@ -277,20 +277,20 @@ public class Services {
     manager.installView(v);
   }
   
-  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect) {
+  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect, String reason) {
     try {
-      joinLeave.memberSuspected(initiator, suspect);
+      joinLeave.memberSuspected(initiator, suspect, reason);
     } finally {
       try {
-        healthMon.memberSuspected(initiator, suspect);
+        healthMon.memberSuspected(initiator, suspect, reason);
       } finally {
         try {
-          auth.memberSuspected(initiator, suspect);
+          auth.memberSuspected(initiator, suspect, reason);
         } finally {
           try {
-            messenger.memberSuspected(initiator, suspect);
+            messenger.memberSuspected(initiator, suspect, reason);
           } finally {
-            manager.memberSuspected(initiator, suspect);
+            manager.memberSuspected(initiator, suspect, reason);
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/SuspectMember.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/SuspectMember.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/SuspectMember.java
index a03ee92..ca27698 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/SuspectMember.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/SuspectMember.java
@@ -27,10 +27,15 @@ public class SuspectMember
   /** suspected member */
   public InternalDistributedMember suspectedMember;
   
-  /** create a new SuspectMember */
-  public SuspectMember(InternalDistributedMember whoSuspected, InternalDistributedMember suspectedMember) {
+  /** the reason */
+  public String reason;
+  
+  /** create a new SuspectMember 
+   * @param reason TODO*/
+  public SuspectMember(InternalDistributedMember whoSuspected, InternalDistributedMember suspectedMember, String reason) {
     this.whoSuspected = whoSuspected;
     this.suspectedMember = suspectedMember;
+    this.reason = reason;
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/auth/GMSAuthenticator.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/auth/GMSAuthenticator.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/auth/GMSAuthenticator.java
index 982942d..ba35e46 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/auth/GMSAuthenticator.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/auth/GMSAuthenticator.java
@@ -90,7 +90,7 @@ public class GMSAuthenticator implements Authenticator {
   }
   
   @Override
-  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect) {
+  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect, String reason) {
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
index 7709114..8c38ba6 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
@@ -940,7 +940,7 @@ public class GMSHealthMonitor implements HealthMonitor, MessageHandler {
   }
 
   @Override
-  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect) {
+  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect, String reason) {
   }
 
   @Override
@@ -1177,7 +1177,7 @@ public class GMSHealthMonitor implements HealthMonitor, MessageHandler {
           @Override
           public void run() {
             try {
-              services.memberSuspected(initiator, mbr);
+              services.memberSuspected(initiator, mbr, reason);
               long startTime = System.currentTimeMillis();
               // for some reason we used to update the timestamp for the member
               // with the startTime, but we don't want to do that because it looks

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/interfaces/Service.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/interfaces/Service.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/interfaces/Service.java
index 2f4a728..8465067 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/interfaces/Service.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/interfaces/Service.java
@@ -75,8 +75,9 @@ public interface Service {
   
   /**
    * a member is suspected of having crashed
+   * @param reason TODO
    */
-  void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect);
+  void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect, String reason);
 
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
index ccc9d8c..e1821db 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
@@ -1301,7 +1301,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
   }
 
   @Override
-  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect) {
+  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect, String reason) {
     prepareProcessor.memberSuspected(initiator, suspect);
     viewProcessor.memberSuspected(initiator, suspect);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessenger.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessenger.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessenger.java
index 326491a..36a6200 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessenger.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessenger.java
@@ -350,7 +350,7 @@ public class JGroupsMessenger implements Messenger {
   }
 
   @Override
-  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect) {
+  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect, String reason) {
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
index bbe7ab3..93c14e2 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
@@ -1209,8 +1209,8 @@ public class GMSMembershipManager implements MembershipManager, Manager
   }
   
   @Override
-  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect) {
-    SuspectMember s = new SuspectMember(initiator, suspect);
+  public void memberSuspected(InternalDistributedMember initiator, InternalDistributedMember suspect, String reason) {
+    SuspectMember s = new SuspectMember(initiator, suspect, reason);
     handleOrDeferSuspect(s);
   }
 
@@ -1230,7 +1230,7 @@ public class GMSMembershipManager implements MembershipManager, Manager
       InternalDistributedMember who = suspectInfo.whoSuspected;
       this.suspectedMembers.put(suspect, Long.valueOf(System.currentTimeMillis()));
       try {
-        listener.memberSuspect(suspect, who);
+        listener.memberSuspect(suspect, who, suspectInfo.reason);
       }
       catch (DistributedSystemDisconnectedException se) {
         // let's not get huffy about it

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteGfManagerAgent.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteGfManagerAgent.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteGfManagerAgent.java
index eb62468..6a31b50 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteGfManagerAgent.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/admin/remote/RemoteGfManagerAgent.java
@@ -1428,7 +1428,7 @@ public
     }
     
     public void memberSuspect(InternalDistributedMember id,
-        InternalDistributedMember whoSuspected) {
+        InternalDistributedMember whoSuspected, String reason) {
     }
     
     public void quorumLost(Set<InternalDistributedMember> failures, List<InternalDistributedMember> remaining) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
index 5d263a6..92b585a 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
@@ -3805,7 +3805,7 @@ public class DistributedRegion extends LocalRegion implements
     }
 
     public void memberSuspect(InternalDistributedMember id,
-        InternalDistributedMember whoSuspected) {
+        InternalDistributedMember whoSuspected, String reason) {
     }
     
     /** called when membership listeners are added after region creation */

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/InitialImageFlowControl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/InitialImageFlowControl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/InitialImageFlowControl.java
index a37a39b..7777a33 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/InitialImageFlowControl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/InitialImageFlowControl.java
@@ -186,7 +186,7 @@ public class InitialImageFlowControl implements MembershipListener {
   }
 
   public void memberSuspect(InternalDistributedMember id,
-      InternalDistributedMember whoSuspected) {
+      InternalDistributedMember whoSuspected, String reason) {
     //Do nothing
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PRHARedundancyProvider.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PRHARedundancyProvider.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PRHARedundancyProvider.java
index 3e21113..3abb0d6 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PRHARedundancyProvider.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PRHARedundancyProvider.java
@@ -2206,7 +2206,7 @@ public class PRHARedundancyProvider
     }
 
     public void memberSuspect(InternalDistributedMember id,
-        InternalDistributedMember whoSuspected) {
+        InternalDistributedMember whoSuspected, String reason) {
     }
     
     public void memberDeparted(InternalDistributedMember id, boolean crashed) {
@@ -2339,7 +2339,7 @@ public class PRHARedundancyProvider
     }
     
     public void memberSuspect(InternalDistributedMember id,
-        InternalDistributedMember whoSuspected) {
+        InternalDistributedMember whoSuspected, String reason) {
     }
     
     public void memberJoined(InternalDistributedMember id)

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
index 48f4787..a36d719 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegion.java
@@ -9968,7 +9968,7 @@ public class PartitionedRegion extends LocalRegion implements
     }
 
     public void memberSuspect(InternalDistributedMember id,
-        InternalDistributedMember whoSuspected) {
+        InternalDistributedMember whoSuspected, String reason) {
     }
     
     public synchronized void memberDeparted(InternalDistributedMember id,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
index f4a20da..ef89c80 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHelper.java
@@ -1086,7 +1086,7 @@ public class PartitionedRegionHelper
     }
 
     public void memberSuspect(InternalDistributedMember id,
-        InternalDistributedMember whoSuspected) {
+        InternalDistributedMember whoSuspected, String reason) {
     }
     
     public void quorumLost(Set<InternalDistributedMember> failures, List<InternalDistributedMember> remaining) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/SearchLoadAndWriteProcessor.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/SearchLoadAndWriteProcessor.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/SearchLoadAndWriteProcessor.java
index d7caf66..e96d99d 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/SearchLoadAndWriteProcessor.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/SearchLoadAndWriteProcessor.java
@@ -237,7 +237,7 @@ public class SearchLoadAndWriteProcessor implements MembershipListener {
   }
 
   public void memberSuspect(InternalDistributedMember id,
-      InternalDistributedMember whoSuspected) {
+      InternalDistributedMember whoSuspected, String reason) {
   }
   
   public void quorumLost(Set<InternalDistributedMember> failures, List<InternalDistributedMember> remaining) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXCommitMessage.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXCommitMessage.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXCommitMessage.java
index 9648503..e712943 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXCommitMessage.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXCommitMessage.java
@@ -2027,7 +2027,7 @@ public class TXCommitMessage extends PooledDistributionMessage implements Member
   }
   
   public void memberSuspect(InternalDistributedMember id,
-      InternalDistributedMember whoSuspected) {
+      InternalDistributedMember whoSuspected, String reason) {
   }
   
   public void quorumLost(Set<InternalDistributedMember> failures, List<InternalDistributedMember> remaining) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXFarSideCMTracker.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXFarSideCMTracker.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXFarSideCMTracker.java
index c99d8ae..94e3059 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXFarSideCMTracker.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXFarSideCMTracker.java
@@ -211,7 +211,7 @@ public class TXFarSideCMTracker
         public void memberJoined(InternalDistributedMember id) {
         }
         public void memberSuspect(InternalDistributedMember id,
-            InternalDistributedMember whoSuspected) {
+            InternalDistributedMember whoSuspected, String reason) {
         }
         public void memberDeparted(InternalDistributedMember id, boolean crashed) {
           if (memberId.equals(id)) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXManagerImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXManagerImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXManagerImpl.java
index 994990b..f4216ac 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXManagerImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/TXManagerImpl.java
@@ -925,7 +925,7 @@ public final class TXManagerImpl implements CacheTransactionManager,
   }
 
   public void memberSuspect(InternalDistributedMember id,
-      InternalDistributedMember whoSuspected) {
+      InternalDistributedMember whoSuspected, String reason) {
   }
   
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionRebalanceOp.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionRebalanceOp.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionRebalanceOp.java
index 2b21982..a8d1350 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionRebalanceOp.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionRebalanceOp.java
@@ -644,7 +644,7 @@ public class PartitionedRegionRebalanceOp {
     }
 
     public void memberSuspect(InternalDistributedMember id,
-        InternalDistributedMember whoSuspected) {
+        InternalDistributedMember whoSuspected, String reason) {
       // do nothing.
     }
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/BackupManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/BackupManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/BackupManager.java
index 8f27736..1a5c765 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/BackupManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/BackupManager.java
@@ -352,7 +352,7 @@ public class BackupManager implements MembershipListener {
   }
 
   public void memberSuspect(InternalDistributedMember id,
-      InternalDistributedMember whoSuspected) {
+      InternalDistributedMember whoSuspected, String reason) {
   }
 
   public void waitForBackup() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/PersistenceAdvisorImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/PersistenceAdvisorImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/PersistenceAdvisorImpl.java
index 21af700..168fbfc 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/PersistenceAdvisorImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/PersistenceAdvisorImpl.java
@@ -1186,7 +1186,7 @@ public class PersistenceAdvisorImpl implements PersistenceAdvisor {
     }
 
     public void memberSuspect(InternalDistributedMember id,
-        InternalDistributedMember whoSuspected) {
+        InternalDistributedMember whoSuspected, String reason) {
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/PersistentMemberManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/PersistentMemberManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/PersistentMemberManager.java
index 87e956b..a6f6d64 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/PersistentMemberManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/persistence/PersistentMemberManager.java
@@ -255,7 +255,7 @@ public class PersistentMemberManager {
 
     @Override
     public void memberSuspect(InternalDistributedMember id,
-        InternalDistributedMember whoSuspected) {
+        InternalDistributedMember whoSuspected, String reason) {
     }
     
     @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/versions/RegionVersionVector.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/versions/RegionVersionVector.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/versions/RegionVersionVector.java
index 61423d1..27f3747 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/versions/RegionVersionVector.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/versions/RegionVersionVector.java
@@ -1364,7 +1364,7 @@ public abstract class RegionVersionVector<T extends VersionSource<?>> implements
 
 
   public void memberJoined(InternalDistributedMember id) { }
-  public void memberSuspect(InternalDistributedMember id, InternalDistributedMember whoSuspected) {  }
+  public void memberSuspect(InternalDistributedMember id, InternalDistributedMember whoSuspected, String reason) {  }
   public void quorumLost(Set<InternalDistributedMember> failures, List<InternalDistributedMember> remaining) {  }
 
   /* 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/internal/tcp/Connection.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/tcp/Connection.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/tcp/Connection.java
index 30962e7..f918812 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/tcp/Connection.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/tcp/Connection.java
@@ -123,6 +123,9 @@ public class Connection implements Runnable {
   /** counter to give connections a unique id */
   private static AtomicLong idCounter = new AtomicLong(1);
 
+  /** string used as the reason for initiating suspect processing */
+  public static final String INITIATING_SUSPECT_PROCESSING = "member unexpectedly shut down shared, unordered connection";
+
   /** the table holding this connection */
   final ConnectionTable owner;
   
@@ -1954,9 +1957,8 @@ public class Connection implements Runnable {
   private void initiateSuspicionIfSharedUnordered() {
     if (this.isReceiver && this.handshakeRead && !this.preserveOrder && this.sharedResource) {
       if (this.owner.getConduit().getCancelCriterion().cancelInProgress() == null) {
-        String reason = "member unexpectedly shut down shared, unordered connection";
         this.owner.getDM().getMembershipManager().suspectMember(this.getRemoteAddress(),
-            reason);
+            INITIATING_SUSPECT_PROCESSING);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/FederatingManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/FederatingManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/FederatingManager.java
index d49589c..91e7b22 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/FederatingManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/FederatingManager.java
@@ -276,9 +276,10 @@ public class FederatingManager extends Manager {
    * block the membership listener
    * 
    * @param member
+   * @param reason TODO
    */
-  public void suspectMember(DistributedMember member, InternalDistributedMember whoSuspected) {
-    service.memberSuspect((InternalDistributedMember) member, whoSuspected);
+  public void suspectMember(DistributedMember member, InternalDistributedMember whoSuspected, String reason) {
+    service.memberSuspect((InternalDistributedMember) member, whoSuspected, reason);
 
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/ManagementMembershipListener.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/ManagementMembershipListener.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/ManagementMembershipListener.java
index 561981c..9a7c657 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/ManagementMembershipListener.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/ManagementMembershipListener.java
@@ -76,7 +76,7 @@ public class ManagementMembershipListener implements MembershipListener {
   }
 
   @Override
-  public void memberSuspect(InternalDistributedMember id, InternalDistributedMember whoSuspected) {
+  public void memberSuspect(InternalDistributedMember id, InternalDistributedMember whoSuspected, String reason) {
 
     if (logger.isDebugEnabled()) {
       logger.debug("ManagementMembershipListener member suspected .. {}", id.getId());
@@ -85,7 +85,7 @@ public class ManagementMembershipListener implements MembershipListener {
       if (logger.isDebugEnabled()) {
         logger.debug("Suspecting member {}", id.getId());
       }
-      service.getFederatingManager().suspectMember(id, whoSuspected);
+      service.getFederatingManager().suspectMember(id, whoSuspected, reason);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
index 7ac85b8..68209f2 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
@@ -729,9 +729,9 @@ public final class SystemManagementService extends BaseManagementService {
     }
   }
 
-  public void memberSuspect(InternalDistributedMember id, InternalDistributedMember whoSuspected) {
+  public void memberSuspect(InternalDistributedMember id, InternalDistributedMember whoSuspected, String reason) {
     for (ProxyListener listener : proxyListeners) {
-      listener.memberSuspect(id, whoSuspected);
+      listener.memberSuspect(id, whoSuspected, reason);
     }
   }
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MBeanAggregator.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MBeanAggregator.java b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MBeanAggregator.java
index 8c096a4..a64e7f7 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MBeanAggregator.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/beans/MBeanAggregator.java
@@ -423,7 +423,7 @@ public class MBeanAggregator implements ProxyListener {
 
   @Override
   public void memberSuspect(InternalDistributedMember id,
-      InternalDistributedMember whoSuspected) {
+      InternalDistributedMember whoSuspected, String reason) {
     distributedSystemBridge.memberSuspect(id, whoSuspected);
   }
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
index 5b12563..7d34ab1 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
@@ -20,6 +20,7 @@ import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
 import java.io.LineNumberReader;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Properties;
 import java.util.Set;
@@ -48,6 +49,7 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 import com.gemstone.gemfire.internal.logging.LocalLogWriter;
+import com.gemstone.gemfire.internal.tcp.Connection;
 
 import dunit.AsyncInvocation;
 import dunit.DistributedTestCase;
@@ -60,7 +62,6 @@ import dunit.VM;
  * Tests the ability of the {@link Locator} API to start and stop
  * locators running in remote VMs.
  *
- * @author David Whitlock
  * @since 4.0
  */
 public class LocatorDUnitTest extends DistributedTestCase {
@@ -581,6 +582,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
       }
       // quorumLost should be invoked if we get a ForcedDisconnect in this situation
       assertTrue("expected quorumLost to be invoked", listener.quorumLostInvoked);
+      assertTrue("expected suspect processing initiated by TCPConduit", listener.suspectReasons.contains(Connection.INITIATING_SUSPECT_PROCESSING));
     }
     finally {
       if (locator != null) {
@@ -1882,11 +1884,14 @@ public class LocatorDUnitTest extends DistributedTestCase {
   }
   class MyMembershipListener implements MembershipListener {
     boolean quorumLostInvoked;
+    List<String> suspectReasons = new ArrayList<>(50);
     
     public void memberJoined(InternalDistributedMember id) {  }
     public void memberDeparted(InternalDistributedMember id, boolean crashed) { }
     public void memberSuspect(InternalDistributedMember id,
-        InternalDistributedMember whoSuspected) { }
+        InternalDistributedMember whoSuspected, String reason) {
+      suspectReasons.add(reason);
+    }
     public void quorumLost(Set<InternalDistributedMember> failures,
         List<InternalDistributedMember> remaining) {
       quorumLostInvoked = true;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/1c423796/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManagerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManagerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManagerJUnitTest.java
index e133625..44e1b46 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManagerJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManagerJUnitTest.java
@@ -233,10 +233,10 @@ public class GMSMembershipManagerJUnitTest {
 
     // suspect a member
     InternalDistributedMember suspectMember = mockMembers[1];
-    manager.handleOrDeferSuspect(new SuspectMember(mockMembers[0], suspectMember));
+    manager.handleOrDeferSuspect(new SuspectMember(mockMembers[0], suspectMember, "testing"));
     // suspect messages aren't queued - they're ignored before joining the system
     assertEquals(2, manager.getStartupEvents().size());
-    verify(listener, never()).memberSuspect(suspectMember, mockMembers[0]);
+    verify(listener, never()).memberSuspect(suspectMember, mockMembers[0], "testing");
 
     HighPriorityAckedMessage m = new HighPriorityAckedMessage();
     mockMembers[0].setVmViewId(1);
@@ -293,8 +293,8 @@ public class GMSMembershipManagerJUnitTest {
     // process a suspect now - it will be passed to the listener
     reset(listener);
     suspectMember = mockMembers[1];
-    manager.handleOrDeferSuspect(new SuspectMember(mockMembers[0], suspectMember));
-    verify(listener).memberSuspect(suspectMember, mockMembers[0]);
+    manager.handleOrDeferSuspect(new SuspectMember(mockMembers[0], suspectMember, "testing"));
+    verify(listener).memberSuspect(suspectMember, mockMembers[0], "testing");
     
     InternalDistributedMember mbr = manager.getMemberForStub(new Stub(myMemberId.getInetAddress(), 2033, 20), false);
     assertTrue(mbr == null);
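
Condensed from the two diffs above, the listener-facing shape of this change is roughly the following; the class and field names are illustrative, the imports of the internal Geode types are omitted, and only the callback signatures come from the commit:

    class RecordingMembershipListener implements MembershipListener {
      // collects the reason string now passed with each suspicion notification
      final List<String> suspectReasons = new ArrayList<>();

      public void memberJoined(InternalDistributedMember id) { }
      public void memberDeparted(InternalDistributedMember id, boolean crashed) { }
      public void memberSuspect(InternalDistributedMember id,
          InternalDistributedMember whoSuspected, String reason) {
        // e.g. Connection.INITIATING_SUSPECT_PROCESSING when TCPConduit starts it
        suspectReasons.add(reason);
      }
      public void quorumLost(Set<InternalDistributedMember> failures,
          List<InternalDistributedMember> remaining) { }
    }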


[36/50] [abbrv] incubator-geode git commit: GEODE-608: Removed exclusions from licensing check

Posted by kl...@apache.org.
GEODE-608: Removed exclusions from licensing check

XML, script, and build files are now checked for source license
headers.
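
For reference, the header the rat check looks for is the standard ASF source header. In a Groovy build file it is a comment block along these lines; script files carry the same text behind '#' comments and the newly covered *.xml files carry it inside <!-- -->:

    /*
     * Licensed to the Apache Software Foundation (ASF) under one or more
     * contributor license agreements.  See the NOTICE file distributed with
     * this work for additional information regarding copyright ownership.
     * The ASF licenses this file to You under the Apache License, Version 2.0
     * (the "License"); you may not use this file except in compliance with
     * the License.  You may obtain a copy of the License at
     *
     *      http://www.apache.org/licenses/LICENSE-2.0
     *
     * Unless required by applicable law or agreed to in writing, software
     * distributed under the License is distributed on an "AS IS" BASIS,
     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     * See the License for the specific language governing permissions and
     * limitations under the License.
     */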


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a31c8fb9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a31c8fb9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a31c8fb9

Branch: refs/heads/feature/GEODE-291
Commit: a31c8fb9ecfc7ac960fc4f1f83e06a0fb3e1cd9b
Parents: fddd33f
Author: Anthony Baker <ab...@pivotal.io>
Authored: Tue Dec 1 21:09:20 2015 -0800
Committer: Anthony Baker <ab...@pivotal.io>
Committed: Thu Dec 10 09:44:57 2015 -0800

----------------------------------------------------------------------
 build.gradle | 48 ++++--------------------------------------------
 1 file changed, 4 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a31c8fb9/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 4d0216b..babdb8b 100755
--- a/build.gradle
+++ b/build.gradle
@@ -21,16 +21,14 @@ rat {
     
     // gradle
     '**/.gradle/**',
-    '**/build.gradle',
     'gradlew',
     'gradlew.bat',
-    'gradle.properties',
-    'settings.gradle',
-    'gradle/dependency-versions.properties',
     'gradle/wrapper/gradle-wrapper.properties',
     '**/build/**',
     
     // IDE
+    'etc/eclipseFormatterProfile.xml',
+    'etc/intellijIdeaCodeStyle.xml',
     '**/.project',
     '**/.classpath',
     '**/.settings/**',
@@ -54,51 +52,14 @@ rat {
     '**/*.xls',
     
     // other text files
-    'gemfire-assembly/src/main/dist/bin/gfsh',
-    'gemfire-assembly/src/main/dist/bin/gfsh-completion.bash',
-    'gemfire-assembly/src/main/dist/bin/gfsh.bat',
-    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/fixantlr.sh',
     'gemfire-spark-connector/project/plugins.sbt',
     'gemfire-spark-connector/project/build.properties',
-    'gemfire-web-api/src/main/webapp/docs/o2c.html',
-    'gemfire-web-api/src/main/webapp/docs/index.html',
-    
-    // XML
-    'etc/eclipseFormatterProfile.xml',
-    'etc/intellijIdeaCodeStyle.xml',
-    'log4j2*.xml',
-    '**/example*cache.xml',
-    'gemfire-core/src/test/resources/**/*JUnitTest*.xml',
-    'gemfire-core/src/test/resources/**/IndexCreation.xml',
-    'gemfire-core/src/test/resources/**/PRIndexCreation.xml',
-    'gemfire-core/src/test/resources/**/PartitionRegionCacheExample*.xml',
-    'gemfire-core/src/test/resources/**/attributesUnordered.xml',
-    'gemfire-core/src/test/resources/**/bad*.xml',
-    'gemfire-core/src/test/resources/**/bug44710.xml',
-    'gemfire-core/src/test/resources/**/cachejta.xml',
-    'gemfire-core/src/test/resources/**/cachequeryindex*.xml',
-    'gemfire-core/src/test/resources/**/callback*.xml',
-    'gemfire-core/src/test/resources/**/coLocation*.xml',
-    'gemfire-core/src/test/resources/**/ewtest.xml',
-    'gemfire-core/src/test/resources/**/incorrect*.xml',
-    'gemfire-core/src/test/resources/**/index-creation-*.xml',
-    'gemfire-core/src/test/resources/**/index-recovery-overflow.xml',
-    'gemfire-core/src/test/resources/**/loaderNotLoader.xml',
-    'gemfire-core/src/test/resources/**/malformed.xml',
-    'gemfire-core/src/test/resources/**/mixed_diskstore_disk*.xml',
-    'gemfire-core/src/test/resources/**/namedAttributes.xml',
-    'gemfire-core/src/test/resources/**/partitioned*.xml',
-    'gemfire-core/src/test/resources/**/same*.xml',
-    'gemfire-core/src/test/resources/**/spring-gemfire-context.xml',
-    'gemfire-core/src/test/resources/**/test*.xml',
-    'gemfire-core/src/test/resources/**/unknownNamedAttributes.xml',
     
     // ANTLR generated files
     'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexer.java',
     'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexerTokenTypes.java',
     'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLParser.java',
     'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexerTokenTypes.txt',
-    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/oql.g',
     
     // Service META-INF
     '**/META-INF/services/org.xml.sax.ext.EntityResolver2',
@@ -115,9 +76,6 @@ rat {
     // Public Domain http://meyerweb.com/eric/tools/css/reset/
     'gemfire-web-api/src/main/webapp/docs/css/reset.css',
 
-    // Public Domain - http://creativecommons.org/licenses/publicdomain
-    'SynchronousQueueNoSpin.java',
-
     // JSON License - permissive, used for Good, not Evil
     'gemfire-json/src/main/java/org/json/CDL.java',
     'gemfire-json/src/main/java/org/json/Cookie.java',
@@ -155,6 +113,8 @@ rat {
     'gemfire-web-api/src/main/webapp/docs/lib/highlight.7.3.pack.js',
 
     // Apache License
+    'gemfire-web-api/src/main/webapp/docs/o2c.html',
+    'gemfire-web-api/src/main/webapp/docs/index.html',
     'gemfire-web-api/src/main/webapp/docs/lib/swagger-oauth.js',
     'gemfire-web-api/src/main/webapp/docs/lib/swagger.js',
     'gemfire-web-api/src/main/webapp/docs/css/screen.css',


[24/50] [abbrv] incubator-geode git commit: GEODE-637: Additional tests for AsyncEventQueues

Posted by kl...@apache.org.
GEODE-637: Additional tests for AsyncEventQueues


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/476c6cd3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/476c6cd3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/476c6cd3

Branch: refs/heads/feature/GEODE-291
Commit: 476c6cd3be1da503b2345d23fb2857da27d77127
Parents: 386d1ac
Author: Dan Smith <up...@apache.org>
Authored: Wed Dec 2 09:51:49 2015 -0800
Committer: Dan Smith <up...@apache.org>
Committed: Tue Dec 8 15:40:05 2015 -0800

----------------------------------------------------------------------
 .../cache/wan/AsyncEventQueueTestBase.java      | 1658 +++++++++++++++
 .../asyncqueue/AsyncEventListenerDUnitTest.java | 1911 ++++++++++++++++++
 .../AsyncEventListenerOffHeapDUnitTest.java     |   17 +
 .../AsyncEventQueueStatsDUnitTest.java          |  311 +++
 .../ConcurrentAsyncEventQueueDUnitTest.java     |  330 +++
 ...ncurrentAsyncEventQueueOffHeapDUnitTest.java |   16 +
 .../CommonParallelAsyncEventQueueDUnitTest.java |   53 +
 ...ParallelAsyncEventQueueOffHeapDUnitTest.java |   16 +
 8 files changed, 4312 insertions(+)
----------------------------------------------------------------------
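
The new AsyncEventQueueTestBase below drives everything through the public AsyncEventQueue API; as orientation, the pattern its helpers repeat is roughly the sketch here. The queue and region names are placeholders, MyAsyncEventListener stands for the simple listener the commit adds, and a running Cache is assumed:

    // Sketch of the wiring the test base exercises; assumes "cache" is a started Cache.
    AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
    factory.setBatchSize(100);        // events handed to the listener per batch
    factory.setPersistent(false);     // in-memory queue; true would require a disk store
    factory.setParallel(false);       // serial queue; true makes it parallel over a partitioned region
    factory.setDispatcherThreads(1);
    AsyncEventQueue queue = factory.create("sampleQueue", new MyAsyncEventListener());

    // route a region's events into the queue
    RegionFactory rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
    rf.addAsyncEventQueueId(queue.getId());
    Region region = rf.create("sampleRegion");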


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/476c6cd3/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
new file mode 100644
index 0000000..a800118
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
@@ -0,0 +1,1658 @@
+/*
+ * =========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved. This
+ * product is protected by U.S. and international copyright and intellectual
+ * property laws. Pivotal products are covered by one or more patents listed at
+ * http://www.pivotal.io/patents.
+ * =========================================================================
+ */
+package com.gemstone.gemfire.internal.cache.wan;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.StringTokenizer;
+import java.util.concurrent.ConcurrentHashMap;
+
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheClosedException;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.CacheLoader;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.Declarable;
+import com.gemstone.gemfire.cache.DiskStore;
+import com.gemstone.gemfire.cache.DiskStoreFactory;
+import com.gemstone.gemfire.cache.EntryEvent;
+import com.gemstone.gemfire.cache.LoaderHelper;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueue;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueStats;
+import com.gemstone.gemfire.cache.client.internal.LocatorDiscoveryCallbackAdapter;
+import com.gemstone.gemfire.cache.control.RebalanceFactory;
+import com.gemstone.gemfire.cache.control.RebalanceOperation;
+import com.gemstone.gemfire.cache.control.RebalanceResults;
+import com.gemstone.gemfire.cache.control.ResourceManager;
+import com.gemstone.gemfire.cache.persistence.PartitionOfflineException;
+import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
+import com.gemstone.gemfire.cache.wan.GatewayEventFilter;
+import com.gemstone.gemfire.cache.wan.GatewayReceiver;
+import com.gemstone.gemfire.cache.wan.GatewayReceiverFactory;
+import com.gemstone.gemfire.cache.wan.GatewaySender;
+import com.gemstone.gemfire.cache.wan.GatewaySender.OrderPolicy;
+import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;
+import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.cache.ForceReattemptException;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.cache.RegionQueue;
+
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.VM;
+
+public class AsyncEventQueueTestBase extends DistributedTestCase {
+
+  protected static Cache cache;
+
+  protected static VM vm0;
+
+  protected static VM vm1;
+
+  protected static VM vm2;
+
+  protected static VM vm3;
+
+  protected static VM vm4;
+
+  protected static VM vm5;
+
+  protected static VM vm6;
+
+  protected static VM vm7;
+
+  protected static AsyncEventListener eventListener1;
+
+  private static final long MAX_WAIT = 10000;
+
+  protected static GatewayEventFilter eventFilter;
+
+  protected static boolean destroyFlag = false;
+
+  protected static List<Integer> dispatcherThreads = new ArrayList<Integer>(
+      Arrays.asList(1, 3, 5));
+
+  // this will be set for each test method run with one of the values from above
+  // list
+  protected static int numDispatcherThreadsForTheRun = 1;
+
+  public AsyncEventQueueTestBase(String name) {
+    super(name);
+  }
+
+  public void setUp() throws Exception {
+    super.setUp();
+    final Host host = Host.getHost(0);
+    vm0 = host.getVM(0);
+    vm1 = host.getVM(1);
+    vm2 = host.getVM(2);
+    vm3 = host.getVM(3);
+    vm4 = host.getVM(4);
+    vm5 = host.getVM(5);
+    vm6 = host.getVM(6);
+    vm7 = host.getVM(7);
+    // this is done to vary the number of dispatchers for sender
+    // during every test method run
+    shuffleNumDispatcherThreads();
+    invokeInEveryVM(AsyncEventQueueTestBase.class,
+        "setNumDispatcherThreadsForTheRun",
+        new Object[] { dispatcherThreads.get(0) });
+  }
+
+  public static void shuffleNumDispatcherThreads() {
+    Collections.shuffle(dispatcherThreads);
+  }
+
+  public static void setNumDispatcherThreadsForTheRun(int numThreads) {
+    numDispatcherThreadsForTheRun = numThreads;
+  }
+
+  public static Integer createFirstLocatorWithDSId(int dsId) {
+    if (Locator.hasLocator()) {
+      Locator.getLocator().stop();
+    }
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    //props.setProperty(DistributionConfig.DISTRIBUTED_SYSTEM_ID_NAME, "" + dsId);
+    props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + port
+        + "]");
+    props.setProperty(DistributionConfig.START_LOCATOR_NAME, "localhost["
+        + port + "],server=true,peer=true,hostname-for-clients=localhost");
+    test.getSystem(props);
+    return port;
+  }
+
+  public static Integer createFirstRemoteLocator(int dsId, int remoteLocPort) {
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    props.setProperty(DistributionConfig.DISTRIBUTED_SYSTEM_ID_NAME, "" + dsId);
+    props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + port
+        + "]");
+    props.setProperty(DistributionConfig.START_LOCATOR_NAME, "localhost["
+        + port + "],server=true,peer=true,hostname-for-clients=localhost");
+    props.setProperty(DistributionConfig.REMOTE_LOCATORS_NAME, "localhost["
+        + remoteLocPort + "]");
+    test.getSystem(props);
+    return port;
+  }
+
+  public static void createReplicatedRegionWithAsyncEventQueue(
+      String regionName, String asyncQueueIds, Boolean offHeap) {
+    ExpectedException exp1 = addExpectedException(ForceReattemptException.class
+        .getName());
+    try {
+      AttributesFactory fact = new AttributesFactory();
+      if (asyncQueueIds != null) {
+        StringTokenizer tokenizer = new StringTokenizer(asyncQueueIds, ",");
+        while (tokenizer.hasMoreTokens()) {
+          String asyncQueueId = tokenizer.nextToken();
+          fact.addAsyncEventQueueId(asyncQueueId);
+        }
+      }
+      fact.setDataPolicy(DataPolicy.REPLICATE);
+      fact.setOffHeap(offHeap);
+      RegionFactory regionFactory = cache.createRegionFactory(fact.create());
+      Region r = regionFactory.create(regionName);
+      assertNotNull(r);
+    }
+    finally {
+      exp1.remove();
+    }
+  }
+
+  public static void createReplicatedRegionWithCacheLoaderAndAsyncEventQueue(
+      String regionName, String asyncQueueIds) {
+
+    AttributesFactory fact = new AttributesFactory();
+    if (asyncQueueIds != null) {
+      StringTokenizer tokenizer = new StringTokenizer(asyncQueueIds, ",");
+      while (tokenizer.hasMoreTokens()) {
+        String asyncQueueId = tokenizer.nextToken();
+        fact.addAsyncEventQueueId(asyncQueueId);
+      }
+    }
+    fact.setDataPolicy(DataPolicy.REPLICATE);
+    // set the CacheLoader
+    fact.setCacheLoader(new MyCacheLoader());
+    RegionFactory regionFactory = cache.createRegionFactory(fact.create());
+    Region r = regionFactory.create(regionName);
+    assertNotNull(r);
+  }
+
+  public static void createReplicatedRegionWithSenderAndAsyncEventQueue(
+      String regionName, String senderIds, String asyncChannelId, Boolean offHeap) {
+    ExpectedException exp = addExpectedException(ForceReattemptException.class
+        .getName());
+    try {
+
+      AttributesFactory fact = new AttributesFactory();
+      if (senderIds != null) {
+        StringTokenizer tokenizer = new StringTokenizer(senderIds, ",");
+        while (tokenizer.hasMoreTokens()) {
+          String senderId = tokenizer.nextToken();
+          fact.addGatewaySenderId(senderId);
+        }
+      }
+      fact.setDataPolicy(DataPolicy.REPLICATE);
+      fact.setOffHeap(offHeap);
+      fact.setScope(Scope.DISTRIBUTED_ACK);
+      RegionFactory regionFactory = cache.createRegionFactory(fact.create());
+      regionFactory.addAsyncEventQueueId(asyncChannelId);
+      Region r = regionFactory.create(regionName);
+      assertNotNull(r);
+    }
+    finally {
+      exp.remove();
+    }
+  }
+
+  public static void createAsyncEventQueue(String asyncChannelId,
+      boolean isParallel, Integer maxMemory, Integer batchSize,
+      boolean isConflation, boolean isPersistent, String diskStoreName,
+      boolean isDiskSynchronous) {
+
+    if (diskStoreName != null) {
+      File directory = new File(asyncChannelId + "_disk_"
+          + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
+      directory.mkdir();
+      File[] dirs1 = new File[] { directory };
+      DiskStoreFactory dsf = cache.createDiskStoreFactory();
+      dsf.setDiskDirs(dirs1);
+      DiskStore ds = dsf.create(diskStoreName);
+    }
+
+    AsyncEventListener asyncEventListener = new MyAsyncEventListener();
+
+    AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
+    factory.setBatchSize(batchSize);
+    factory.setPersistent(isPersistent);
+    factory.setDiskStoreName(diskStoreName);
+    factory.setDiskSynchronous(isDiskSynchronous);
+    factory.setBatchConflationEnabled(isConflation);
+    factory.setMaximumQueueMemory(maxMemory);
+    factory.setParallel(isParallel);
+    // set dispatcher threads
+    factory.setDispatcherThreads(numDispatcherThreadsForTheRun);
+    AsyncEventQueue asyncChannel = factory.create(asyncChannelId,
+        asyncEventListener);
+  }
+
+  public static void createAsyncEventQueueWithListener2(String asyncChannelId,
+      boolean isParallel, Integer maxMemory, Integer batchSize,
+      boolean isPersistent, String diskStoreName) {
+
+    if (diskStoreName != null) {
+      File directory = new File(asyncChannelId + "_disk_"
+          + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
+      directory.mkdir();
+      File[] dirs1 = new File[] { directory };
+      DiskStoreFactory dsf = cache.createDiskStoreFactory();
+      dsf.setDiskDirs(dirs1);
+      DiskStore ds = dsf.create(diskStoreName);
+    }
+
+    AsyncEventListener asyncEventListener = new MyAsyncEventListener2();
+
+    AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
+    factory.setBatchSize(batchSize);
+    factory.setPersistent(isPersistent);
+    factory.setDiskStoreName(diskStoreName);
+    factory.setMaximumQueueMemory(maxMemory);
+    factory.setParallel(isParallel);
+    // set dispatcher threads
+    factory.setDispatcherThreads(numDispatcherThreadsForTheRun);
+    AsyncEventQueue asyncChannel = factory.create(asyncChannelId,
+        asyncEventListener);
+  }
+
+  public static void createAsyncEventQueue(String asyncChannelId,
+      boolean isParallel, Integer maxMemory, Integer batchSize,
+      boolean isConflation, boolean isPersistent, String diskStoreName,
+      boolean isDiskSynchronous, String asyncListenerClass) throws Exception {
+
+    if (diskStoreName != null) {
+      File directory = new File(asyncChannelId + "_disk_"
+          + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
+      directory.mkdir();
+      File[] dirs1 = new File[] { directory };
+      DiskStoreFactory dsf = cache.createDiskStoreFactory();
+      dsf.setDiskDirs(dirs1);
+      DiskStore ds = dsf.create(diskStoreName);
+    }
+
+    String packagePrefix = "com.gemstone.gemfire.internal.cache.wan.";
+    String className = packagePrefix + asyncListenerClass;
+    AsyncEventListener asyncEventListener = null;
+    try {
+      Class clazz = Class.forName(className);
+      asyncEventListener = (AsyncEventListener)clazz.newInstance();
+    }
+    catch (ClassNotFoundException e) {
+      throw e;
+    }
+    catch (InstantiationException e) {
+      throw e;
+    }
+    catch (IllegalAccessException e) {
+      throw e;
+    }
+
+    AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
+    factory.setBatchSize(batchSize);
+    factory.setPersistent(isPersistent);
+    factory.setDiskStoreName(diskStoreName);
+    factory.setDiskSynchronous(isDiskSynchronous);
+    factory.setBatchConflationEnabled(isConflation);
+    factory.setMaximumQueueMemory(maxMemory);
+    factory.setParallel(isParallel);
+    // set dispatcher threads
+    factory.setDispatcherThreads(numDispatcherThreadsForTheRun);
+    AsyncEventQueue asyncChannel = factory.create(asyncChannelId,
+        asyncEventListener);
+  }
+
+  public static void createAsyncEventQueueWithCustomListener(
+      String asyncChannelId, boolean isParallel, Integer maxMemory,
+      Integer batchSize, boolean isConflation, boolean isPersistent,
+      String diskStoreName, boolean isDiskSynchronous) {
+    createAsyncEventQueueWithCustomListener(asyncChannelId, isParallel,
+        maxMemory, batchSize, isConflation, isPersistent, diskStoreName,
+        isDiskSynchronous, GatewaySender.DEFAULT_DISPATCHER_THREADS);
+  }
+
+  public static void createAsyncEventQueueWithCustomListener(
+      String asyncChannelId, boolean isParallel, Integer maxMemory,
+      Integer batchSize, boolean isConflation, boolean isPersistent,
+      String diskStoreName, boolean isDiskSynchronous, int nDispatchers) {
+
+    ExpectedException exp = addExpectedException(ForceReattemptException.class
+        .getName());
+
+    try {
+      if (diskStoreName != null) {
+        File directory = new File(asyncChannelId + "_disk_"
+            + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
+        directory.mkdir();
+        File[] dirs1 = new File[] { directory };
+        DiskStoreFactory dsf = cache.createDiskStoreFactory();
+        dsf.setDiskDirs(dirs1);
+        DiskStore ds = dsf.create(diskStoreName);
+      }
+
+      AsyncEventListener asyncEventListener = new CustomAsyncEventListener();
+
+      AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
+      factory.setBatchSize(batchSize);
+      factory.setPersistent(isPersistent);
+      factory.setDiskStoreName(diskStoreName);
+      factory.setMaximumQueueMemory(maxMemory);
+      factory.setParallel(isParallel);
+      factory.setDispatcherThreads(nDispatchers);
+      AsyncEventQueue asyncChannel = factory.create(asyncChannelId,
+          asyncEventListener);
+    }
+    finally {
+      exp.remove();
+    }
+  }
+
+  public static void createConcurrentAsyncEventQueue(String asyncChannelId,
+      boolean isParallel, Integer maxMemory, Integer batchSize,
+      boolean isConflation, boolean isPersistent, String diskStoreName,
+      boolean isDiskSynchronous, int dispatcherThreads, OrderPolicy policy) {
+
+    if (diskStoreName != null) {
+      File directory = new File(asyncChannelId + "_disk_"
+          + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
+      directory.mkdir();
+      File[] dirs1 = new File[] { directory };
+      DiskStoreFactory dsf = cache.createDiskStoreFactory();
+      dsf.setDiskDirs(dirs1);
+      DiskStore ds = dsf.create(diskStoreName);
+    }
+
+    AsyncEventListener asyncEventListener = new MyAsyncEventListener();
+
+    AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
+    factory.setBatchSize(batchSize);
+    factory.setPersistent(isPersistent);
+    factory.setDiskStoreName(diskStoreName);
+    factory.setDiskSynchronous(isDiskSynchronous);
+    factory.setBatchConflationEnabled(isConflation);
+    factory.setMaximumQueueMemory(maxMemory);
+    factory.setParallel(isParallel);
+    factory.setDispatcherThreads(dispatcherThreads);
+    factory.setOrderPolicy(policy);
+    AsyncEventQueue asyncChannel = factory.create(asyncChannelId,
+        asyncEventListener);
+  }
+
+  public static String createAsyncEventQueueWithDiskStore(
+      String asyncChannelId, boolean isParallel, Integer maxMemory,
+      Integer batchSize, boolean isPersistent, String diskStoreName) {
+
+    AsyncEventListener asyncEventListener = new MyAsyncEventListener();
+
+    File persistentDirectory = null;
+    if (diskStoreName == null) {
+      persistentDirectory = new File(asyncChannelId + "_disk_"
+          + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
+    }
+    else {
+      persistentDirectory = new File(diskStoreName);
+    }
+    getLogWriter().info("The ds is : " + persistentDirectory.getName());
+    persistentDirectory.mkdir();
+    DiskStoreFactory dsf = cache.createDiskStoreFactory();
+    File[] dirs1 = new File[] { persistentDirectory };
+
+    AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory();
+    factory.setBatchSize(batchSize);
+    factory.setParallel(isParallel);
+    if (isPersistent) {
+      factory.setPersistent(isPersistent);
+      factory.setDiskStoreName(dsf.setDiskDirs(dirs1).create(asyncChannelId)
+          .getName());
+    }
+    factory.setMaximumQueueMemory(maxMemory);
+    // set dispatcher threads
+    factory.setDispatcherThreads(numDispatcherThreadsForTheRun);
+    AsyncEventQueue asyncChannel = factory.create(asyncChannelId,
+        asyncEventListener);
+    return persistentDirectory.getName();
+  }
+
+  public static void pauseAsyncEventQueue(String asyncChannelId) {
+    AsyncEventQueue theChannel = null;
+
+    Set<AsyncEventQueue> asyncEventChannels = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncChannel : asyncEventChannels) {
+      if (asyncChannelId.equals(asyncChannel.getId())) {
+        theChannel = asyncChannel;
+      }
+    }
+
+    ((AsyncEventQueueImpl)theChannel).getSender().pause();
+  }
+
+  public static void pauseAsyncEventQueueAndWaitForDispatcherToPause(
+      String asyncChannelId) {
+    AsyncEventQueue theChannel = null;
+
+    Set<AsyncEventQueue> asyncEventChannels = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncChannel : asyncEventChannels) {
+      if (asyncChannelId.equals(asyncChannel.getId())) {
+        theChannel = asyncChannel;
+        break;
+      }
+    }
+
+    ((AsyncEventQueueImpl)theChannel).getSender().pause();
+
+    ((AbstractGatewaySender)((AsyncEventQueueImpl)theChannel).getSender())
+        .getEventProcessor().waitForDispatcherToPause();
+  }
+
+  public static void resumeAsyncEventQueue(String asyncQueueId) {
+    AsyncEventQueue theQueue = null;
+
+    Set<AsyncEventQueue> asyncEventChannels = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncChannel : asyncEventChannels) {
+      if (asyncQueueId.equals(asyncChannel.getId())) {
+        theQueue = asyncChannel;
+      }
+    }
+
+    ((AsyncEventQueueImpl)theQueue).getSender().resume();
+  }
+
+  public static void checkAsyncEventQueueSize(String asyncQueueId,
+      int numQueueEntries) {
+    AsyncEventQueue theAsyncEventQueue = null;
+
+    Set<AsyncEventQueue> asyncEventChannels = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncChannel : asyncEventChannels) {
+      if (asyncQueueId.equals(asyncChannel.getId())) {
+        theAsyncEventQueue = asyncChannel;
+      }
+    }
+
+    GatewaySender sender = ((AsyncEventQueueImpl)theAsyncEventQueue)
+        .getSender();
+
+    if (sender.isParallel()) {
+      Set<RegionQueue> queues = ((AbstractGatewaySender)sender).getQueues();
+      assertEquals(numQueueEntries,
+          queues.toArray(new RegionQueue[queues.size()])[0].getRegion().size());
+    }
+    else {
+      Set<RegionQueue> queues = ((AbstractGatewaySender)sender).getQueues();
+      int size = 0;
+      for (RegionQueue q : queues) {
+        size += q.size();
+      }
+      assertEquals(numQueueEntries, size);
+    }
+  }
+
+  /**
+   * This method verifies the queue size of a ParallelGatewaySender. For
+   * a ParallelGatewaySender, conflation happens in a separate thread, so the
+   * test code needs to wait for some time for the expected result.
+   * 
+   * @param asyncQueueId
+   *          Async Queue ID
+   * @param numQueueEntries
+   *          expected number of Queue entries
+   * @throws Exception
+   */
+  public static void waitForAsyncEventQueueSize(String asyncQueueId,
+      final int numQueueEntries) throws Exception {
+    AsyncEventQueue theAsyncEventQueue = null;
+
+    Set<AsyncEventQueue> asyncEventChannels = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncChannel : asyncEventChannels) {
+      if (asyncQueueId.equals(asyncChannel.getId())) {
+        theAsyncEventQueue = asyncChannel;
+      }
+    }
+
+    GatewaySender sender = ((AsyncEventQueueImpl)theAsyncEventQueue)
+        .getSender();
+
+    if (sender.isParallel()) {
+      final Set<RegionQueue> queues = ((AbstractGatewaySender)sender)
+          .getQueues();
+
+      waitForCriterion(new WaitCriterion() {
+
+        public String description() {
+          return "Waiting for EventQueue size to be " + numQueueEntries;
+        }
+
+        public boolean done() {
+          boolean done = numQueueEntries == queues.toArray(new RegionQueue[queues
+              .size()])[0].getRegion().size();
+          return done;
+        }
+
+      }, MAX_WAIT, 500, true);
+
+    }
+    else {
+      throw new Exception(
+          "This method should be used only for ParallelGatewaySender; SerialGatewaySender should use checkAsyncEventQueueSize() instead");
+
+    }
+  }
+
+  public static void createPartitionedRegion(String regionName,
+      String senderIds, Integer redundantCopies, Integer totalNumBuckets) {
+    ExpectedException exp = addExpectedException(ForceReattemptException.class
+        .getName());
+    ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+        .getName());
+    try {
+      AttributesFactory fact = new AttributesFactory();
+      if (senderIds != null) {
+        StringTokenizer tokenizer = new StringTokenizer(senderIds, ",");
+        while (tokenizer.hasMoreTokens()) {
+          String senderId = tokenizer.nextToken();
+          // GatewaySender sender = cache.getGatewaySender(senderId);
+          // assertNotNull(sender);
+          fact.addGatewaySenderId(senderId);
+        }
+      }
+      PartitionAttributesFactory pfact = new PartitionAttributesFactory();
+      pfact.setTotalNumBuckets(totalNumBuckets);
+      pfact.setRedundantCopies(redundantCopies);
+      pfact.setRecoveryDelay(0);
+      fact.setPartitionAttributes(pfact.create());
+      Region r = cache.createRegionFactory(fact.create()).create(regionName);
+      assertNotNull(r);
+    }
+    finally {
+      exp.remove();
+      exp1.remove();
+    }
+  }
+
+  public static void createPartitionedRegionWithAsyncEventQueue(
+      String regionName, String asyncEventQueueId, Boolean offHeap) {
+    ExpectedException exp = addExpectedException(ForceReattemptException.class
+        .getName());
+    ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+        .getName());
+    try {
+      AttributesFactory fact = new AttributesFactory();
+
+      PartitionAttributesFactory pfact = new PartitionAttributesFactory();
+      pfact.setTotalNumBuckets(16);
+      fact.setPartitionAttributes(pfact.create());
+      fact.setOffHeap(offHeap);
+      Region r = cache.createRegionFactory(fact.create())
+          .addAsyncEventQueueId(asyncEventQueueId).create(regionName);
+      assertNotNull(r);
+    }
+    finally {
+      exp.remove();
+      exp1.remove();
+    }
+  }
+
+  public static void createColocatedPartitionedRegionWithAsyncEventQueue(
+      String regionName, String asyncEventQueueId, Integer totalNumBuckets,
+      String colocatedWith) {
+
+    ExpectedException exp = addExpectedException(ForceReattemptException.class
+        .getName());
+    ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+        .getName());
+    try {
+      AttributesFactory fact = new AttributesFactory();
+
+      PartitionAttributesFactory pfact = new PartitionAttributesFactory();
+      pfact.setTotalNumBuckets(totalNumBuckets);
+      pfact.setColocatedWith(colocatedWith);
+      fact.setPartitionAttributes(pfact.create());
+      Region r = cache.createRegionFactory(fact.create())
+          .addAsyncEventQueueId(asyncEventQueueId).create(regionName);
+      assertNotNull(r);
+    }
+    finally {
+      exp.remove();
+      exp1.remove();
+    }
+  }
+
+  public static void createPartitionedRegionWithCacheLoaderAndAsyncQueue(
+      String regionName, String asyncEventQueueId) {
+
+    AttributesFactory fact = new AttributesFactory();
+
+    PartitionAttributesFactory pfact = new PartitionAttributesFactory();
+    pfact.setTotalNumBuckets(16);
+    fact.setPartitionAttributes(pfact.create());
+    // set the CacheLoader implementation
+    fact.setCacheLoader(new MyCacheLoader());
+    Region r = cache.createRegionFactory(fact.create())
+        .addAsyncEventQueueId(asyncEventQueueId).create(regionName);
+    assertNotNull(r);
+  }
+
+  /**
+   * Create PartitionedRegion with 1 redundant copy
+   */
+  public static void createPRWithRedundantCopyWithAsyncEventQueue(
+      String regionName, String asyncEventQueueId, Boolean offHeap) {
+    ExpectedException exp = addExpectedException(ForceReattemptException.class
+        .getName());
+
+    try {
+      AttributesFactory fact = new AttributesFactory();
+
+      PartitionAttributesFactory pfact = new PartitionAttributesFactory();
+      pfact.setTotalNumBuckets(16);
+      pfact.setRedundantCopies(1);
+      fact.setPartitionAttributes(pfact.create());
+      fact.setOffHeap(offHeap);
+      Region r = cache.createRegionFactory(fact.create())
+          .addAsyncEventQueueId(asyncEventQueueId).create(regionName);
+      assertNotNull(r);
+    }
+    finally {
+      exp.remove();
+    }
+  }
+
+  public static void createPartitionedRegionAccessorWithAsyncEventQueue(
+      String regionName, String asyncEventQueueId) {
+    AttributesFactory fact = new AttributesFactory();
+    PartitionAttributesFactory pfact = new PartitionAttributesFactory();
+    pfact.setTotalNumBuckets(16);
+    pfact.setLocalMaxMemory(0);
+    fact.setPartitionAttributes(pfact.create());
+    Region r = cache.createRegionFactory(fact.create())
+        .addAsyncEventQueueId(asyncEventQueueId).create(regionName);
+    // fact.create()).create(regionName);
+    assertNotNull(r);
+  }
+
+  protected static void createCache(Integer locPort) {
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort
+        + "]");
+    InternalDistributedSystem ds = test.getSystem(props);
+    cache = CacheFactory.create(ds);
+  }
+
+  public static void createCacheWithoutLocator(Integer mCastPort) {
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "" + mCastPort);
+    InternalDistributedSystem ds = test.getSystem(props);
+    cache = CacheFactory.create(ds);
+  }
+
+  public static void checkAsyncEventQueueStats(String queueId,
+      final int queueSize, final int eventsReceived, final int eventsQueued,
+      final int eventsDistributed) {
+    Set<AsyncEventQueue> asyncQueues = cache.getAsyncEventQueues();
+    AsyncEventQueue queue = null;
+    for (AsyncEventQueue q : asyncQueues) {
+      if (q.getId().equals(queueId)) {
+        queue = q;
+        break;
+      }
+    }
+    final AsyncEventQueueStats statistics = ((AsyncEventQueueImpl)queue)
+        .getStatistics();
+    assertEquals(queueSize, statistics.getEventQueueSize());
+    assertEquals(eventsReceived, statistics.getEventsReceived());
+    assertEquals(eventsQueued, statistics.getEventsQueued());
+    assert (statistics.getEventsDistributed() >= eventsDistributed);
+  }
+
+  public static void checkAsyncEventQueueConflatedStats(
+      String asyncEventQueueId, final int eventsConflated) {
+    Set<AsyncEventQueue> queues = cache.getAsyncEventQueues();
+    AsyncEventQueue queue = null;
+    for (AsyncEventQueue q : queues) {
+      if (q.getId().equals(asyncEventQueueId)) {
+        queue = q;
+        break;
+      }
+    }
+    final AsyncEventQueueStats statistics = ((AsyncEventQueueImpl)queue)
+        .getStatistics();
+    assertEquals(eventsConflated, statistics.getEventsNotQueuedConflated());
+  }
+
+  public static void checkAsyncEventQueueStats_Failover(
+      String asyncEventQueueId, final int eventsReceived) {
+    Set<AsyncEventQueue> asyncEventQueues = cache.getAsyncEventQueues();
+    AsyncEventQueue queue = null;
+    for (AsyncEventQueue q : asyncEventQueues) {
+      if (q.getId().equals(asyncEventQueueId)) {
+        queue = q;
+        break;
+      }
+    }
+    final AsyncEventQueueStats statistics = ((AsyncEventQueueImpl)queue)
+        .getStatistics();
+
+    assertEquals(eventsReceived, statistics.getEventsReceived());
+    assertEquals(
+        eventsReceived,
+        (statistics.getEventsQueued()
+            + statistics.getUnprocessedTokensAddedByPrimary() + statistics
+            .getUnprocessedEventsRemovedByPrimary()));
+  }
+
+  public static void checkAsyncEventQueueBatchStats(String asyncQueueId,
+      final int batches) {
+    Set<AsyncEventQueue> queues = cache.getAsyncEventQueues();
+    AsyncEventQueue queue = null;
+    for (AsyncEventQueue q : queues) {
+      if (q.getId().equals(asyncQueueId)) {
+        queue = q;
+        break;
+      }
+    }
+    final AsyncEventQueueStats statistics = ((AsyncEventQueueImpl)queue)
+        .getStatistics();
+    assert (statistics.getBatchesDistributed() >= batches);
+    assertEquals(0, statistics.getBatchesRedistributed());
+  }
+
+  public static void checkAsyncEventQueueUnprocessedStats(String asyncQueueId,
+      int events) {
+    Set<AsyncEventQueue> asyncQueues = cache.getAsyncEventQueues();
+    AsyncEventQueue queue = null;
+    for (AsyncEventQueue q : asyncQueues) {
+      if (q.getId().equals(asyncQueueId)) {
+        queue = q;
+        break;
+      }
+    }
+    final AsyncEventQueueStats statistics = ((AsyncEventQueueImpl)queue)
+        .getStatistics();
+    assertEquals(events,
+        (statistics.getUnprocessedEventsAddedBySecondary() + statistics
+            .getUnprocessedTokensRemovedBySecondary()));
+    assertEquals(events,
+        (statistics.getUnprocessedEventsRemovedByPrimary() + statistics
+            .getUnprocessedTokensAddedByPrimary()));
+  }
+
+  public static void setRemoveFromQueueOnException(String senderId,
+      boolean removeFromQueue) {
+    Set<GatewaySender> senders = cache.getGatewaySenders();
+    GatewaySender sender = null;
+    for (GatewaySender s : senders) {
+      if (s.getId().equals(senderId)) {
+        sender = s;
+        break;
+      }
+    }
+    assertNotNull(sender);
+    ((AbstractGatewaySender)sender)
+        .setRemoveFromQueueOnException(removeFromQueue);
+  }
+
+  public static void unsetRemoveFromQueueOnException(String senderId) {
+    Set<GatewaySender> senders = cache.getGatewaySenders();
+    GatewaySender sender = null;
+    for (GatewaySender s : senders) {
+      if (s.getId().equals(senderId)) {
+        sender = s;
+        break;
+      }
+    }
+    assertNotNull(sender);
+    ((AbstractGatewaySender)sender).setRemoveFromQueueOnException(false);
+  }
+
+  public static void waitForSenderToBecomePrimary(String senderId) {
+    Set<GatewaySender> senders = ((GemFireCacheImpl)cache)
+        .getAllGatewaySenders();
+    final GatewaySender sender = getGatewaySenderById(senders, senderId);
+    WaitCriterion wc = new WaitCriterion() {
+      public boolean done() {
+        if (sender != null && ((AbstractGatewaySender)sender).isPrimary()) {
+          return true;
+        }
+        return false;
+      }
+
+      public String description() {
+        return "Expected sender primary state to be true but is false";
+      }
+    };
+    DistributedTestCase.waitForCriterion(wc, 10000, 1000, true);
+  }
+
+  private static GatewaySender getGatewaySenderById(Set<GatewaySender> senders,
+      String senderId) {
+    for (GatewaySender s : senders) {
+      if (s.getId().equals(senderId)) {
+        return s;
+      }
+    }
+    // if none of the senders matches with the supplied senderid, return null
+    return null;
+  }
+
+  public static void createSender(String dsName, int remoteDsId,
+      boolean isParallel, Integer maxMemory, Integer batchSize,
+      boolean isConflation, boolean isPersistent, GatewayEventFilter filter,
+      boolean isManualStart) {
+    final ExpectedException exln = addExpectedException("Could not connect");
+    try {
+      File persistentDirectory = new File(dsName + "_disk_"
+          + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
+      persistentDirectory.mkdir();
+      DiskStoreFactory dsf = cache.createDiskStoreFactory();
+      File[] dirs1 = new File[] { persistentDirectory };
+      if (isParallel) {
+        GatewaySenderFactory gateway = cache.createGatewaySenderFactory();
+        gateway.setParallel(true);
+        gateway.setMaximumQueueMemory(maxMemory);
+        gateway.setBatchSize(batchSize);
+        gateway.setManualStart(isManualStart);
+        // set dispatcher threads
+        gateway.setDispatcherThreads(numDispatcherThreadsForTheRun);
+        ((InternalGatewaySenderFactory)gateway)
+            .setLocatorDiscoveryCallback(new MyLocatorCallback());
+        if (filter != null) {
+          eventFilter = filter;
+          gateway.addGatewayEventFilter(filter);
+        }
+        if (isPersistent) {
+          gateway.setPersistenceEnabled(true);
+          gateway.setDiskStoreName(dsf.setDiskDirs(dirs1).create(dsName)
+              .getName());
+        }
+        else {
+          DiskStore store = dsf.setDiskDirs(dirs1).create(dsName);
+          gateway.setDiskStoreName(store.getName());
+        }
+        gateway.setBatchConflationEnabled(isConflation);
+        gateway.create(dsName, remoteDsId);
+
+      }
+      else {
+        GatewaySenderFactory gateway = cache.createGatewaySenderFactory();
+        gateway.setMaximumQueueMemory(maxMemory);
+        gateway.setBatchSize(batchSize);
+        gateway.setManualStart(isManualStart);
+        // set dispatcher threads
+        gateway.setDispatcherThreads(numDispatcherThreadsForTheRun);
+        ((InternalGatewaySenderFactory)gateway)
+            .setLocatorDiscoveryCallback(new MyLocatorCallback());
+        if (filter != null) {
+          eventFilter = filter;
+          gateway.addGatewayEventFilter(filter);
+        }
+        gateway.setBatchConflationEnabled(isConflation);
+        if (isPersistent) {
+          gateway.setPersistenceEnabled(true);
+          gateway.setDiskStoreName(dsf.setDiskDirs(dirs1).create(dsName)
+              .getName());
+        }
+        else {
+          DiskStore store = dsf.setDiskDirs(dirs1).create(dsName);
+          gateway.setDiskStoreName(store.getName());
+        }
+        gateway.create(dsName, remoteDsId);
+      }
+    }
+    finally {
+      exln.remove();
+    }
+  }
+
+  public static void pauseWaitCriteria(final long millisec) {
+    WaitCriterion wc = new WaitCriterion() {
+      public boolean done() {
+        return false;
+      }
+
+      public String description() {
+        return "Expected to wait for " + millisec + " millisec.";
+      }
+    };
+    DistributedTestCase.waitForCriterion(wc, millisec, 500, false);
+  }
+
+  public static int createReceiver(int locPort) {
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort
+        + "]");
+
+    InternalDistributedSystem ds = test.getSystem(props);
+    cache = CacheFactory.create(ds);
+    GatewayReceiverFactory fact = cache.createGatewayReceiverFactory();
+    int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
+    fact.setStartPort(port);
+    fact.setEndPort(port);
+    fact.setManualStart(true);
+    GatewayReceiver receiver = fact.create();
+    try {
+      receiver.start();
+    }
+    catch (IOException e) {
+      e.printStackTrace();
+      fail("Test " + test.getName()
+          + " failed to start GatewayReceiver on port " + port);
+    }
+    return port;
+  }
+
+  public static String makePath(String[] strings) {
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < strings.length; i++) {
+      sb.append(strings[i]);
+      sb.append(File.separator);
+    }
+    return sb.toString();
+  }
+
+  /**
+   * Do a rebalance and verify balance was improved. If evictionPercentage > 0
+   * (the default) then we have heapLRU and this can cause simulate and
+   * rebalance results to differ if eviction kicks in between. (See BUG 44899).
+   */
+  public static void doRebalance() {
+    ResourceManager resMan = cache.getResourceManager();
+    boolean heapEviction = (resMan.getEvictionHeapPercentage() > 0);
+    RebalanceFactory factory = resMan.createRebalanceFactory();
+    try {
+      RebalanceResults simulateResults = null;
+      if (!heapEviction) {
+        getLogWriter().info("Calling rebalance simulate");
+        RebalanceOperation simulateOp = factory.simulate();
+        simulateResults = simulateOp.getResults();
+      }
+
+      getLogWriter().info("Starting rebalancing");
+      RebalanceOperation rebalanceOp = factory.start();
+      RebalanceResults rebalanceResults = rebalanceOp.getResults();
+
+    }
+    catch (InterruptedException e) {
+      fail("Interrupted", e);
+    }
+  }
+
+  public static void doPuts(String regionName, int numPuts) {
+    ExpectedException exp1 = addExpectedException(InterruptedException.class
+        .getName());
+    ExpectedException exp2 = addExpectedException(GatewaySenderException.class
+        .getName());
+    try {
+      Region r = cache.getRegion(Region.SEPARATOR + regionName);
+      assertNotNull(r);
+      for (long i = 0; i < numPuts; i++) {
+        r.put(i, i);
+      }
+    }
+    finally {
+      exp1.remove();
+      exp2.remove();
+    }
+    // for (long i = 0; i < numPuts; i++) {
+    // r.destroy(i);
+    // }
+  }
+
+  /**
+   * To be used for CacheLoader related tests
+   */
+  public static void doGets(String regionName, int numGets) {
+    Region r = cache.getRegion(Region.SEPARATOR + regionName);
+    assertNotNull(r);
+    for (long i = 0; i < numGets; i++) {
+      r.get(i);
+    }
+  }
+
+  public static void doPutsFrom(String regionName, int from, int numPuts) {
+    Region r = cache.getRegion(Region.SEPARATOR + regionName);
+    assertNotNull(r);
+    for (long i = from; i < numPuts; i++) {
+      r.put(i, i);
+    }
+  }
+
+  public static void doPutAll(String regionName, int numPuts, int size) {
+    Region r = cache.getRegion(Region.SEPARATOR + regionName);
+    assertNotNull(r);
+    for (long i = 0; i < numPuts; i++) {
+      Map putAllMap = new HashMap();
+      for (long j = 0; j < size; j++) {
+        putAllMap.put((size * i) + j, i);
+      }
+      r.putAll(putAllMap, "putAllCallback");
+      putAllMap.clear();
+    }
+  }
+
+  public static void putGivenKeyValue(String regionName, Map keyValues) {
+    Region r = cache.getRegion(Region.SEPARATOR + regionName);
+    assertNotNull(r);
+    for (Object key : keyValues.keySet()) {
+      r.put(key, keyValues.get(key));
+    }
+  }
+
+  public static void doNextPuts(String regionName, int start, int numPuts) {
+    // waitForSitesToUpdate();
+    ExpectedException exp = addExpectedException(CacheClosedException.class
+        .getName());
+    try {
+      Region r = cache.getRegion(Region.SEPARATOR + regionName);
+      assertNotNull(r);
+      for (long i = start; i < numPuts; i++) {
+        r.put(i, i);
+      }
+    }
+    finally {
+      exp.remove();
+    }
+  }
+
+  public static void validateRegionSize(String regionName, final int regionSize) {
+    ExpectedException exp = addExpectedException(ForceReattemptException.class
+        .getName());
+    ExpectedException exp1 = addExpectedException(CacheClosedException.class
+        .getName());
+    try {
+
+      final Region r = cache.getRegion(Region.SEPARATOR + regionName);
+      assertNotNull(r);
+      WaitCriterion wc = new WaitCriterion() {
+        public boolean done() {
+          if (r.keySet().size() == regionSize) {
+            return true;
+          }
+          return false;
+        }
+
+        public String description() {
+          return "Expected region entries: " + regionSize
+              + " but actual entries: " + r.keySet().size()
+              + " present region keyset " + r.keySet();
+        }
+      };
+      DistributedTestCase.waitForCriterion(wc, 240000, 500, true);
+    }
+    finally {
+      exp.remove();
+      exp1.remove();
+    }
+  }
+
+  /**
+   * Validate whether all the attributes set on AsyncEventQueueFactory are set
+   * on the sender underneath the AsyncEventQueue.
+   */
+  public static void validateAsyncEventQueueAttributes(String asyncChannelId,
+      int maxQueueMemory, int batchSize, int batchTimeInterval,
+      boolean isPersistent, String diskStoreName, boolean isDiskSynchronous,
+      boolean batchConflationEnabled) {
+
+    AsyncEventQueue theChannel = null;
+
+    Set<AsyncEventQueue> asyncEventChannels = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncChannel : asyncEventChannels) {
+      if (asyncChannelId.equals(asyncChannel.getId())) {
+        theChannel = asyncChannel;
+      }
+    }
+
+    GatewaySender theSender = ((AsyncEventQueueImpl)theChannel).getSender();
+    assertEquals("maxQueueMemory", maxQueueMemory,
+        theSender.getMaximumQueueMemory());
+    assertEquals("batchSize", batchSize, theSender.getBatchSize());
+    assertEquals("batchTimeInterval", batchTimeInterval,
+        theSender.getBatchTimeInterval());
+    assertEquals("isPersistent", isPersistent, theSender.isPersistenceEnabled());
+    assertEquals("diskStoreName", diskStoreName, theSender.getDiskStoreName());
+    assertEquals("isDiskSynchronous", isDiskSynchronous,
+        theSender.isDiskSynchronous());
+    assertEquals("batchConflation", batchConflationEnabled,
+        theSender.isBatchConflationEnabled());
+  }
+  
+  /**
+   * Validate whether all the attributes set on AsyncEventQueueFactory are set
+   * on the sender underneath the AsyncEventQueue.
+   */
+  public static void validateConcurrentAsyncEventQueueAttributes(String asyncChannelId,
+      int maxQueueMemory, int batchSize, int batchTimeInterval,
+      boolean isPersistent, String diskStoreName, boolean isDiskSynchronous,
+      boolean batchConflationEnabled, int dispatcherThreads, OrderPolicy policy) {
+
+    AsyncEventQueue theChannel = null;
+
+    Set<AsyncEventQueue> asyncEventChannels = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncChannel : asyncEventChannels) {
+      if (asyncChannelId.equals(asyncChannel.getId())) {
+        theChannel = asyncChannel;
+      }
+    }
+
+    GatewaySender theSender = ((AsyncEventQueueImpl)theChannel).getSender();
+    assertEquals("maxQueueMemory", maxQueueMemory, theSender
+        .getMaximumQueueMemory());
+    assertEquals("batchSize", batchSize, theSender.getBatchSize());
+    assertEquals("batchTimeInterval", batchTimeInterval, theSender
+        .getBatchTimeInterval());
+    assertEquals("isPersistent", isPersistent, theSender.isPersistenceEnabled());
+    assertEquals("diskStoreName", diskStoreName, theSender.getDiskStoreName());
+    assertEquals("isDiskSynchronous", isDiskSynchronous, theSender
+        .isDiskSynchronous());
+    assertEquals("batchConflation", batchConflationEnabled, theSender
+        .isBatchConflationEnabled());
+    assertEquals("dispatcherThreads", dispatcherThreads, theSender
+        .getDispatcherThreads());
+    assertEquals("orderPolicy", policy, theSender.getOrderPolicy());
+  }
+
+  public static void validateAsyncEventListener(String asyncQueueId,
+      final int expectedSize) {
+    AsyncEventListener theListener = null;
+
+    Set<AsyncEventQueue> asyncEventQueues = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncQueue : asyncEventQueues) {
+      if (asyncQueueId.equals(asyncQueue.getId())) {
+        theListener = asyncQueue.getAsyncEventListener();
+      }
+    }
+
+    final Map eventsMap = ((MyAsyncEventListener)theListener).getEventsMap();
+    assertNotNull(eventsMap);
+    WaitCriterion wc = new WaitCriterion() {
+      public boolean done() {
+        if (eventsMap.size() == expectedSize) {
+          return true;
+        }
+        return false;
+      }
+
+      public String description() {
+        return "Expected map entries: " + expectedSize
+            + " but actual entries: " + eventsMap.size();
+      }
+    };
+    DistributedTestCase.waitForCriterion(wc, 60000, 500, true); // TODO:Yogs
+  }
+
+  public static void validateAsyncEventForOperationDetail(String asyncQueueId,
+      final int expectedSize, boolean isLoad, boolean isPutAll) {
+
+    AsyncEventListener theListener = null;
+
+    Set<AsyncEventQueue> asyncEventQueues = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncQueue : asyncEventQueues) {
+      if (asyncQueueId.equals(asyncQueue.getId())) {
+        theListener = asyncQueue.getAsyncEventListener();
+      }
+    }
+
+    final Map eventsMap = ((MyAsyncEventListener_CacheLoader)theListener)
+        .getEventsMap();
+    assertNotNull(eventsMap);
+    WaitCriterion wc = new WaitCriterion() {
+      public boolean done() {
+        if (eventsMap.size() == expectedSize) {
+          return true;
+        }
+        return false;
+      }
+
+      public String description() {
+        return "Expected map entries: " + expectedSize
+            + " but actual entries: " + eventsMap.size();
+      }
+    };
+    DistributedTestCase.waitForCriterion(wc, 60000, 500, true); // TODO:Yogs
+    Collection values = eventsMap.values();
+    Iterator itr = values.iterator();
+    while (itr.hasNext()) {
+      AsyncEvent asyncEvent = (AsyncEvent)itr.next();
+      if (isLoad)
+        assertTrue(asyncEvent.getOperation().isLoad());
+      if (isPutAll)
+        assertTrue(asyncEvent.getOperation().isPutAll());
+    }
+  }
+
+  public static void validateCustomAsyncEventListener(String asyncQueueId,
+      final int expectedSize) {
+    AsyncEventListener theListener = null;
+
+    Set<AsyncEventQueue> asyncEventQueues = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncQueue : asyncEventQueues) {
+      if (asyncQueueId.equals(asyncQueue.getId())) {
+        theListener = asyncQueue.getAsyncEventListener();
+      }
+    }
+
+    final Map eventsMap = ((CustomAsyncEventListener)theListener)
+        .getEventsMap();
+    assertNotNull(eventsMap);
+    WaitCriterion wc = new WaitCriterion() {
+      public boolean done() {
+        if (eventsMap.size() == expectedSize) {
+          return true;
+        }
+        return false;
+      }
+
+      public String description() {
+        return "Expected map entries: " + expectedSize
+            + " but actual entries: " + eventsMap.size();
+      }
+    };
+    DistributedTestCase.waitForCriterion(wc, 60000, 500, true); // TODO:Yogs
+
+    Iterator<AsyncEvent> itr = eventsMap.values().iterator();
+    while (itr.hasNext()) {
+      AsyncEvent event = itr.next();
+      assertTrue("possibleDuplicate should be true for event: " + event,
+          event.getPossibleDuplicate());
+    }
+  }
+
+  public static void waitForAsyncQueueToGetEmpty(String asyncQueueId) {
+    AsyncEventQueue theAsyncEventQueue = null;
+
+    Set<AsyncEventQueue> asyncEventChannels = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncChannel : asyncEventChannels) {
+      if (asyncQueueId.equals(asyncChannel.getId())) {
+        theAsyncEventQueue = asyncChannel;
+      }
+    }
+
+    final GatewaySender sender = ((AsyncEventQueueImpl)theAsyncEventQueue)
+        .getSender();
+
+    if (sender.isParallel()) {
+      final Set<RegionQueue> queues = ((AbstractGatewaySender)sender)
+          .getQueues();
+
+      WaitCriterion wc = new WaitCriterion() {
+        public boolean done() {
+          int size = 0;
+          for (RegionQueue q : queues) {
+            size += q.size();
+          }
+          if (size == 0) {
+            return true;
+          }
+          return false;
+        }
+
+        public String description() {
+          int size = 0;
+          for (RegionQueue q : queues) {
+            size += q.size();
+          }
+          return "Expected queue size to be : " + 0 + " but actual entries: "
+              + size;
+        }
+      };
+      DistributedTestCase.waitForCriterion(wc, 60000, 500, true);
+
+    }
+    else {
+      WaitCriterion wc = new WaitCriterion() {
+        public boolean done() {
+          Set<RegionQueue> queues = ((AbstractGatewaySender)sender).getQueues();
+          int size = 0;
+          for (RegionQueue q : queues) {
+            size += q.size();
+          }
+          if (size == 0) {
+            return true;
+          }
+          return false;
+        }
+
+        public String description() {
+          Set<RegionQueue> queues = ((AbstractGatewaySender)sender).getQueues();
+          int size = 0;
+          for (RegionQueue q : queues) {
+            size += q.size();
+          }
+          return "Expected queue size to be : " + 0 + " but actual entries: "
+              + size;
+        }
+      };
+      DistributedTestCase.waitForCriterion(wc, 60000, 500, true);
+    }
+  }
+
+  public static void verifyAsyncEventListenerForPossibleDuplicates(
+      String asyncEventQueueId, Set<Integer> bucketIds, int batchSize) {
+    AsyncEventListener theListener = null;
+
+    Set<AsyncEventQueue> asyncEventQueues = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncQueue : asyncEventQueues) {
+      if (asyncEventQueueId.equals(asyncQueue.getId())) {
+        theListener = asyncQueue.getAsyncEventListener();
+      }
+    }
+
+    final Map<Integer, List<GatewaySenderEventImpl>> bucketToEventsMap = ((MyAsyncEventListener2)theListener)
+        .getBucketToEventsMap();
+    assertNotNull(bucketToEventsMap);
+    assertTrue(bucketIds.size() > 1);
+
+    for (int bucketId : bucketIds) {
+      List<GatewaySenderEventImpl> eventsForBucket = bucketToEventsMap
+          .get(bucketId);
+      getLogWriter().info(
+          "Events for bucket: " + bucketId + " is " + eventsForBucket);
+      assertNotNull(eventsForBucket);
+      for (int i = 0; i < batchSize; i++) {
+        GatewaySenderEventImpl senderEvent = eventsForBucket.get(i);
+        assertTrue(senderEvent.getPossibleDuplicate());
+      }
+    }
+  }
+
+  public static int getAsyncEventListenerMapSize(String asyncEventQueueId) {
+    AsyncEventListener theListener = null;
+
+    Set<AsyncEventQueue> asyncEventQueues = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncQueue : asyncEventQueues) {
+      if (asyncEventQueueId.equals(asyncQueue.getId())) {
+        theListener = asyncQueue.getAsyncEventListener();
+      }
+    }
+
+    final Map eventsMap = ((MyAsyncEventListener)theListener).getEventsMap();
+    assertNotNull(eventsMap);
+    getLogWriter().info("The events map size is " + eventsMap.size());
+    return eventsMap.size();
+  }
+
+  public static int getAsyncEventQueueSize(String asyncEventQueueId) {
+    AsyncEventQueue theQueue = null;
+
+    Set<AsyncEventQueue> asyncEventQueues = cache.getAsyncEventQueues();
+    for (AsyncEventQueue asyncQueue : asyncEventQueues) {
+      if (asyncEventQueueId.equals(asyncQueue.getId())) {
+        theQueue = asyncQueue;
+      }
+    }
+    assertNotNull(theQueue);
+    return theQueue.size();
+  }
+
+  public static String getRegionFullPath(String regionName) {
+    final Region r = cache.getRegion(Region.SEPARATOR + regionName);
+    assertNotNull(r);
+    return r.getFullPath();
+  }
+
+  public static Set<Integer> getAllPrimaryBucketsOnTheNode(String regionName) {
+    PartitionedRegion region = (PartitionedRegion)cache.getRegion(regionName);
+    return region.getDataStore().getAllLocalPrimaryBucketIds();
+  }
+
+  public static void addCacheListenerAndCloseCache(String regionName) {
+    final Region region = cache.getRegion(Region.SEPARATOR + regionName);
+    assertNotNull(region);
+    CacheListenerAdapter cl = new CacheListenerAdapter() {
+      @Override
+      public void afterCreate(EntryEvent event) {
+        if ((Long)event.getKey() == 900) {
+          cache.getLogger().fine(" Gateway sender is killed by a test");
+          cache.close();
+          cache.getDistributedSystem().disconnect();
+        }
+      }
+    };
+    region.getAttributesMutator().addCacheListener(cl);
+  }
+
+  public static Boolean killSender(String senderId) {
+    final ExpectedException exln = addExpectedException("Could not connect");
+    ExpectedException exp = addExpectedException(CacheClosedException.class
+        .getName());
+    ExpectedException exp1 = addExpectedException(ForceReattemptException.class
+        .getName());
+    try {
+      Set<GatewaySender> senders = cache.getGatewaySenders();
+      AbstractGatewaySender sender = null;
+      for (GatewaySender s : senders) {
+        if (s.getId().equals(senderId)) {
+          sender = (AbstractGatewaySender)s;
+          break;
+        }
+      }
+      if (sender.isPrimary()) {
+        getLogWriter().info("Gateway sender is killed by a test");
+        cache.getDistributedSystem().disconnect();
+        return Boolean.TRUE;
+      }
+      return Boolean.FALSE;
+    }
+    finally {
+      exp.remove();
+      exp1.remove();
+      exln.remove();
+    }
+  }
+
+  public static Boolean killAsyncEventQueue(String asyncQueueId) {
+    Set<AsyncEventQueue> queues = cache.getAsyncEventQueues();
+    AsyncEventQueueImpl queue = null;
+    for (AsyncEventQueue q : queues) {
+      if (q.getId().equals(asyncQueueId)) {
+        queue = (AsyncEventQueueImpl)q;
+        break;
+      }
+    }
+    if (queue.isPrimary()) {
+      getLogWriter().info("AsyncEventQueue is killed by a test");
+      cache.getDistributedSystem().disconnect();
+      return Boolean.TRUE;
+    }
+    return Boolean.FALSE;
+  }
+
+  public static void killSender() {
+    getLogWriter().info("Gateway sender is going to be killed by a test");
+    cache.close();
+    cache.getDistributedSystem().disconnect();
+    getLogWriter().info("Gateway sender is killed by a test");
+  }
+
+  public static class MyLocatorCallback extends LocatorDiscoveryCallbackAdapter {
+
+    private final Set discoveredLocators = new HashSet();
+
+    private final Set removedLocators = new HashSet();
+
+    public synchronized void locatorsDiscovered(List locators) {
+      discoveredLocators.addAll(locators);
+      notifyAll();
+    }
+
+    public synchronized void locatorsRemoved(List locators) {
+      removedLocators.addAll(locators);
+      notifyAll();
+    }
+
+    public boolean waitForDiscovery(InetSocketAddress locator, long time)
+        throws InterruptedException {
+      return waitFor(discoveredLocators, locator, time);
+    }
+
+    public boolean waitForRemove(InetSocketAddress locator, long time)
+        throws InterruptedException {
+      return waitFor(removedLocators, locator, time);
+    }
+
+    private synchronized boolean waitFor(Set set, InetSocketAddress locator,
+        long time) throws InterruptedException {
+      long remaining = time;
+      long endTime = System.currentTimeMillis() + time;
+      while (!set.contains(locator) && remaining > 0) {
+        wait(remaining);
+        remaining = endTime - System.currentTimeMillis();
+      }
+      return set.contains(locator);
+    }
+
+    public synchronized Set getDiscovered() {
+      return new HashSet(discoveredLocators);
+    }
+
+    public synchronized Set getRemoved() {
+      return new HashSet(removedLocators);
+    }
+  }
+  
+  public void tearDown2() throws Exception {
+    super.tearDown2();
+    cleanupVM();
+    vm0.invoke(AsyncEventQueueTestBase.class, "cleanupVM");
+    vm1.invoke(AsyncEventQueueTestBase.class, "cleanupVM");
+    vm2.invoke(AsyncEventQueueTestBase.class, "cleanupVM");
+    vm3.invoke(AsyncEventQueueTestBase.class, "cleanupVM");
+    vm4.invoke(AsyncEventQueueTestBase.class, "cleanupVM");
+    vm5.invoke(AsyncEventQueueTestBase.class, "cleanupVM");
+    vm6.invoke(AsyncEventQueueTestBase.class, "cleanupVM");
+    vm7.invoke(AsyncEventQueueTestBase.class, "cleanupVM");
+  }
+
+  public static void cleanupVM() throws IOException {
+    closeCache();
+  }
+
+  public static void closeCache() throws IOException {
+    if (cache != null && !cache.isClosed()) {
+      cache.close();
+      cache.getDistributedSystem().disconnect();
+      cache = null;
+    }
+    else {
+      AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+      if (test.isConnectedToDS()) {
+        test.getSystem().disconnect();
+      }
+    }
+  }
+
+  public static void shutdownLocator() {
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    test.getSystem().disconnect();
+  }
+
+  public static void printEventListenerMap() {
+    ((MyGatewaySenderEventListener)eventListener1).printMap();
+  }
+  
+
+  @Override
+  public InternalDistributedSystem getSystem(Properties props) {
+    // For now all WANTestBase tests allocate off-heap memory even though
+    // many of them never use it.
+    // The problem is that WANTestBase has static methods that create instances
+    // of WANTestBase (instead of instances of the subclass). So we can't override
+    // this method so that only the off-heap subclasses allocate off heap memory.
+    props.setProperty(DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME, "300m");
+    return super.getSystem(props);
+  }
+  
+  /**
+   * Returns true if the test should create off-heap regions.
+   * OffHeap tests should over-ride this method and return true.
+   */
+  public boolean isOffHeap() {
+    return false;
+  }
+
+}
+
+class MyAsyncEventListener_CacheLoader implements AsyncEventListener {
+  private final Map eventsMap;
+
+  public MyAsyncEventListener_CacheLoader() {
+    this.eventsMap = new ConcurrentHashMap();
+  }
+
+  public boolean processEvents(List<AsyncEvent> events) {
+    for (AsyncEvent event : events) {
+      this.eventsMap.put(event.getKey(), event);
+    }
+    return true;
+  }
+
+  public Map getEventsMap() {
+    return eventsMap;
+  }
+
+  public void close() {
+  }
+}
+
+class MyCacheLoader implements CacheLoader, Declarable {
+
+  public Object load(LoaderHelper helper) {
+    Long key = (Long)helper.getKey();
+    return "LoadedValue" + "_" + key;
+  }
+
+  public void close() {
+  }
+
+  public void init(Properties props) {
+  }
+
+}


[41/50] [abbrv] incubator-geode git commit: GEODE-644: add unit test for DataAsAddress

Posted by kl...@apache.org.
GEODE-644: add unit test for DataAsAddress


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/56b37571
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/56b37571
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/56b37571

Branch: refs/heads/feature/GEODE-291
Commit: 56b3757129fb20ab047d8c164fa965eff50099c4
Parents: 7cbb5db
Author: Sai Boorlagadda <sb...@pivotal.io>
Authored: Tue Dec 8 15:08:02 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Thu Dec 10 11:11:30 2015 -0800

----------------------------------------------------------------------
 .../gemfire/internal/offheap/DataAsAddress.java |  14 +-
 .../offheap/OffHeapRegionEntryHelper.java       |  51 +--
 .../offheap/AbstractStoredObjectTestBase.java   | 203 ++++++++++
 .../offheap/DataAsAddressJUnitTest.java         | 368 +++++++++++++++++++
 4 files changed, 593 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56b37571/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/DataAsAddress.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/DataAsAddress.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/DataAsAddress.java
index 5b14389..dff1d8f 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/DataAsAddress.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/DataAsAddress.java
@@ -59,7 +59,13 @@ public class DataAsAddress extends AbstractStoredObject {
   }
 
   public byte[] getDecompressedBytes(RegionEntryContext r) {
-    return OffHeapRegionEntryHelper.encodedAddressToBytes(this.address, true, r);
+    byte[] bytes = OffHeapRegionEntryHelper.decodeAddressToBytes(getEncodedAddress(), true, true);
+    if (isCompressed()) {
+        long time = r.getCachePerfStats().startDecompression();
+        bytes = r.getCompressor().decompress(bytes);
+        r.getCachePerfStats().endDecompression(time);
+    }
+    return bytes;
   }
 
   /**
@@ -67,17 +73,17 @@ public class DataAsAddress extends AbstractStoredObject {
    * Otherwise return the serialized bytes in us in a byte array.
    */
   public byte[] getRawBytes() {
-    return OffHeapRegionEntryHelper.encodedAddressToRawBytes(this.address);
+    return OffHeapRegionEntryHelper.decodeAddressToBytes(getEncodedAddress(), true, false);
   }
   
   @Override
   public byte[] getSerializedValue() {
-    return OffHeapRegionEntryHelper.encodedAddressToBytes(this.address);
+    return OffHeapRegionEntryHelper.decodeAddressToBytes(this.address);
   }
 
   @Override
   public Object getDeserializedValue(Region r, RegionEntry re) {
-    return OffHeapRegionEntryHelper.encodedAddressToObject(this.address);
+    return OffHeapRegionEntryHelper.decodeAddressToObject(this.address);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56b37571/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionEntryHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionEntryHelper.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionEntryHelper.java
index d1a81f0..1731b01 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionEntryHelper.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionEntryHelper.java
@@ -87,10 +87,6 @@ public class OffHeapRegionEntryHelper {
     if (v == Token.NOT_AVAILABLE) return NOT_AVAILABLE_ADDRESS;
     throw new IllegalStateException("Can not convert " + v + " to an off heap address.");
   }
-
-  static Object encodedAddressToObject(long ohAddress) {
-    return encodedAddressToObject(ohAddress, true, true);
-  }
   
   //TODO:Asif:Check if this is a valid equality condition
   public static boolean isAddressInvalidOrRemoved(long address) {
@@ -280,23 +276,20 @@ public class OffHeapRegionEntryHelper {
     }
     return 0L;
   }
-  
-  public static Object encodedAddressToObject(long addr, boolean decompress, boolean deserialize) {
-    boolean isSerialized = (addr & SERIALIZED_BIT) != 0;
-    byte[] bytes = encodedAddressToBytes(addr, decompress, false);
-    if (isSerialized) {
-      if (deserialize) {
-        return EntryEventImpl.deserialize(bytes);
+
+  static Object decodeAddressToObject(long ohAddress) {
+      byte[] bytes = decodeAddressToBytes(ohAddress, true, false);
+
+      boolean isSerialized = (ohAddress & SERIALIZED_BIT) != 0;
+      if (isSerialized) {
+         return EntryEventImpl.deserialize(bytes);
       } else {
-        return CachedDeserializableFactory.create(bytes);
+          return bytes;
       }
-    } else {
-      return bytes;
-    }
   }
-  
-  static byte[] encodedAddressToBytes(long addr) {
-    byte[] result = encodedAddressToBytes(addr, true, false);
+
+  static byte[] decodeAddressToBytes(long addr) {
+    byte[] result = decodeAddressToBytes(addr, true, false);
     boolean isSerialized = (addr & SERIALIZED_BIT) != 0;
     if (!isSerialized) {
       result = EntryEventImpl.serialize(result);
@@ -304,15 +297,7 @@ public class OffHeapRegionEntryHelper {
     return result;
   }
 
-  /**
-   * If the address contains a byte[] return it.
-   * Otherwise return the serialize bytes in the address in a byte array.
-   */
-  static byte[] encodedAddressToRawBytes(long addr) {
-    return encodedAddressToBytes(addr, true, false);
-  }
-
-  private static byte[] encodedAddressToBytes(long addr, boolean decompress, boolean compressedOk) {
+  static byte[] decodeAddressToBytes(long addr, boolean decompress, boolean compressedOk) {
     assert (addr & ENCODED_BIT) != 0;
     boolean isCompressed = (addr & COMPRESSED_BIT) != 0;
     int size = (int) ((addr & SIZE_MASK) >> SIZE_SHIFT);
@@ -344,18 +329,6 @@ public class OffHeapRegionEntryHelper {
     }
     return bytes;
   }
-  public static byte[] encodedAddressToBytes(long addr, boolean decompress, RegionEntryContext context) {
-    byte[] bytes = encodedAddressToBytes(addr, decompress, true);
-    if (decompress) {
-      boolean isCompressed = (addr & COMPRESSED_BIT) != 0;
-      if (isCompressed) {
-        long time = context.getCachePerfStats().startDecompression();
-        bytes = context.getCompressor().decompress(bytes);
-        context.getCachePerfStats().endDecompression(time);      
-      }
-    }
-    return bytes;
-  }
 
   /**
    * The previous value at the address in 're' will be @Released and then the

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56b37571/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/AbstractStoredObjectTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/AbstractStoredObjectTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/AbstractStoredObjectTestBase.java
new file mode 100644
index 0000000..a5fac48
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/AbstractStoredObjectTestBase.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.internal.offheap;
+
+import com.gemstone.gemfire.internal.DataSerializableFixedID;
+import com.gemstone.gemfire.internal.offheap.StoredObject;
+import org.junit.Test;
+
+import java.io.DataOutput;
+import java.io.IOException;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+public abstract class AbstractStoredObjectTestBase {
+
+    /* Returns Value as an Object Eg: Integer or UserDefinedRegionValue */
+    protected abstract Object getValue();
+
+    /* Returns Value as a ByteArray (not serialized) */
+    protected abstract byte[] getValueAsByteArray();
+
+    protected abstract Object convertByteArrayToObject(byte[] valueInByteArray);
+
+    protected abstract Object convertSerializedByteArrayToObject(byte[] valueInSerializedByteArray);
+
+    protected abstract StoredObject createValueAsUnserializedStoredObject(Object value);
+
+    protected abstract StoredObject createValueAsSerializedStoredObject(Object value);
+
+    @Test
+    public void getValueAsDeserializedHeapObjectShouldReturnDeserializedValueIfValueIsSerialized() {
+        Object regionEntryValue = getValue();
+        StoredObject storedObject = createValueAsSerializedStoredObject(regionEntryValue);
+
+        Integer actualRegionEntryValue = (Integer) storedObject.getValueAsDeserializedHeapObject();
+        assertEquals(regionEntryValue, actualRegionEntryValue);
+    }
+
+    @Test
+    public void getValueAsDeserializedHeapObjectShouldReturnValueAsIsIfNotSerialized() {
+        byte[] regionEntryValue = getValueAsByteArray();
+        StoredObject storedObject = createValueAsUnserializedStoredObject(regionEntryValue);
+
+        byte[] deserializedValue = (byte[]) storedObject.getValueAsDeserializedHeapObject();
+        assertArrayEquals(regionEntryValue, deserializedValue);
+    }
+
+    @Test
+    public void getValueAsHeapByteArrayShouldReturnSerializedByteArrayIfValueIsSerialized() {
+        Object regionEntryValue = getValue();
+        StoredObject storedObject = createValueAsSerializedStoredObject(regionEntryValue);
+
+        byte[] valueInSerializedByteArray = (byte[]) storedObject.getValueAsHeapByteArray();
+        Object actualRegionEntryValue = convertSerializedByteArrayToObject(valueInSerializedByteArray);
+
+        assertEquals(regionEntryValue, actualRegionEntryValue);
+    }
+
+    @Test
+    public void getValueAsHeapByteArrayShouldReturnDeserializedByteArrayIfValueIsNotSerialized() {
+        Object regionEntryValue = getValue();
+
+        StoredObject storedObject = createValueAsUnserializedStoredObject(regionEntryValue);
+
+        byte[] valueInByteArray = (byte[]) storedObject.getValueAsHeapByteArray();
+
+        Object actualRegionEntryValue = convertByteArrayToObject(valueInByteArray);
+
+        assertEquals(regionEntryValue, actualRegionEntryValue);
+    }
+
+    @Test
+    public void getStringFormShouldReturnStringFromDeserializedValue() {
+        Object regionEntryValue = getValue();
+        StoredObject storedObject = createValueAsSerializedStoredObject(regionEntryValue);
+
+        String stringForm = storedObject.getStringForm();
+        assertEquals(String.valueOf(regionEntryValue), stringForm);
+    }
+
+    @Test
+    public void getValueShouldReturnSerializedValue() {
+        Object regionEntryValue = getValue();
+        StoredObject storedObject = createValueAsSerializedStoredObject(regionEntryValue);
+
+        byte[] valueAsSerializedByteArray = (byte[]) storedObject.getValue();
+
+        Object actualValue = convertSerializedByteArrayToObject(valueAsSerializedByteArray);
+
+        assertEquals(regionEntryValue, actualValue);
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void getValueShouldThrowExceptionIfValueIsNotSerialized() {
+        Object regionEntryValue = getValue();
+        StoredObject storedObject = createValueAsUnserializedStoredObject(regionEntryValue);
+
+        byte[] deserializedValue = (byte[]) storedObject.getValue();
+    }
+
+    @Test
+    public void getDeserializedWritableCopyShouldReturnDeserializedValue() {
+        byte[] regionEntryValue = getValueAsByteArray();
+        StoredObject storedObject = createValueAsSerializedStoredObject(regionEntryValue);
+
+        assertArrayEquals(regionEntryValue, (byte[]) storedObject.getDeserializedWritableCopy(null, null));
+    }
+
+    @Test
+    public void writeValueAsByteArrayWritesToProvidedDataOutput() throws IOException {
+        byte[] regionEntryValue = getValueAsByteArray();
+        StoredObject storedObject = createValueAsSerializedStoredObject(regionEntryValue);
+
+        DataOutput dataOutput = mock(DataOutput.class);
+        storedObject.writeValueAsByteArray(dataOutput);
+
+        verify(dataOutput, times(1)).write(storedObject.getSerializedValue(), 0 , storedObject.getSerializedValue().length);
+    }
+
+    @Test
+    public void sendToShouldWriteSerializedValueToDataOutput() throws IOException {
+        Object regionEntryValue = getValue();
+        StoredObject storedObject = createValueAsSerializedStoredObject(regionEntryValue);
+
+        DataOutput dataOutput = mock(DataOutput.class);
+        storedObject.sendTo(dataOutput);
+
+        verify(dataOutput, times(1)).write(storedObject.getSerializedValue());
+    }
+
+    @Test
+    public void sendToShouldWriteDeserializedObjectToDataOutput() throws IOException {
+        byte[] regionEntryValue = getValueAsByteArray();
+        StoredObject storedObject = createValueAsUnserializedStoredObject(regionEntryValue);
+
+        DataOutput dataOutput = mock(DataOutput.class);
+        storedObject.sendTo(dataOutput);
+
+        verify(dataOutput, times(1)).write(regionEntryValue, 0, regionEntryValue.length);
+    }
+
+    @Test
+    public void sendAsByteArrayShouldWriteSerializedValueToDataOutput() throws IOException {
+        Object regionEntryValue = getValue();
+        StoredObject storedObject = createValueAsSerializedStoredObject(regionEntryValue);
+
+        DataOutput dataOutput = mock(DataOutput.class);
+        storedObject.sendAsByteArray(dataOutput);
+
+        verify(dataOutput, times(1)).write(storedObject.getSerializedValue(), 0, storedObject.getSerializedValue().length);
+    }
+
+    @Test
+    public void sendAsByteArrayShouldWriteDeserializedObjectToDataOutput() throws IOException {
+        byte[] regionEntryValue = getValueAsByteArray();
+        StoredObject storedObject = createValueAsUnserializedStoredObject(regionEntryValue);
+
+        DataOutput dataOutput = mock(DataOutput.class);
+        storedObject.sendAsByteArray(dataOutput);
+
+        verify(dataOutput, times(1)).write(regionEntryValue, 0, regionEntryValue.length);
+    }
+
+    @Test
+    public void sendAsCachedDeserializableShouldWriteSerializedValueToDataOutputAndSetsHeader() throws IOException {
+        Object regionEntryValue = getValue();
+        StoredObject storedObject = createValueAsSerializedStoredObject(regionEntryValue);
+
+        DataOutput dataOutput = mock(DataOutput.class);
+        storedObject.sendAsCachedDeserializable(dataOutput);
+
+        verify(dataOutput, times(1)).writeByte((DataSerializableFixedID.VM_CACHED_DESERIALIZABLE));
+        verify(dataOutput, times(1)).write(storedObject.getSerializedValue(), 0, storedObject.getSerializedValue().length);
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void sendAsCachedDeserializableShouldThrowExceptionIfValueIsNotSerialized() throws IOException {
+        Object regionEntryValue = getValue();
+        StoredObject storedObject = createValueAsUnserializedStoredObject(regionEntryValue);
+
+        DataOutput dataOutput = mock(DataOutput.class);
+        storedObject.sendAsCachedDeserializable(dataOutput);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56b37571/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/DataAsAddressJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/DataAsAddressJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/DataAsAddressJUnitTest.java
new file mode 100644
index 0000000..8a251fd
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/DataAsAddressJUnitTest.java
@@ -0,0 +1,368 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.internal.offheap;
+
+import com.gemstone.gemfire.compression.Compressor;
+import com.gemstone.gemfire.internal.cache.BytesAndBitsForCompactor;
+import com.gemstone.gemfire.internal.cache.CachePerfStats;
+import com.gemstone.gemfire.internal.cache.EntryEventImpl;
+import com.gemstone.gemfire.internal.cache.RegionEntryContext;
+import com.gemstone.gemfire.internal.offheap.DataAsAddress;
+
+import com.gemstone.gemfire.internal.offheap.OffHeapRegionEntryHelper;
+import com.gemstone.gemfire.test.junit.categories.UnitTest;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mock;
+
+import java.nio.ByteBuffer;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.*;
+
+@Category(UnitTest.class)
+public class DataAsAddressJUnitTest extends AbstractStoredObjectTestBase {
+
+    @Override
+    public Object getValue() {
+        return Integer.valueOf(123456789);
+    }
+
+    @Override
+    public byte[] getValueAsByteArray() {
+        return convertValueToByteArray(getValue());
+    }
+
+    private byte[] convertValueToByteArray(Object value) {
+        return ByteBuffer.allocate(Integer.SIZE / Byte.SIZE).putInt((Integer) value).array();
+    }
+
+    @Override
+    public Object convertByteArrayToObject(byte[] valueInByteArray) {
+        return ByteBuffer.wrap(valueInByteArray).getInt();
+    }
+
+    @Override
+    public Object convertSerializedByteArrayToObject(byte[] valueInSerializedByteArray) {
+       return EntryEventImpl.deserialize(valueInSerializedByteArray);
+    }
+
+    @Override
+    public DataAsAddress createValueAsUnserializedStoredObject(Object value) {
+        byte[] valueInByteArray;
+        if(value instanceof Integer) {
+            valueInByteArray = convertValueToByteArray(value);
+        } else {
+            valueInByteArray = (byte[]) value;
+        }
+        //encode a non-serialized entry value to address
+        long encodedAddress = OffHeapRegionEntryHelper.encodeDataAsAddress(valueInByteArray, false, false);
+        return new DataAsAddress(encodedAddress);
+    }
+
+    @Override
+    public DataAsAddress createValueAsSerializedStoredObject(Object value) {
+        byte[] valueInSerializedByteArray = EntryEventImpl.serialize(value);
+        //encode a serialized entry value to address
+        long encodedAddress = OffHeapRegionEntryHelper.encodeDataAsAddress(valueInSerializedByteArray, true, false);
+        return new DataAsAddress(encodedAddress);
+    }
+
+    public DataAsAddress createValueAsCompressedStoredObject(Object value) {
+        byte[] valueInSerializedByteArray = EntryEventImpl.serialize(value);
+        //encode a serialized, compressed entry value to address
+        long encodedAddress = OffHeapRegionEntryHelper.encodeDataAsAddress(valueInSerializedByteArray, true, true);
+        return new DataAsAddress(encodedAddress);
+    }
+
+    public DataAsAddress createValueAsUncompressedStoredObject(Object value) {
+        byte[] valueInSerializedByteArray = EntryEventImpl.serialize(value);
+        //encode a serialized, uncompressed entry value to address
+        long encodedAddress = OffHeapRegionEntryHelper.encodeDataAsAddress(valueInSerializedByteArray, true, false);
+        return new DataAsAddress(encodedAddress);
+    }
+
+    @Test
+    public void shouldReturnCorrectEncodingAddress() {
+
+        DataAsAddress address1 = new DataAsAddress(10001L);
+        assertNotNull(address1);
+        assertEquals("Encoding address should be:", 10001, address1.getEncodedAddress());
+
+        DataAsAddress address2 = new DataAsAddress(10002L);
+        assertNotNull(address2);
+        assertEquals("Returning always 10001 expected 10002", 10002, address2.getEncodedAddress());
+    }
+
+    @Test
+    public void twoAddressesShouldBeEqualIfEncodingAddressIsSame() {
+        DataAsAddress address1 = new DataAsAddress(10001L);
+        DataAsAddress address2 = new DataAsAddress(10001L);
+
+        assertEquals("Two addresses are equal if encoding address is same", true, address1.equals(address2));
+    }
+
+    @Test
+    public void twoAddressesShouldNotBeEqualIfEncodingAddressIsNotSame() {
+        DataAsAddress address1 = new DataAsAddress(10001L);
+        DataAsAddress address2 = new DataAsAddress(10002L);
+
+        assertEquals("Two addresses are not equal if encoding address is not same", false, address1.equals(address2));
+    }
+
+    @Test
+    public void twoAddressesAreNotEqualIfTheyAreNotTypeDataAsAddress() {
+        DataAsAddress address1 = new DataAsAddress(10001L);
+        Long address2 = new Long(10002L);
+
+        assertEquals("Two addresses are not equal if encoding address is not same", false, address1.equals(address2));
+    }
+
+    @Test
+    public void addressHashCodeShouldBe() {
+        DataAsAddress address1 = new DataAsAddress(10001L);
+        assertEquals("", 10001, address1.hashCode());
+    }
+
+    @Test
+    public void getSizeInBytesAlwaysReturnsZero() {
+        DataAsAddress address1 = new DataAsAddress(10001L);
+        DataAsAddress address2 = new DataAsAddress(10002L);
+
+        assertEquals("getSizeInBytes", 0, address1.getSizeInBytes());
+        assertEquals("getSizeInBytes", 0, address2.getSizeInBytes());
+    }
+
+    @Test
+    public void getValueSizeInBytesAlwaysReturnsZero() {
+        DataAsAddress address1 = new DataAsAddress(10001L);
+        DataAsAddress address2 = new DataAsAddress(10002L);
+
+        assertEquals("getSizeInBytes", 0, address1.getValueSizeInBytes());
+        assertEquals("getSizeInBytes", 0, address2.getValueSizeInBytes());
+    }
+
+    @Test
+    public void retainShouldAlwaysBeTrue() {
+        DataAsAddress address1 = new DataAsAddress(10001L);
+        DataAsAddress address2 = new DataAsAddress(10002L);
+
+        assertEquals("retain", true, address1.retain());
+        assertEquals("retain", true, address2.retain());
+    }
+
+    @Test
+    public void dataAsAddressShouldImplementReleaseToAdhereToStoredObject() {
+        DataAsAddress address = new DataAsAddress(10001L);
+        address.release();
+    }
+
+    @Test
+    public void isCompressedShouldReturnTrueIfCompressed() {
+        Object regionEntryValue = getValue();
+
+        DataAsAddress offheapAddress = createValueAsCompressedStoredObject(regionEntryValue);
+
+        assertEquals("Should return true as it is compressed", true, offheapAddress.isCompressed());
+    }
+
+    @Test
+    public void isCompressedShouldReturnFalseIfNotCompressed() {
+        Object regionEntryValue = getValue();
+
+        DataAsAddress offheapAddress = createValueAsUncompressedStoredObject(regionEntryValue);
+
+        assertEquals("Should return false as it is compressed", false, offheapAddress.isCompressed());
+    }
+
+    @Test
+    public void isSerializedShouldReturnTrueIfSeriazlied() {
+        Object regionEntryValue = getValue();
+
+        DataAsAddress offheapAddress = createValueAsSerializedStoredObject(regionEntryValue);
+
+        assertEquals("Should return true as it is serialized", true, offheapAddress.isSerialized());
+    }
+
+    @Test
+    public void isSerializedShouldReturnFalseIfNotSeriazlied() {
+        Object regionEntryValue = getValue();
+
+        DataAsAddress offheapAddress = createValueAsUnserializedStoredObject(regionEntryValue);
+
+        assertEquals("Should return false as it is serialized", false, offheapAddress.isSerialized());
+    }
+
+    @Test
+    public void getDecompressedBytesShouldReturnDecompressedBytesIfCompressed() {
+        Object regionEntryValue = getValue();
+        byte[] regionEntryValueAsBytes =  convertValueToByteArray(regionEntryValue);
+
+        //encode a non-serialized and compressed entry value to address - the last argument indicates it is compressed
+        long encodedAddress = OffHeapRegionEntryHelper.encodeDataAsAddress(regionEntryValueAsBytes, false, true);
+        DataAsAddress offheapAddress = new DataAsAddress(encodedAddress);
+
+        RegionEntryContext regionContext = mock(RegionEntryContext.class);
+        CachePerfStats cacheStats = mock(CachePerfStats.class);
+        Compressor compressor = mock(Compressor.class);
+
+        long startTime = 10000L;
+
+        //mock required things
+        when(regionContext.getCompressor()).thenReturn(compressor);
+        when(compressor.decompress(regionEntryValueAsBytes)).thenReturn(regionEntryValueAsBytes);
+        when(regionContext.getCachePerfStats()).thenReturn(cacheStats);
+        when(cacheStats.startDecompression()).thenReturn(startTime);
+
+        //invoke the thing
+        byte[] bytes = offheapAddress.getDecompressedBytes(regionContext);
+
+        //verify the thing happened
+        verify(cacheStats, atLeastOnce()).startDecompression();
+        verify(compressor, times(1)).decompress(regionEntryValueAsBytes);
+        verify(cacheStats, atLeastOnce()).endDecompression(startTime);
+
+        assertArrayEquals(regionEntryValueAsBytes, bytes);
+    }
+
+    @Test
+    public void getDecompressedBytesShouldNotTryToDecompressIfNotCompressed() {
+        Object regionEntryValue = getValue();
+
+        DataAsAddress offheapAddress = createValueAsUncompressedStoredObject(regionEntryValue);
+
+        //mock the thing
+        RegionEntryContext regionContext = mock(RegionEntryContext.class);
+        Compressor compressor = mock(Compressor.class);
+        when(regionContext.getCompressor()).thenReturn(compressor);
+
+        //invoke the thing
+        byte[] actualValueInBytes = offheapAddress.getDecompressedBytes(regionContext);
+
+        //createValueAsUncompressedStoredObject uses a serialized value - so convert it to an object
+        Object actualRegionValue = convertSerializedByteArrayToObject(actualValueInBytes);
+
+        //verify the thing happened
+        verify(regionContext, never()).getCompressor();
+        assertEquals(regionEntryValue, actualRegionValue);
+    }
+
+    @Test
+    public void getRawBytesShouldReturnAByteArray() {
+        byte[] regionEntryValueAsBytes = getValueAsByteArray();
+
+        DataAsAddress offheapAddress = createValueAsUnserializedStoredObject(regionEntryValueAsBytes);
+        byte[] actual = offheapAddress.getRawBytes();
+
+        assertArrayEquals(regionEntryValueAsBytes, actual);
+    }
+
+    @Test
+    public void getSerializedValueShouldReturnASerializedByteArray() {
+        Object regionEntryValue = getValue();
+
+        DataAsAddress offheapAddress = createValueAsSerializedStoredObject(regionEntryValue);
+
+        byte[] actualSerializedValue = offheapAddress.getSerializedValue();
+
+        Object actualRegionEntryValue = convertSerializedByteArrayToObject(actualSerializedValue);
+
+        assertEquals(regionEntryValue, actualRegionEntryValue);
+    }
+
+    @Test
+    public void getDeserializedObjectShouldReturnADeserializedObject() {
+        Object regionEntryValue = getValue();
+
+        DataAsAddress offheapAddress = createValueAsSerializedStoredObject(regionEntryValue);
+
+        Integer actualRegionEntryValue = (Integer) offheapAddress.getDeserializedValue(null, null);
+
+        assertEquals(regionEntryValue, actualRegionEntryValue);
+    }
+
+    @Test
+    public void getDeserializedObjectShouldReturnAByteArrayAsIsIfNotSerialized() {
+        byte[] regionEntryValueAsBytes = getValueAsByteArray();
+
+        DataAsAddress offheapAddress = createValueAsUnserializedStoredObject(regionEntryValueAsBytes);
+
+        byte[] deserializeValue = (byte[]) offheapAddress.getDeserializedValue(null, null);
+
+        assertArrayEquals(regionEntryValueAsBytes, deserializeValue);
+    }
+
+    @Test
+    public void fillSerializedValueShouldFillWrapperWithSerializedValueIfValueIsSerialized() {
+        Object regionEntryValue = getValue();
+        byte[] serializedRegionEntryValue = EntryEventImpl.serialize(regionEntryValue);
+
+        //encode a serialized entry value to address
+        long encodedAddress = OffHeapRegionEntryHelper.encodeDataAsAddress(serializedRegionEntryValue, true, false);
+
+        DataAsAddress offheapAddress = new DataAsAddress(encodedAddress);
+
+        //mock the things
+        BytesAndBitsForCompactor wrapper = mock(BytesAndBitsForCompactor.class);
+
+        byte userBits = 1;
+        offheapAddress.fillSerializedValue(wrapper, userBits);
+
+        verify(wrapper, times(1)).setData(serializedRegionEntryValue, userBits, serializedRegionEntryValue.length, true);
+    }
+
+    @Test
+    public void fillSerializedValueShouldFillWrapperWithDeserializedValueIfValueIsNotSerialized() {
+        Object regionEntryValue = getValue();
+        byte[] regionEntryValueAsBytes =  convertValueToByteArray(regionEntryValue);
+
+        //encode an unserialized entry value to address
+        long encodedAddress = OffHeapRegionEntryHelper.encodeDataAsAddress(regionEntryValueAsBytes, false, false);
+
+        DataAsAddress offheapAddress = new DataAsAddress(encodedAddress);
+
+        //mock the things
+        BytesAndBitsForCompactor wrapper = mock(BytesAndBitsForCompactor.class);
+
+        byte userBits = 1;
+        offheapAddress.fillSerializedValue(wrapper, userBits);
+
+        verify(wrapper, times(1)).setData(regionEntryValueAsBytes, userBits, regionEntryValueAsBytes.length, true);
+    }
+
+    @Test
+    public void getStringFormShouldCatchExceptionAndReturnErrorMessageAsString() {
+        Object regionEntryValueAsBytes = getValue();
+
+        byte[] serializedValue = EntryEventImpl.serialize(regionEntryValueAsBytes);
+
+        //store -127 (DSCODE.ILLEGAL) so that deserialization throws an exception
+        serializedValue[0] = -127;
+
+        //encode a serialized entry value to address
+        long encodedAddress = OffHeapRegionEntryHelper.encodeDataAsAddress(serializedValue, true, false);
+
+        DataAsAddress offheapAddress = new DataAsAddress(encodedAddress);
+
+        String errorMessage = offheapAddress.getStringForm();
+
+        assertEquals(true, errorMessage.contains("Could not convert object to string because "));
+    }
+}


[20/50] [abbrv] incubator-geode git commit: GEODE-642: fix race in OffHeapStorageJUnitTest

Posted by kl...@apache.org.
GEODE-642: fix race in OffHeapStorageJUnitTest

The problem was caused by the listener not rechecking the system
property every time it was notified of an OutOfOffHeapMemoryException.
Since the code that disconnects the distributed system when we run out
of off-heap memory is async, the test would only pass intermittently.
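
For illustration only (the listener class and property name below are placeholders,
not Geode's actual API), this is the shape of the fix: read the system property inside
the callback every time it fires instead of caching it in a field at construction time,
so a test that flips the property after the listener exists still changes the behavior.

    class DisconnectingListenerSketch {
      // Hypothetical property name, used only in this sketch.
      static final String STAY_CONNECTED_PROP = "example.stayConnectedOnOutOfOffHeapMemory";

      private final Object lock = new Object();
      private Runnable disconnectAction; // stands in for the real distributed-system reference

      void outOfOffHeapMemory(Throwable cause) {
        synchronized (lock) {
          if (disconnectAction == null) {
            return; // already shut down
          }
          // Re-read the property on every notification; a value cached in the
          // constructor would ignore changes the test makes after start-up.
          if (Boolean.getBoolean(STAY_CONNECTED_PROP)) {
            return;
          }
          disconnectAction.run(); // the real listener disconnects asynchronously
          disconnectAction = null;
        }
      }
    }

Because the real disconnect runs asynchronously, test assertions about it also need to
wait for the disconnect to complete rather than checking immediately.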


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/80b59bfa
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/80b59bfa
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/80b59bfa

Branch: refs/heads/feature/GEODE-291
Commit: 80b59bfa3dcf9b2ac1317c6996591b1fd68494e1
Parents: a6398d9
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Tue Dec 8 10:42:58 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Tue Dec 8 11:36:25 2015 -0800

----------------------------------------------------------------------
 .../com/gemstone/gemfire/internal/offheap/OffHeapStorage.java     | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/80b59bfa/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapStorage.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapStorage.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapStorage.java
index 82cbfeb..3eb839b 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapStorage.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapStorage.java
@@ -395,7 +395,6 @@ public class OffHeapStorage implements OffHeapMemoryStats {
   }
   
   static class DisconnectingOutOfOffHeapMemoryListener implements OutOfOffHeapMemoryListener {
-    private final boolean stayConnectedOnOutOfOffHeapMemory = Boolean.getBoolean(STAY_CONNECTED_ON_OUTOFOFFHEAPMEMORY_PROPERTY);
     private final Object lock = new Object();
     private InternalDistributedSystem ids;
     
@@ -415,7 +414,7 @@ public class OffHeapStorage implements OffHeapMemoryStats {
         if (this.ids == null) {
           return;
         }
-        if (stayConnectedOnOutOfOffHeapMemory) {
+        if (Boolean.getBoolean(STAY_CONNECTED_ON_OUTOFOFFHEAPMEMORY_PROPERTY)) {
           return;
         }
         


[06/50] [abbrv] incubator-geode git commit: new unit tests and code clean-up

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessengerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessengerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessengerJUnitTest.java
index fbdcdf5..4b9c01f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessengerJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessengerJUnitTest.java
@@ -16,26 +16,22 @@
  */
 package com.gemstone.gemfire.distributed.internal.membership.gms.messenger;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.*;
+
 import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.*;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutput;
-import java.io.DataOutputStream;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Properties;
 
-import junit.framework.Assert;
-
+import org.apache.commons.lang.SerializationException;
 import org.jgroups.Event;
+import org.jgroups.JChannel;
 import org.jgroups.Message;
 import org.jgroups.conf.ClassConfigurator;
 import org.jgroups.protocols.UNICAST3;
@@ -43,9 +39,10 @@ import org.jgroups.util.UUID;
 import org.junit.After;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
 
 import com.gemstone.gemfire.ForcedDisconnectException;
+import com.gemstone.gemfire.GemFireIOException;
+import com.gemstone.gemfire.distributed.DistributedSystemDisconnectedException;
 import com.gemstone.gemfire.distributed.internal.DMStats;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.DistributionConfigImpl;
@@ -58,15 +55,21 @@ import com.gemstone.gemfire.distributed.internal.membership.gms.GMSMember;
 import com.gemstone.gemfire.distributed.internal.membership.gms.ServiceConfig;
 import com.gemstone.gemfire.distributed.internal.membership.gms.Services;
 import com.gemstone.gemfire.distributed.internal.membership.gms.Services.Stopper;
+import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.HealthMonitor;
 import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.JoinLeave;
 import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.Manager;
 import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.MessageHandler;
 import com.gemstone.gemfire.distributed.internal.membership.gms.messages.JoinRequestMessage;
+import com.gemstone.gemfire.distributed.internal.membership.gms.messages.JoinResponseMessage;
 import com.gemstone.gemfire.distributed.internal.membership.gms.messages.LeaveRequestMessage;
+import com.gemstone.gemfire.distributed.internal.membership.gms.messenger.JGroupsMessenger.JGroupsReceiver;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.DataSerializableFixedID;
 import com.gemstone.gemfire.internal.HeapDataOutputStream;
+import com.gemstone.gemfire.internal.SocketCreator;
 import com.gemstone.gemfire.internal.Version;
 import com.gemstone.gemfire.internal.admin.remote.RemoteTransportConfig;
+import com.gemstone.gemfire.internal.cache.DistributedCacheOperation;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
 @Category(UnitTest.class)
@@ -76,6 +79,7 @@ public class JGroupsMessengerJUnitTest {
   private JoinLeave joinLeave;
   private Manager manager;
   private Stopper stopper;
+  private HealthMonitor healthMonitor;
   private InterceptUDP interceptor;
 
 
@@ -83,6 +87,10 @@ public class JGroupsMessengerJUnitTest {
    * Create stub and mock objects
    */
   private void initMocks(boolean enableMcast) throws Exception {
+    if (messenger != null) {
+      messenger.stop();
+      messenger = null;
+    }
     Properties nonDefault = new Properties();
     nonDefault.put(DistributionConfig.DISABLE_TCP_NAME, "true");
     nonDefault.put(DistributionConfig.MCAST_PORT_NAME, enableMcast? ""+AvailablePortHelper.getRandomAvailableUDPPort() : "0");
@@ -100,6 +108,8 @@ public class JGroupsMessengerJUnitTest {
     manager = mock(Manager.class);
     when(manager.isMulticastAllowed()).thenReturn(enableMcast);
     
+    healthMonitor = mock(HealthMonitor.class);
+    
     joinLeave = mock(JoinLeave.class);
     
     ServiceConfig serviceConfig = new ServiceConfig(tconfig, config);
@@ -107,6 +117,7 @@ public class JGroupsMessengerJUnitTest {
     services = mock(Services.class);
     when(services.getConfig()).thenReturn(serviceConfig);
     when(services.getCancelCriterion()).thenReturn(stopper);
+    when(services.getHealthMonitor()).thenReturn(healthMonitor);
     when(services.getManager()).thenReturn(manager);
     when(services.getJoinLeave()).thenReturn(joinLeave);
     when(services.getStatistics()).thenReturn(mock(DMStats.class));
@@ -121,7 +132,7 @@ public class JGroupsMessengerJUnitTest {
         "<"+InterceptUDP.class.getName()+"/>" +
         jgroupsConfig.substring(insertIdx);
     messenger.setJGroupsStackConfigForTesting(jgroupsConfig);
-    System.out.println("jgroups config: " + jgroupsConfig);
+//    System.out.println("jgroups config: " + jgroupsConfig);
     
     messenger.start();
     messenger.started();
@@ -141,13 +152,195 @@ public class JGroupsMessengerJUnitTest {
   @Test
   public void testMemberWeightIsSerialized() throws Exception {
     HeapDataOutputStream out = new HeapDataOutputStream(500, Version.CURRENT);
-    InternalDistributedMember m = new InternalDistributedMember("localhost", 8888);
-    ((GMSMember)m.getNetMember()).setMemberWeight((byte)40);
-    m.toData(out);
+    InternalDistributedMember mbr = createAddress(8888);
+    ((GMSMember)mbr.getNetMember()).setMemberWeight((byte)40);
+    mbr.toData(out);
     DataInputStream in = new DataInputStream(new ByteArrayInputStream(out.toByteArray()));
-    m = new InternalDistributedMember();
-    m.fromData(in);
-    assertEquals(40, m.getNetMember().getMemberWeight());
+    mbr = new InternalDistributedMember();
+    mbr.fromData(in);
+    assertEquals(40, mbr.getNetMember().getMemberWeight());
+  }
+  
+  @Test
+  public void testSerializationError() throws Exception {
+    for (int i=0; i<2 ; i++) {
+      boolean enableMcast = (i==1);
+      initMocks(enableMcast);
+      InternalDistributedMember mbr = createAddress(8888);
+      DistributedCacheOperation.CacheOperationMessage msg = mock(DistributedCacheOperation.CacheOperationMessage.class);
+      when(msg.getRecipients()).thenReturn(new InternalDistributedMember[] {mbr});
+      when(msg.getMulticast()).thenReturn(enableMcast);
+      if (!enableMcast) {
+        // for non-mcast we send a message with a reply-processor
+        when(msg.getProcessorId()).thenReturn(1234);
+      } else {
+        // for mcast we send a direct-ack message and expect the messenger
+        // to register it
+        stub(msg.isDirectAck()).toReturn(true);
+      }
+      when(msg.getDSFID()).thenReturn((int)DataSerializableFixedID.PUT_ALL_MESSAGE);
+      
+      // for code coverage we need to test with both a SerializationException and
+      // an IOException.  The former is wrapped in a GemfireIOException while the
+      // latter is not
+      doThrow(new SerializationException()).when(msg).toData(any(DataOutput.class));
+      try {
+        messenger.send(msg);
+        fail("expected a failure");
+      } catch (GemFireIOException e) {
+        // success
+      }
+      if (enableMcast) {
+        verify(msg, atLeastOnce()).registerProcessor();
+      }
+      doThrow(new IOException()).when(msg).toData(any(DataOutput.class));
+      try {
+        messenger.send(msg);
+        fail("expected a failure");
+      } catch (GemFireIOException e) {
+        // success
+      }
+    }
+  }
+  
+  @Test
+  public void testJChannelError() throws Exception {
+    for (int i=0; i<2 ; i++) {
+      boolean enableMcast = (i==1);
+      initMocks(enableMcast);
+      JChannel mockChannel = mock(JChannel.class);
+      when(mockChannel.isConnected()).thenReturn(true);
+      doThrow(new RuntimeException()).when(mockChannel).send(any(Message.class));
+      JChannel realChannel = messenger.myChannel;
+      messenger.myChannel = mockChannel;
+      try {
+        InternalDistributedMember mbr = createAddress(8888);
+        DistributedCacheOperation.CacheOperationMessage msg = mock(DistributedCacheOperation.CacheOperationMessage.class);
+        when(msg.getRecipients()).thenReturn(new InternalDistributedMember[] {mbr});
+        when(msg.getMulticast()).thenReturn(enableMcast);
+        when(msg.getProcessorId()).thenReturn(1234);
+        when(msg.getDSFID()).thenReturn((int)DataSerializableFixedID.PUT_ALL_MESSAGE);
+        try {
+          messenger.send(msg);
+          fail("expected a failure");
+        } catch (DistributedSystemDisconnectedException e) {
+          // success
+        }
+        verify(mockChannel).send(isA(Message.class));
+      } finally {
+        messenger.myChannel = realChannel;
+      }
+    }
+  }
+  
+  @Test
+  public void testJChannelErrorDuringDisconnect() throws Exception {
+    for (int i=0; i<4 ; i++) {
+      System.out.println("loop #"+i);
+      boolean enableMcast = (i%2 == 1);
+      initMocks(enableMcast);
+      JChannel mockChannel = mock(JChannel.class);
+      when(mockChannel.isConnected()).thenReturn(true);
+      Exception ex, shutdownCause;
+      if (i < 2) {
+        ex = new RuntimeException("");
+        shutdownCause = new RuntimeException("shutdownCause");
+      } else {
+        shutdownCause = new ForcedDisconnectException("");
+        ex = new RuntimeException("", shutdownCause);
+      }
+      doThrow(ex).when(mockChannel).send(any(Message.class));
+      JChannel realChannel = messenger.myChannel;
+      messenger.myChannel = mockChannel;
+      
+      when(services.getShutdownCause()).thenReturn(shutdownCause);
+      
+      try {
+        InternalDistributedMember mbr = createAddress(8888);
+        DistributedCacheOperation.CacheOperationMessage msg = mock(DistributedCacheOperation.CacheOperationMessage.class);
+        when(msg.getRecipients()).thenReturn(new InternalDistributedMember[] {mbr});
+        when(msg.getMulticast()).thenReturn(enableMcast);
+        when(msg.getProcessorId()).thenReturn(1234);
+        when(msg.getDSFID()).thenReturn((int)DataSerializableFixedID.PUT_ALL_MESSAGE);
+        try {
+          messenger.send(msg);
+          fail("expected a failure");
+        } catch (DistributedSystemDisconnectedException e) {
+          // the ultimate cause should be the shutdownCause returned
+          // by Services.getShutdownCause()
+          Throwable cause = e;
+          while (cause.getCause() != null) {
+            cause = cause.getCause();
+          }
+          assertTrue(cause != e);
+          assertTrue(cause == shutdownCause);
+        }
+        verify(mockChannel).send(isA(Message.class));
+      } finally {
+        messenger.myChannel = realChannel;
+      }
+    }
+  }
+  
+  @Test
+  public void testSendWhenChannelIsClosed() throws Exception {
+    for (int i=0; i<2 ; i++) {
+      initMocks(false);
+      JChannel mockChannel = mock(JChannel.class);
+      when(mockChannel.isConnected()).thenReturn(false);
+      doThrow(new RuntimeException()).when(mockChannel).send(any(Message.class));
+      JChannel realChannel = messenger.myChannel;
+      messenger.myChannel = mockChannel;
+      try {
+        InternalDistributedMember mbr = createAddress(8888);
+        DistributedCacheOperation.CacheOperationMessage msg = mock(DistributedCacheOperation.CacheOperationMessage.class);
+        when(msg.getRecipients()).thenReturn(new InternalDistributedMember[] {mbr});
+        when(msg.getMulticast()).thenReturn(false);
+        when(msg.getProcessorId()).thenReturn(1234);
+        try {
+          messenger.send(msg);
+          fail("expected a failure");
+        } catch (DistributedSystemDisconnectedException e) {
+          // success
+        }
+        verify(mockChannel, never()).send(isA(Message.class));
+      } finally {
+        messenger.myChannel = realChannel;
+      }
+    }
+  }
+
+  @Test
+  public void testSendUnreliably() throws Exception {
+    for (int i=0; i<2 ; i++) {
+      boolean enableMcast = (i==1);
+      initMocks(enableMcast);
+      InternalDistributedMember mbr = createAddress(8888);
+      DistributedCacheOperation.CacheOperationMessage msg = mock(DistributedCacheOperation.CacheOperationMessage.class);
+      when(msg.getRecipients()).thenReturn(new InternalDistributedMember[] {mbr});
+      when(msg.getMulticast()).thenReturn(enableMcast);
+      if (!enableMcast) {
+        // for non-mcast we send a message with a reply-processor
+        when(msg.getProcessorId()).thenReturn(1234);
+      } else {
+        // for mcast we send a direct-ack message and expect the messenger
+        // to register it
+        when(msg.isDirectAck()).thenReturn(true);
+      }
+      when(msg.getDSFID()).thenReturn((int)DataSerializableFixedID.PUT_ALL_MESSAGE);
+      interceptor.collectMessages = true;
+      try {
+        messenger.sendUnreliably(msg);
+      } catch (GemFireIOException e) {
+        fail("expected success");
+      }
+      if (enableMcast) {
+        verify(msg, atLeastOnce()).registerProcessor();
+      }
+      verify(msg).toData(isA(DataOutput.class));
+      assertTrue("expected 1 message but found " + interceptor.collectedMessages, interceptor.collectedMessages.size() == 1);
+      assertTrue(interceptor.collectedMessages.get(0).isFlagSet(Message.Flag.NO_RELIABILITY));
+    }
   }
   
   @Test
@@ -265,7 +458,7 @@ public class JGroupsMessengerJUnitTest {
   public void testSendToMultipleMembers() throws Exception {
     initMocks(false);
     InternalDistributedMember sender = messenger.getMemberID();
-    InternalDistributedMember other = new InternalDistributedMember("localhost", 8888);
+    InternalDistributedMember other = createAddress(8888);
 
     NetView v = new NetView(sender);
     v.add(other);
@@ -285,11 +478,11 @@ public class JGroupsMessengerJUnitTest {
   @Test
   public void testChannelStillConnectedAfterEmergencyCloseAfterForcedDisconnectWithAutoReconnect() throws Exception {
     initMocks(false);
-    Mockito.doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
-    Mockito.doCallRealMethod().when(services).getShutdownCause();
-    Mockito.doCallRealMethod().when(services).emergencyClose();
-    Mockito.doCallRealMethod().when(services).isShutdownDueToForcedDisconnect();
-    Mockito.doCallRealMethod().when(services).isAutoReconnectEnabled();
+    doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
+    doCallRealMethod().when(services).getShutdownCause();
+    doCallRealMethod().when(services).emergencyClose();
+    doCallRealMethod().when(services).isShutdownDueToForcedDisconnect();
+    doCallRealMethod().when(services).isAutoReconnectEnabled();
     services.setShutdownCause(new ForcedDisconnectException("Test Forced Disconnect"));
     assertTrue(messenger.myChannel.isConnected());
     messenger.emergencyClose();
@@ -299,11 +492,11 @@ public class JGroupsMessengerJUnitTest {
   @Test
   public void testChannelStillConnectedAfterStopAfterForcedDisconnectWithAutoReconnect() throws Exception {
     initMocks(false);
-    Mockito.doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
-    Mockito.doCallRealMethod().when(services).getShutdownCause();
-    Mockito.doCallRealMethod().when(services).emergencyClose();
-    Mockito.doCallRealMethod().when(services).isShutdownDueToForcedDisconnect();
-    Mockito.doCallRealMethod().when(services).isAutoReconnectEnabled();
+    doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
+    doCallRealMethod().when(services).getShutdownCause();
+    doCallRealMethod().when(services).emergencyClose();
+    doCallRealMethod().when(services).isShutdownDueToForcedDisconnect();
+    doCallRealMethod().when(services).isAutoReconnectEnabled();
     services.setShutdownCause(new ForcedDisconnectException("Test Forced Disconnect"));
     assertTrue(messenger.myChannel.isConnected());
     messenger.stop();
@@ -313,12 +506,12 @@ public class JGroupsMessengerJUnitTest {
   @Test
   public void testChannelStillConnectedAfteremergencyWhileReconnectingDS() throws Exception {
     initMocks(false);
-    Mockito.doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
-    Mockito.doCallRealMethod().when(services).getShutdownCause();
-    Mockito.doCallRealMethod().when(services).emergencyClose();
-    Mockito.doReturn(false).when(services).isShutdownDueToForcedDisconnect();
-    Mockito.doReturn(false).when(services).isAutoReconnectEnabled();
-    Mockito.doReturn(true).when(manager).isReconnectingDS();
+    doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
+    doCallRealMethod().when(services).getShutdownCause();
+    doCallRealMethod().when(services).emergencyClose();
+    doReturn(false).when(services).isShutdownDueToForcedDisconnect();
+    doReturn(false).when(services).isAutoReconnectEnabled();
+    doReturn(true).when(manager).isReconnectingDS();
     services.setShutdownCause(new ForcedDisconnectException("Test Forced Disconnect"));
     assertTrue(messenger.myChannel.isConnected());
     messenger.emergencyClose();
@@ -329,12 +522,12 @@ public class JGroupsMessengerJUnitTest {
   @Test
   public void testChannelStillConnectedAfterStopWhileReconnectingDS() throws Exception {
     initMocks(false);
-    Mockito.doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
-    Mockito.doCallRealMethod().when(services).getShutdownCause();
-    Mockito.doCallRealMethod().when(services).emergencyClose();
-    Mockito.doReturn(false).when(services).isShutdownDueToForcedDisconnect();
-    Mockito.doReturn(false).when(services).isAutoReconnectEnabled();
-    Mockito.doReturn(true).when(manager).isReconnectingDS();
+    doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
+    doCallRealMethod().when(services).getShutdownCause();
+    doCallRealMethod().when(services).emergencyClose();
+    doReturn(false).when(services).isShutdownDueToForcedDisconnect();
+    doReturn(false).when(services).isAutoReconnectEnabled();
+    doReturn(true).when(manager).isReconnectingDS();
     services.setShutdownCause(new ForcedDisconnectException("Test Forced Disconnect"));
     assertTrue(messenger.myChannel.isConnected());
     messenger.stop();
@@ -344,12 +537,12 @@ public class JGroupsMessengerJUnitTest {
   @Test
   public void testChannelClosedOnEmergencyClose() throws Exception {
     initMocks(false);
-    Mockito.doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
-    Mockito.doCallRealMethod().when(services).getShutdownCause();
-    Mockito.doCallRealMethod().when(services).emergencyClose();
-    Mockito.doReturn(false).when(services).isShutdownDueToForcedDisconnect();
-    Mockito.doReturn(false).when(services).isAutoReconnectEnabled();
-    Mockito.doReturn(false).when(manager).isReconnectingDS();
+    doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
+    doCallRealMethod().when(services).getShutdownCause();
+    doCallRealMethod().when(services).emergencyClose();
+    doReturn(false).when(services).isShutdownDueToForcedDisconnect();
+    doReturn(false).when(services).isAutoReconnectEnabled();
+    doReturn(false).when(manager).isReconnectingDS();
     services.setShutdownCause(new ForcedDisconnectException("Test Forced Disconnect"));
     assertTrue(messenger.myChannel.isConnected());
     messenger.emergencyClose();
@@ -359,12 +552,12 @@ public class JGroupsMessengerJUnitTest {
   @Test
   public void testChannelClosedOnStop() throws Exception {
     initMocks(false);
-    Mockito.doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
-    Mockito.doCallRealMethod().when(services).getShutdownCause();
-    Mockito.doCallRealMethod().when(services).emergencyClose();
-    Mockito.doReturn(false).when(services).isShutdownDueToForcedDisconnect();
-    Mockito.doReturn(false).when(services).isAutoReconnectEnabled();
-    Mockito.doReturn(false).when(manager).isReconnectingDS();
+    doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
+    doCallRealMethod().when(services).getShutdownCause();
+    doCallRealMethod().when(services).emergencyClose();
+    doReturn(false).when(services).isShutdownDueToForcedDisconnect();
+    doReturn(false).when(services).isAutoReconnectEnabled();
+    doReturn(false).when(manager).isReconnectingDS();
     services.setShutdownCause(new ForcedDisconnectException("Test Forced Disconnect"));
     assertTrue(messenger.myChannel.isConnected());
     messenger.stop();
@@ -374,12 +567,12 @@ public class JGroupsMessengerJUnitTest {
   @Test
   public void testChannelClosedAfterEmergencyCloseForcedDisconnectWithoutAutoReconnect() throws Exception {
     initMocks(false);
-    Mockito.doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
-    Mockito.doCallRealMethod().when(services).getShutdownCause();
-    Mockito.doCallRealMethod().when(services).emergencyClose();
-    Mockito.doReturn(true).when(services).isShutdownDueToForcedDisconnect();
-    Mockito.doReturn(false).when(services).isAutoReconnectEnabled();
-    Mockito.doReturn(false).when(manager).isReconnectingDS();
+    doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
+    doCallRealMethod().when(services).getShutdownCause();
+    doCallRealMethod().when(services).emergencyClose();
+    doReturn(true).when(services).isShutdownDueToForcedDisconnect();
+    doReturn(false).when(services).isAutoReconnectEnabled();
+    doReturn(false).when(manager).isReconnectingDS();
     services.setShutdownCause(new ForcedDisconnectException("Test Forced Disconnect"));
     assertTrue(messenger.myChannel.isConnected());
     messenger.emergencyClose();
@@ -389,12 +582,12 @@ public class JGroupsMessengerJUnitTest {
   @Test
   public void testChannelStillConnectedStopAfterForcedDisconnectWithoutAutoReconnect() throws Exception {
     initMocks(false);
-    Mockito.doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
-    Mockito.doCallRealMethod().when(services).getShutdownCause();
-    Mockito.doCallRealMethod().when(services).emergencyClose();
-    Mockito.doReturn(true).when(services).isShutdownDueToForcedDisconnect();
-    Mockito.doReturn(false).when(services).isAutoReconnectEnabled();
-    Mockito.doReturn(false).when(manager).isReconnectingDS();
+    doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
+    doCallRealMethod().when(services).getShutdownCause();
+    doCallRealMethod().when(services).emergencyClose();
+    doReturn(true).when(services).isShutdownDueToForcedDisconnect();
+    doReturn(false).when(services).isAutoReconnectEnabled();
+    doReturn(false).when(manager).isReconnectingDS();
     services.setShutdownCause(new ForcedDisconnectException("Test Forced Disconnect"));
     assertTrue(messenger.myChannel.isConnected());
     messenger.stop();
@@ -404,12 +597,12 @@ public class JGroupsMessengerJUnitTest {
   @Test
   public void testChannelClosedAfterEmergencyCloseNotForcedDisconnectWithAutoReconnect() throws Exception {
     initMocks(false);
-    Mockito.doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
-    Mockito.doCallRealMethod().when(services).getShutdownCause();
-    Mockito.doCallRealMethod().when(services).emergencyClose();
-    Mockito.doReturn(false).when(services).isShutdownDueToForcedDisconnect();
-    Mockito.doReturn(true).when(services).isAutoReconnectEnabled();
-    Mockito.doReturn(false).when(manager).isReconnectingDS();
+    doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
+    doCallRealMethod().when(services).getShutdownCause();
+    doCallRealMethod().when(services).emergencyClose();
+    doReturn(false).when(services).isShutdownDueToForcedDisconnect();
+    doReturn(true).when(services).isAutoReconnectEnabled();
+    doReturn(false).when(manager).isReconnectingDS();
     services.setShutdownCause(new ForcedDisconnectException("Test Forced Disconnect"));
     assertTrue(messenger.myChannel.isConnected());
     messenger.emergencyClose();
@@ -419,18 +612,150 @@ public class JGroupsMessengerJUnitTest {
   @Test
   public void testChannelStillConnectedStopNotForcedDisconnectWithAutoReconnect() throws Exception {
     initMocks(false);
-    Mockito.doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
-    Mockito.doCallRealMethod().when(services).getShutdownCause();
-    Mockito.doCallRealMethod().when(services).emergencyClose();
-    Mockito.doReturn(false).when(services).isShutdownDueToForcedDisconnect();
-    Mockito.doReturn(true).when(services).isAutoReconnectEnabled();
-    Mockito.doReturn(false).when(manager).isReconnectingDS();
+    doCallRealMethod().when(services).setShutdownCause(any(ForcedDisconnectException.class));
+    doCallRealMethod().when(services).getShutdownCause();
+    doCallRealMethod().when(services).emergencyClose();
+    doReturn(false).when(services).isShutdownDueToForcedDisconnect();
+    doReturn(true).when(services).isAutoReconnectEnabled();
+    doReturn(false).when(manager).isReconnectingDS();
     services.setShutdownCause(new ForcedDisconnectException("Test Forced Disconnect"));
     assertTrue(messenger.myChannel.isConnected());
     messenger.stop();
     assertFalse(messenger.myChannel.isConnected());
   }
   
+  @Test
+  public void testMessageFiltering() throws Exception {
+    initMocks(true);
+    InternalDistributedMember mbr = createAddress(8888);
+    NetView view = new NetView(mbr);
+    
+    // the digest should be set in an outgoing join response
+    JoinResponseMessage joinResponse = new JoinResponseMessage(mbr, view);
+    messenger.filterOutgoingMessage(joinResponse);
+    assertNotNull(joinResponse.getMessengerData());
+    
+    // save the view digest for later
+    byte[] data = joinResponse.getMessengerData();
+    
+    // the digest should be used and the message bytes nulled out in an incoming join response
+    messenger.filterIncomingMessage(joinResponse);
+    assertNull(joinResponse.getMessengerData());
+    
+    // the digest shouldn't be set in an outgoing rejection message
+    joinResponse = new JoinResponseMessage("you can't join my distributed system.  nyah nyah nyah!");
+    messenger.filterOutgoingMessage(joinResponse);
+    assertNull(joinResponse.getMessengerData());
+    
+    // the digest shouldn't be installed from an incoming rejection message
+    joinResponse.setMessengerData(data);
+    messenger.filterIncomingMessage(joinResponse);
+    assertNotNull(joinResponse.getMessengerData());
+  }
+  
+  @Test
+  public void testPingPong() throws Exception {
+    initMocks(false);
+    GMSPingPonger pinger = messenger.getPingPonger();
+    InternalDistributedMember mbr = createAddress(8888);
+    JGAddress addr = new JGAddress(mbr);
+    
+    Message pingMessage = pinger.createPingMessage(null, addr);
+    assertTrue(pinger.isPingMessage(pingMessage.getBuffer()));
+    assertFalse(pinger.isPongMessage(pingMessage.getBuffer()));
+    
+    Message pongMessage = pinger.createPongMessage(null, addr);
+    assertTrue(pinger.isPongMessage(pongMessage.getBuffer()));
+    assertFalse(pinger.isPingMessage(pongMessage.getBuffer()));
+    
+    interceptor.collectMessages = true;
+    pinger.sendPingMessage(messenger.myChannel, null, addr);
+    assertEquals("expected 1 message but found " + interceptor.collectedMessages, interceptor.collectedMessages.size(), 1);
+    pingMessage = interceptor.collectedMessages.get(0);
+    assertTrue(pinger.isPingMessage(pingMessage.getBuffer()));
+    
+    interceptor.collectedMessages.clear();
+    pinger.sendPongMessage(messenger.myChannel, null, addr);
+    assertEquals("expected 1 message but found " + interceptor.collectedMessages, interceptor.collectedMessages.size(), 1);
+    pongMessage = interceptor.collectedMessages.get(0);
+    assertTrue(pinger.isPongMessage(pongMessage.getBuffer()));
+
+    interceptor.collectedMessages.clear();
+    JGroupsReceiver receiver = (JGroupsReceiver)messenger.myChannel.getReceiver();
+    long pongsReceived = messenger.pongsReceived;
+    receiver.receive(pongMessage);
+    assertEquals(pongsReceived+1, messenger.pongsReceived);
+    receiver.receive(pingMessage);
+    assertEquals("expected 1 message but found " + interceptor.collectedMessages, interceptor.collectedMessages.size(), 1);
+    Message m = interceptor.collectedMessages.get(0);
+    assertTrue(pinger.isPongMessage(m.getBuffer()));
+  }
+  
+  @Test
+  public void testJGroupsIOExceptionHandler() throws Exception {
+    initMocks(false);
+    InternalDistributedMember mbr = createAddress(8888);
+    NetView v = new NetView(mbr);
+    v.add(messenger.getMemberID());
+    messenger.installView(v);
+
+    IOException ioe = new IOException("test exception");
+    messenger.handleJGroupsIOException(ioe, new JGAddress(mbr));
+    messenger.handleJGroupsIOException(ioe, new JGAddress(mbr)); // should be ignored
+    verify(healthMonitor).checkIfAvailable(mbr, "Unable to send messages to this member via JGroups", true);
+  }
+  
+  @Test
+  public void testReceiver() throws Exception {
+    initMocks(false);
+    JGroupsReceiver receiver = (JGroupsReceiver)messenger.myChannel.getReceiver();
+    
+    // a zero-length message is ignored
+    Message msg = new Message(new JGAddress(messenger.getMemberID()));
+    Object result = messenger.readJGMessage(msg);
+    assertNull(result);
+    
+    // for code coverage we need to pump this message through the receiver
+    receiver.receive(msg);
+    
+    // for more code coverage we need to actually set a buffer in the message
+    msg.setBuffer(new byte[0]);
+    result = messenger.readJGMessage(msg);
+    assertNull(result);
+    receiver.receive(msg);
+    
+    // now create a view and a real distribution-message
+    InternalDistributedMember myAddress = messenger.getMemberID();
+    InternalDistributedMember other = createAddress(8888);
+    NetView v = new NetView(myAddress);
+    v.add(other);
+    when(joinLeave.getView()).thenReturn(v);
+    messenger.installView(v);
+
+    List<InternalDistributedMember> recipients = v.getMembers();
+    SerialAckedMessage dmsg = new SerialAckedMessage();
+    dmsg.setRecipients(recipients);
+
+    // a message is ignored during manager shutdown
+    msg = messenger.createJGMessage(dmsg, new JGAddress(other), Version.CURRENT_ORDINAL);
+    when(manager.shutdownInProgress()).thenReturn(Boolean.TRUE);
+    receiver.receive(msg);
+    verify(manager, never()).processMessage(isA(DistributionMessage.class));
+  }
+  
+  @Test
+  public void testUseOldJChannel() throws Exception {
+    initMocks(false);
+    JChannel channel = messenger.myChannel;
+    services.getConfig().getTransport().setOldDSMembershipInfo(channel);
+    JGroupsMessenger newMessenger = new JGroupsMessenger();
+    newMessenger.init(services);
+    newMessenger.start();
+    newMessenger.started();
+    newMessenger.stop();
+    assertTrue(newMessenger.myChannel == messenger.myChannel);
+  }
+  
   /**
    * creates an InternalDistributedMember address that can be used
    * with the doctored JGroups channel.  This includes a logical
@@ -439,7 +764,7 @@ public class JGroupsMessengerJUnitTest {
    * @param port the UDP port to use for the new address
    */
   private InternalDistributedMember createAddress(int port) {
-    GMSMember gms = new GMSMember("localhost", 8888);
+    GMSMember gms = new GMSMember("localhost", port);
     gms.setUUID(UUID.randomUUID());
     gms.setVmKind(DistributionManager.NORMAL_DM_TYPE);
     gms.setVersionOrdinal(Version.CURRENT_ORDINAL);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/test/java/dunit/RemoteDUnitVMIF.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/dunit/RemoteDUnitVMIF.java b/gemfire-core/src/test/java/dunit/RemoteDUnitVMIF.java
index 0004246..5dffa47 100644
--- a/gemfire-core/src/test/java/dunit/RemoteDUnitVMIF.java
+++ b/gemfire-core/src/test/java/dunit/RemoteDUnitVMIF.java
@@ -31,4 +31,6 @@ public interface RemoteDUnitVMIF extends Remote {
   MethExecutorResult executeMethodOnClass(String name, String methodName,
       Object[] args) throws RemoteException;
 
+  void shutDownVM() throws RemoteException;
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/test/java/dunit/standalone/ChildVM.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/dunit/standalone/ChildVM.java b/gemfire-core/src/test/java/dunit/standalone/ChildVM.java
index 67b2710..45a236a 100644
--- a/gemfire-core/src/test/java/dunit/standalone/ChildVM.java
+++ b/gemfire-core/src/test/java/dunit/standalone/ChildVM.java
@@ -34,6 +34,15 @@ import dunit.standalone.DUnitLauncher.MasterRemote;
  */
 public class ChildVM {
   
+  private static boolean stopMainLoop = false;
+  
+  /**
+   * tells the main() loop to exit
+   */
+  public static void stopVM() {
+    stopMainLoop = true;
+  }
+  
   static {
     createHydraLogWriter();
   }
@@ -54,7 +63,7 @@ public class ChildVM {
       Naming.rebind("//localhost:" + namingPort + "/vm" + vmNum, dunitVM);
       holder.signalVMReady();
       //This loop is here so this VM will die even if the master is mean killed.
-      while(true) {
+      while (!stopMainLoop) {
         holder.ping();
         Thread.sleep(1000);
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/test/java/dunit/standalone/DUnitLauncher.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/dunit/standalone/DUnitLauncher.java b/gemfire-core/src/test/java/dunit/standalone/DUnitLauncher.java
index f3109f3..72c33d6 100644
--- a/gemfire-core/src/test/java/dunit/standalone/DUnitLauncher.java
+++ b/gemfire-core/src/test/java/dunit/standalone/DUnitLauncher.java
@@ -169,6 +169,30 @@ public class DUnitLauncher {
 
     Runtime.getRuntime().addShutdownHook(new Thread() {
       public void run() {
+//        System.out.println("shutting down DUnit JVMs");
+//        for (int i=0; i<NUM_VMS; i++) {
+//          try {
+//            processManager.getStub(i).shutDownVM();
+//          } catch (Exception e) {
+//            System.out.println("exception shutting down vm_"+i+": " + e);
+//          }
+//        }
+//        // TODO - hasLiveVMs always returns true
+//        System.out.print("waiting for JVMs to exit");
+//        long giveUp = System.currentTimeMillis() + 5000;
+//        while (giveUp > System.currentTimeMillis()) {
+//          if (!processManager.hasLiveVMs()) {
+//            return;
+//          }
+//          System.out.print(".");
+//          System.out.flush();
+//          try {
+//            Thread.sleep(1000);
+//          } catch (InterruptedException e) {
+//            break;
+//          }
+//        }
+//        System.out.println("\nkilling any remaining JVMs");
         processManager.killVMs();
       }
     });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/test/java/dunit/standalone/ProcessManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/dunit/standalone/ProcessManager.java b/gemfire-core/src/test/java/dunit/standalone/ProcessManager.java
index 60ac04d..7fc762f 100644
--- a/gemfire-core/src/test/java/dunit/standalone/ProcessManager.java
+++ b/gemfire-core/src/test/java/dunit/standalone/ProcessManager.java
@@ -98,12 +98,20 @@ public class ProcessManager {
   public synchronized void killVMs() {
     for(ProcessHolder process : processes.values()) {
       if(process != null) {
-        //TODO - stop it gracefully? Why bother
         process.kill();
       }
     }
   }
   
+  public synchronized boolean hasLiveVMs() {
+    for(ProcessHolder process : processes.values()) {
+      if(process != null && process.isAlive()) {
+        return true;
+      }
+    }
+    return false;
+  }
+  
   public synchronized void bounce(int vmNum) {
     if(!processes.containsKey(vmNum)) {
       throw new IllegalStateException("No such process " + vmNum);
@@ -240,6 +248,10 @@ public class ProcessManager {
     public boolean isKilled() {
       return killed;
     }
+    
+    public boolean isAlive() {
+      return !killed && process.isAlive();
+    }
   }
 
   public RemoteDUnitVMIF getStub(int i) throws AccessException, RemoteException, NotBoundException, InterruptedException {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/test/java/dunit/standalone/RemoteDUnitVM.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/dunit/standalone/RemoteDUnitVM.java b/gemfire-core/src/test/java/dunit/standalone/RemoteDUnitVM.java
index 15acc2e..742dc55 100644
--- a/gemfire-core/src/test/java/dunit/standalone/RemoteDUnitVM.java
+++ b/gemfire-core/src/test/java/dunit/standalone/RemoteDUnitVM.java
@@ -135,11 +135,10 @@ public class RemoteDUnitVM extends UnicastRemoteObject implements RemoteDUnitVMI
     
   }
 
-  public void shutDownVM(boolean disconnect, boolean runShutdownHook)
-      throws RemoteException {
+  public void shutDownVM() throws RemoteException {
+    ChildVM.stopVM();
   }
 
-  public void disconnectVM()
-  throws RemoteException {
+  public void disconnectVM() throws RemoteException {
   }
 }



[27/50] [abbrv] incubator-geode git commit: Merge branch 'feature/GEODE-53' into develop - This merge will update Apache Geode website - Removing unused images

Posted by kl...@apache.org.
Merge branch 'feature/GEODE-53' into develop
- This merge will update Apache Geode website
- Removing unused images


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/cd75b1f1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/cd75b1f1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/cd75b1f1

Branch: refs/heads/feature/GEODE-291
Commit: cd75b1f196573524cac16f8565a2f42937ef16c7
Parents: 68dfcab c32a5b2
Author: William Markito <wm...@pivotal.io>
Authored: Tue Dec 8 16:37:37 2015 -0800
Committer: William Markito <wm...@pivotal.io>
Committed: Tue Dec 8 16:58:16 2015 -0800

----------------------------------------------------------------------
 gemfire-site/build.gradle                       |   37 -
 .../content/bootstrap/bootstrap.min.css         |    9 +
 gemfire-site/content/community/index.html       |  629 +++++++
 .../content/css/bootflat-extensions.css         |  356 ++++
 gemfire-site/content/css/bootflat-square.css    |   69 +
 gemfire-site/content/css/bootflat.css           | 1559 ++++++++++++++++++
 gemfire-site/content/css/font-awesome.min.css   |  405 +++++
 gemfire-site/content/css/geode-site.css         | 1554 +++++++++++++++++
 gemfire-site/content/css/usergrid-site.css      | 1554 +++++++++++++++++
 gemfire-site/content/favicon.ico                |  Bin 0 -> 20805 bytes
 gemfire-site/content/font/FontAwesome.otf       |  Bin 0 -> 61896 bytes
 .../content/font/fontawesome-webfont-eot.eot    |  Bin 0 -> 37405 bytes
 .../content/font/fontawesome-webfont-svg.svg    |  399 +++++
 .../content/font/fontawesome-webfont-ttf.ttf    |  Bin 0 -> 79076 bytes
 .../content/font/fontawesome-webfont-woff.woff  |  Bin 0 -> 43572 bytes
 gemfire-site/content/img/apache_geode_logo.png  |  Bin 0 -> 23616 bytes
 .../content/img/apache_geode_logo_white.png     |  Bin 0 -> 22695 bytes
 .../img/apache_geode_logo_white_small.png       |  Bin 0 -> 52948 bytes
 gemfire-site/content/img/check_flat/default.png |  Bin 0 -> 25851 bytes
 gemfire-site/content/img/egg-logo.png           |  Bin 0 -> 9938 bytes
 gemfire-site/content/img/github.png             |  Bin 0 -> 8936 bytes
 gemfire-site/content/index.html                 |  295 ++++
 gemfire-site/content/js/bootstrap.min.js        |    8 +
 gemfire-site/content/js/head.js                 |  708 ++++++++
 gemfire-site/content/js/html5shiv.js            |    8 +
 gemfire-site/content/js/jquery-1.10.1.min.js    |    6 +
 gemfire-site/content/js/jquery.icheck.js        |  397 +++++
 gemfire-site/content/js/respond.min.js          |    6 +
 gemfire-site/content/js/usergrid-site.js        |   50 +
 gemfire-site/content/releases/index.html        |  239 +++
 gemfire-site/content/static/github-btn.html     |    2 +
 gemfire-site/src/jbake.zip                      |  Bin 207030 -> 0 bytes
 gemfire-site/src/jbake/assets/favicon.ico       |  Bin 1150 -> 0 bytes
 .../src/jbake/assets/images/bg-billboard.png    |  Bin 25538 -> 0 bytes
 .../jbake/assets/images/bg-crystals-home.png    |  Bin 41684 -> 0 bytes
 .../assets/images/bg-crystals-secondary.png     |  Bin 26046 -> 0 bytes
 .../src/jbake/assets/images/egg-logo1.png       |  Bin 8626 -> 0 bytes
 .../jbake/assets/images/events/apachecon.png    |  Bin 4528 -> 0 bytes
 .../src/jbake/assets/images/events/oscon.png    |  Bin 26024 -> 0 bytes
 .../src/jbake/assets/images/geode-banner.png    |  Bin 7916 -> 0 bytes
 .../assets/images/logo-apache-geode-white.png   |  Bin 2336 -> 0 bytes
 .../jbake/assets/images/logo-apache-geode.png   |  Bin 3200 -> 0 bytes
 .../jbake/assets/images/logo-geode-white.png    |  Bin 1620 -> 0 bytes
 .../src/jbake/assets/images/logo-geode.png      |  Bin 3345 -> 0 bytes
 .../src/jbake/assets/javascripts/master.js      |  121 --
 .../src/jbake/assets/javascripts/scale.fix.js   |   20 -
 .../jbake/assets/stylesheets/pygment_trac.css   |   60 -
 .../src/jbake/assets/stylesheets/styles.css     |  319 ----
 gemfire-site/src/jbake/content/404.md           |    9 -
 gemfire-site/src/jbake/content/README.md        |   36 -
 gemfire-site/src/jbake/content/about/index.md   |   31 -
 .../src/jbake/content/community/index.md        |   82 -
 .../src/jbake/content/contribute/index.md       |   47 -
 gemfire-site/src/jbake/content/docs/index.md    |   23 -
 .../src/jbake/content/download/index.md         |   13 -
 .../src/jbake/content/getting-started/index.md  |   88 -
 gemfire-site/src/jbake/content/index.md         |   76 -
 gemfire-site/src/jbake/jbake.properties         |    6 -
 gemfire-site/src/jbake/templates/page.groovy    |   80 -
 gemfire-site/website/.gitignore                 |    1 +
 gemfire-site/website/README.md                  |   54 +
 gemfire-site/website/Rules                      |   52 +
 gemfire-site/website/build.sh                   |    1 +
 .../website/content/bootstrap/bootstrap.min.css |    9 +
 .../website/content/community/index.html        |  286 ++++
 .../website/content/css/bootflat-extensions.css |  356 ++++
 .../website/content/css/bootflat-square.css     |   69 +
 gemfire-site/website/content/css/bootflat.css   | 1559 ++++++++++++++++++
 .../website/content/css/font-awesome.min.css    |  405 +++++
 gemfire-site/website/content/css/geode-site.css | 1554 +++++++++++++++++
 gemfire-site/website/content/favicon.ico        |  Bin 0 -> 20805 bytes
 .../website/content/font/FontAwesome.otf        |  Bin 0 -> 61896 bytes
 .../content/font/fontawesome-webfont-eot.eot    |  Bin 0 -> 37405 bytes
 .../content/font/fontawesome-webfont-svg.svg    |  399 +++++
 .../content/font/fontawesome-webfont-ttf.ttf    |  Bin 0 -> 79076 bytes
 .../content/font/fontawesome-webfont-woff.woff  |  Bin 0 -> 43572 bytes
 .../website/content/img/apache_geode_logo.png   |  Bin 0 -> 23616 bytes
 .../content/img/apache_geode_logo_white.png     |  Bin 0 -> 22695 bytes
 .../img/apache_geode_logo_white_small.png       |  Bin 0 -> 52948 bytes
 .../website/content/img/check_flat/default.png  |  Bin 0 -> 25851 bytes
 gemfire-site/website/content/img/egg-logo.png   |  Bin 0 -> 9938 bytes
 gemfire-site/website/content/img/github.png     |  Bin 0 -> 8936 bytes
 gemfire-site/website/content/img/intellij.png   |  Bin 0 -> 9199 bytes
 gemfire-site/website/content/img/yourkit.jpeg   |  Bin 0 -> 7763 bytes
 gemfire-site/website/content/index.html         |  124 ++
 .../website/content/js/bootstrap.min.js         |    8 +
 gemfire-site/website/content/js/head.js         |  708 ++++++++
 gemfire-site/website/content/js/html5shiv.js    |    8 +
 .../website/content/js/jquery-1.10.1.min.js     |    6 +
 .../website/content/js/jquery.icheck.js         |  397 +++++
 gemfire-site/website/content/js/respond.min.js  |    6 +
 .../website/content/js/usergrid-site.js         |   50 +
 .../website/content/releases/index.html         |   65 +
 gemfire-site/website/layouts/community.html     |    1 +
 gemfire-site/website/layouts/default.html       |   44 +
 gemfire-site/website/layouts/docs.html          |    1 +
 gemfire-site/website/layouts/footer.html        |   96 ++
 gemfire-site/website/layouts/header.html        |  231 +++
 gemfire-site/website/lib/default.rb             |   43 +
 gemfire-site/website/lib/helpers_.rb            |    0
 gemfire-site/website/lib/pandoc.template        |    4 +
 gemfire-site/website/nanoc.yaml                 |   77 +
 gemfire-site/website/run.sh                     |    1 +
 gemfire-site/website/utilities/map-markers.rb   |   58 +
 gemfire-site/website/utilities/markers.txt      |  440 +++++
 .../website/utilities/snapshot-apigee.rb        |   71 +
 gemfire-site/website/utilities/usergrid.csv     |  290 ++++
 107 files changed, 15727 insertions(+), 1048 deletions(-)
----------------------------------------------------------------------



[30/50] [abbrv] incubator-geode git commit: New unit tests & minor bugfixes

Posted by kl...@apache.org.
New unit tests & minor bugfixes

1) new unit tests
2) detection of member IDs that lack UUIDs in UDP messaging
3) removal of JoinResponse messages - a view message is treated as the join response, so there is no need to send both
4) detection of new members admitted by a different coordinator during becomeCoordinator processing (problem found by Jianxia)
5) adjusting the join-timeout when member-timeout is large (see the sketch below)
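
A minimal sketch (not part of the commit) of the join-timeout adjustment in item 5. The constant names mirror the ServiceConfig change included later in this mail; the member-timeout value is a hypothetical assumption chosen only to show the effect.

    public class JoinTimeoutSketch {
      // stall time for collecting concurrent join/leave/remove requests (ServiceConfig default)
      static final long MEMBER_REQUEST_COLLECTION_INTERVAL = 500;

      public static void main(String[] args) {
        long memberTimeout = 30000;      // hypothetical large member-timeout (ms)
        long defaultJoinTimeout = 24000; // the usual default (ms)
        // leave enough time to notice a crashed coordinator and find a new one
        long minimumJoinTimeout = memberTimeout * 2 + MEMBER_REQUEST_COLLECTION_INTERVAL;
        if (defaultJoinTimeout < minimumJoinTimeout) {
          defaultJoinTimeout = minimumJoinTimeout;
        }
        System.out.println("join-timeout = " + defaultJoinTimeout); // prints 60500 here
      }
    }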


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a5906e5b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a5906e5b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a5906e5b

Branch: refs/heads/feature/GEODE-291
Commit: a5906e5be3165b6afaa8424631a7a7f504c73f27
Parents: 6e32ffe
Author: Bruce Schuchardt <bs...@pivotal.io>
Authored: Thu Dec 10 09:14:51 2015 -0800
Committer: Bruce Schuchardt <bs...@pivotal.io>
Committed: Thu Dec 10 09:16:12 2015 -0800

----------------------------------------------------------------------
 .../internal/InternalDistributedSystem.java     |  19 ++-
 .../distributed/internal/ReplyProcessor21.java  |   2 +-
 .../internal/membership/NetView.java            |  19 +++
 .../internal/membership/gms/GMSMember.java      |   8 ++
 .../internal/membership/gms/ServiceConfig.java  |  21 ++-
 .../membership/gms/membership/GMSJoinLeave.java | 126 ++++++++++++------
 .../gms/mgr/GMSMembershipManager.java           |  18 ++-
 .../internal/DistributionManagerDUnitTest.java  |  43 +++++-
 .../membership/MembershipJUnitTest.java         | 130 +++++++------------
 .../internal/membership/NetViewJUnitTest.java   |  66 +++++++++-
 .../gms/fd/GMSHealthMonitorJUnitTest.java       |  54 ++++----
 .../gms/membership/GMSJoinLeaveJUnitTest.java   |  87 ++++++++++---
 .../gms/mgr/GMSMembershipManagerJUnitTest.java  |  74 +++++++++++
 .../internal/DataSerializableJUnitTest.java     |  35 ++---
 14 files changed, 511 insertions(+), 191 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
index 261b8a9..8f604b4 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/InternalDistributedSystem.java
@@ -285,6 +285,19 @@ public final class InternalDistributedSystem
       }
     }
   }
+  
+  
+  /**
+   * creates a non-functional instance for testing
+   * @param nonDefault - non-default distributed system properties
+   */
+  public static InternalDistributedSystem newInstanceForTesting(DM dm, Properties nonDefault) {
+    InternalDistributedSystem sys = new InternalDistributedSystem(nonDefault);
+    sys.config = new RuntimeDistributionConfigImpl(sys);
+    sys.dm = dm;
+    sys.isConnected = true;
+    return sys;
+  }
 
   /**
    * Returns a connection to the distributed system that is suitable
@@ -529,10 +542,8 @@ public final class InternalDistributedSystem
       }
     }
 
-    if (this.isLoner) {
-      this.config = new RuntimeDistributionConfigImpl(this);
-    } else {
-      this.config = new RuntimeDistributionConfigImpl(this);
+    this.config = new RuntimeDistributionConfigImpl(this);
+    if (!this.isLoner) {
       this.attemptingToReconnect = (reconnectAttemptCounter > 0);
     }
     try {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ReplyProcessor21.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ReplyProcessor21.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ReplyProcessor21.java
index d12e78d..aa5f66c 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ReplyProcessor21.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/ReplyProcessor21.java
@@ -757,7 +757,7 @@ public class ReplyProcessor21
       else {
         if (msecs > timeout) {
           if (!latch.await(timeout)) {
-            timeout(false, false);
+            timeout(isSevereAlertProcessingEnabled() && (severeAlertTimeout > 0), false);
             // after timeout alert, wait remaining time
             if (!latch.await(msecs-timeout)) {
               logger.info(LocalizedMessage.create(LocalizedStrings.ReplyProcessor21_WAIT_FOR_REPLIES_TIMING_OUT_AFTER_0_SEC, Long.valueOf(msecs / 1000)));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/NetView.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/NetView.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/NetView.java
index a90a45d..6a68619 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/NetView.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/NetView.java
@@ -230,6 +230,10 @@ public class NetView implements DataSerializableFixedID {
     this.crashedMembers.addAll(mbr);
   }
 
+  public void addShutdownMembers(Set<InternalDistributedMember> mbr) {
+    this.shutdownMembers.addAll(mbr);
+  }
+
   public boolean remove(InternalDistributedMember mbr) {
     this.hashedMembers.remove(mbr);
     int idx = this.members.indexOf(mbr);
@@ -518,6 +522,21 @@ public class NetView implements DataSerializableFixedID {
     sb.append("]");
     return sb.toString();
   }
+  
+  /**
+   * Returns the ID from this view that is equal to the argument.
+   * If no such ID exists the argument is returned.
+   */
+  public synchronized InternalDistributedMember getCanonicalID(InternalDistributedMember id) {
+    if (hashedMembers.contains(id)) {
+      for (InternalDistributedMember m: this.members) {
+        if (id.equals(m)) {
+          return m;
+        }
+      }
+    }
+    return id;
+  }
 
   @Override
   public synchronized boolean equals(Object arg0) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/GMSMember.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/GMSMember.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/GMSMember.java
index 05b3aee..b1a4883 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/GMSMember.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/GMSMember.java
@@ -374,6 +374,14 @@ public class GMSMember implements NetMember, DataSerializableFixedID {
   public void setPort(int p) {
     this.udpPort = p;
   }
+  
+  /**
+   * checks to see if this address has UUID information needed
+   * to send messages via JGroups
+   */
+  public boolean hasUUID() {
+    return !(this.uuidLSBs == 0 && this.uuidMSBs == 0);
+  }
 
   @Override
   public Version[] getSerializationVersions() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/ServiceConfig.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/ServiceConfig.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/ServiceConfig.java
index 1d67bbf..a412dfa 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/ServiceConfig.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/ServiceConfig.java
@@ -20,12 +20,17 @@ import java.net.InetAddress;
 
 import com.gemstone.gemfire.distributed.Locator;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.membership.gms.membership.GMSJoinLeave;
 import com.gemstone.gemfire.internal.SocketCreator;
 import com.gemstone.gemfire.internal.admin.remote.RemoteTransportConfig;
 
 public class ServiceConfig {
+
+  /** stall time to wait for concurrent join/leave/remove requests to be received */
+  public static final long MEMBER_REQUEST_COLLECTION_INTERVAL = Long.getLong("gemfire.member-request-collection-interval", 500);
+
   /** various settings from Geode configuration */
-  private int joinTimeout;
+  private long joinTimeout;
   private int[] membershipPortRange;
   private int udpRecvBufferSize;
   private int udpSendBufferSize;
@@ -47,7 +52,7 @@ public class ServiceConfig {
   }
 
 
-  public int getJoinTimeout() {
+  public long getJoinTimeout() {
     return joinTimeout;
   }
 
@@ -128,11 +133,19 @@ public class ServiceConfig {
     this.dconfig = theConfig;
     this.transport = transport;
     
-    int defaultJoinTimeout = 24000;
+    long defaultJoinTimeout = 24000;
     if (theConfig.getLocators().length() > 0 && !Locator.hasLocators()) {
       defaultJoinTimeout = 60000;
     }
-    joinTimeout = Integer.getInteger("p2p.joinTimeout", defaultJoinTimeout).intValue();
+    
+    // we need to have enough time to figure out that the coordinator has crashed &
+    // find a new one
+    long minimumJoinTimeout = dconfig.getMemberTimeout() * 2 + MEMBER_REQUEST_COLLECTION_INTERVAL;
+    if (defaultJoinTimeout < minimumJoinTimeout) {
+      defaultJoinTimeout = minimumJoinTimeout;
+    }
+    
+    joinTimeout = Long.getLong("p2p.joinTimeout", defaultJoinTimeout).longValue();
     
     // if network partition detection is enabled, we must connect to the locators
     // more frequently in order to make sure we're not isolated from them

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
index 3e767ae..ccc9d8c 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
@@ -25,6 +25,7 @@ import static com.gemstone.gemfire.internal.DataSerializableFixedID.LEAVE_REQUES
 import static com.gemstone.gemfire.internal.DataSerializableFixedID.NETWORK_PARTITION_MESSAGE;
 import static com.gemstone.gemfire.internal.DataSerializableFixedID.REMOVE_MEMBER_REQUEST;
 import static com.gemstone.gemfire.internal.DataSerializableFixedID.VIEW_ACK_MESSAGE;
+import static com.gemstone.gemfire.distributed.internal.membership.gms.ServiceConfig.MEMBER_REQUEST_COLLECTION_INTERVAL;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -96,9 +97,6 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
   /** amount of time to sleep before trying to join after a failed attempt */
   private static final int JOIN_RETRY_SLEEP = Integer.getInteger("gemfire.join-retry-sleep", 1000);
 
-  /** stall time to wait for concurrent join/leave/remove requests to be received */
-  public static final long MEMBER_REQUEST_COLLECTION_INTERVAL = Long.getLong("gemfire.member-request-collection-interval", 500);
-
   /** time to wait for a leave request to be transmitted by jgroups */
   private static final long LEAVE_MESSAGE_SLEEP_TIME = Long.getLong("gemfire.leave-message-sleep-time", 1000);
 
@@ -664,7 +662,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
       }
       if (viewCreator == null || viewCreator.isShutdown()) {
         viewCreator = new ViewCreator("Geode Membership View Creator", Services.getThreadGroup());
-        viewCreator.setInitialView(newView, leaving, removals);
+        viewCreator.setInitialView(newView, newView.getNewMembers(), leaving, removals);
         viewCreator.setDaemon(true);
         viewCreator.start();
         startViewBroadcaster();
@@ -672,13 +670,6 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
     }
   }
 
-  private void sendJoinResponses(List<InternalDistributedMember> newMbrs, NetView newView) {
-    for (InternalDistributedMember mbr : newMbrs) {
-      JoinResponseMessage response = new JoinResponseMessage(mbr, newView);
-      services.getMessenger().send(response);
-    }
-  }
-
   private void sendRemoveMessages(List<InternalDistributedMember> removals, List<String> reasons, NetView newView) {
     Iterator<String> reason = reasons.iterator();
     for (InternalDistributedMember mbr : removals) {
@@ -747,12 +738,6 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
 
     // only wait for responses during preparation
     if (preparing) {
-      // send join responses after other members at least have
-      // a prepared view announcing the new member
-      if (!(isNetworkPartition && quorumRequired)) {
-        sendJoinResponses(newMembers, view);
-      }
-
       logger.debug("waiting for view responses");
 
       Set<InternalDistributedMember> failedToRespond = rp.waitForResponses();
@@ -1330,22 +1315,12 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
       stopCoordinatorServices();
       if (view != null) {
         if (view.size() > 1) {
-          if (this.isCoordinator) {
-            logger.debug("JoinLeave stopping coordination services");
-            NetView newView = new NetView(view, view.getViewId() + 1);
-            newView.remove(localAddress);
-            InstallViewMessage m = new InstallViewMessage(newView, services.getAuthenticator().getCredentials(this.localAddress));
-            m.setRecipients(newView.getMembers());
-            services.getMessenger().send(m);
-            waitForProcessing = true;
-          } else {
-            List<InternalDistributedMember> coords = view.getPreferredCoordinators(Collections.<InternalDistributedMember> emptySet(), localAddress, 5);
+          List<InternalDistributedMember> coords = view.getPreferredCoordinators(Collections.<InternalDistributedMember> emptySet(), localAddress, 5);
 
-            logger.debug("JoinLeave sending a leave request to {}", coords);
-            LeaveRequestMessage m = new LeaveRequestMessage(coords, this.localAddress, "this member is shutting down");
-            services.getMessenger().send(m);
-            waitForProcessing = true;
-          }
+          logger.debug("JoinLeave sending a leave request to {}", coords);
+          LeaveRequestMessage m = new LeaveRequestMessage(coords, this.localAddress, "this member is shutting down");
+          services.getMessenger().send(m);
+          waitForProcessing = true;
         } // view.size
       } // view != null
     }
@@ -1690,8 +1665,21 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
     boolean shutdown = false;
     volatile boolean waiting = false;
 
+    /**
+     * initial view to install.  guarded by synch on ViewCreator
+     */
     NetView initialView;
+    /**
+     * initial joining members.  guarded by synch on ViewCreator
+     */
+    List<InternalDistributedMember> initialJoins = Collections.<InternalDistributedMember>emptyList();
+    /**
+     * initial leaving members  guarded by synch on ViewCreator
+     */
     Set<InternalDistributedMember> initialLeaving;
+    /**
+     * initial crashed members.  guarded by synch on ViewCreator
+     */
     Set<InternalDistributedMember> initialRemovals;
 
     ViewCreator(String name, ThreadGroup tg) {
@@ -1723,22 +1711,72 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
      * @param leaving - members leaving in this view
      * @param removals - members crashed in this view
      */
-    void setInitialView(NetView newView, Set<InternalDistributedMember> leaving, Set<InternalDistributedMember> removals) {
+    synchronized void setInitialView(NetView newView, List<InternalDistributedMember> newMembers,
+        Set<InternalDistributedMember> leaving, Set<InternalDistributedMember> removals) {
       this.initialView = newView;
+      this.initialJoins = newMembers;
       this.initialLeaving = leaving;
       this.initialRemovals = removals;
     }
 
     private void sendInitialView() {
-      if (initialView != null) {
-        try {
-          prepareAndSendView(initialView, Collections.<InternalDistributedMember>emptyList(), initialLeaving,
-              initialRemovals);
-        } finally {
-          this.initialView = null;
-          this.initialLeaving = null;
-          this.initialRemovals = null;
+      if (initialView == null) {
+        return;
+      }
+      NetView v = preparedView;
+      if (v != null) {
+        processPreparedView(v);
+      }
+      try {
+        NetView iView;
+        List<InternalDistributedMember> iJoins;
+        Set<InternalDistributedMember> iLeaves;
+        Set<InternalDistributedMember> iRemoves;
+        synchronized(this) {
+          iView = initialView;
+          iJoins = initialJoins;
+          iLeaves = initialLeaving;
+          iRemoves = initialRemovals;
+        }
+        if (iView != null) {
+          prepareAndSendView(iView, iJoins, iLeaves, iRemoves);
+        }
+      } finally {
+        setInitialView(null, null, null, null);
+      }
+    }
+
+    /**
+     * During initial view processing a prepared view was discovered.
+     * This method will extract its new members and create a new
+     * initial view containing them.
+     * 
+     * @param v The prepared view
+     */
+    private void processPreparedView(NetView v) {
+      assert initialView != null;
+      if (currentView == null || currentView.getViewId() < v.getViewId()) {
+        // we have a prepared view that is newer than the current view
+        // form a new View ID
+        int viewId = Math.max(initialView.getViewId(),v.getViewId());
+        viewId += 1;
+        NetView newView = new NetView(initialView, viewId);
+
+        // add the new members from the prepared view to the new view,
+        // preserving their failure-detection ports
+        List<InternalDistributedMember> newMembers;
+        if (currentView != null) {
+          newMembers = v.getNewMembers(currentView);
+        } else {
+          newMembers = v.getMembers();
         }
+        for (InternalDistributedMember newMember: newMembers) {
+          newView.add(newMember);
+          newView.setFailureDetectionPort(newMember, v.getFailureDetectionPort(newMember));
+        }
+
+        // use the new view as the initial view
+        setInitialView(newView, newMembers, initialLeaving, initialRemovals);
       }
     }
 
@@ -2007,6 +2045,12 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
           logger.info("adding these crashed members from a conflicting view to the crash-set for the next view: {}\nconflicting view: {}", unresponsive,
               conflictingView);
           failures.addAll(conflictingView.getCrashedMembers());
+          // this member may have been kicked out of the conflicting view
+          if (failures.contains(localAddress)) {
+            forceDisconnect("I am no longer a member of the distributed system");
+            shutdown = true;
+            return;
+          }
           List<InternalDistributedMember> newMembers = conflictingView.getNewMembers();
           if (!newMembers.isEmpty()) {
             logger.info("adding these new members from a conflicting view to the new view: {}", newMembers);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
index 4e108be..bbe7ab3 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
@@ -39,6 +39,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.logging.log4j.Logger;
 
+import com.gemstone.gemfire.CancelCriterion;
 import com.gemstone.gemfire.CancelException;
 import com.gemstone.gemfire.ForcedDisconnectException;
 import com.gemstone.gemfire.GemFireConfigException;
@@ -72,6 +73,7 @@ import com.gemstone.gemfire.distributed.internal.membership.MembershipManager;
 import com.gemstone.gemfire.distributed.internal.membership.MembershipTestHook;
 import com.gemstone.gemfire.distributed.internal.membership.NetView;
 import com.gemstone.gemfire.distributed.internal.membership.QuorumChecker;
+import com.gemstone.gemfire.distributed.internal.membership.gms.GMSMember;
 import com.gemstone.gemfire.distributed.internal.membership.gms.GMSUtil;
 import com.gemstone.gemfire.distributed.internal.membership.gms.Services;
 import com.gemstone.gemfire.distributed.internal.membership.gms.SuspectMember;
@@ -1429,7 +1431,6 @@ public class GMSMembershipManager implements MembershipManager, Manager
     }
   }
   
-  
   protected boolean isJoining() {
     return this.isJoining;
   }
@@ -1971,6 +1972,7 @@ public class GMSMembershipManager implements MembershipManager, Manager
     boolean sendViaMessenger = isForceUDPCommunications(); // enable when bug #46438 is fixed: || msg.sendViaUDP();
 
     if (useMcast || tcpDisabled || sendViaMessenger) {
+      checkAddressesForUUIDs(destinations);
       result = services.getMessenger().send(msg);
     }
     else {
@@ -1991,6 +1993,20 @@ public class GMSMembershipManager implements MembershipManager, Manager
     forceUseUDPMessaging.set(null);
   }
   
+  void checkAddressesForUUIDs(InternalDistributedMember[] addresses) {
+    for (int i=0; i<addresses.length; i++) {
+      GMSMember id = (GMSMember)addresses[i].getNetMember();
+      if (!id.hasUUID()) {
+        latestViewLock.readLock().lock();
+        try {
+          addresses[i] = latestView.getCanonicalID(addresses[i]);
+        } finally {
+          latestViewLock.readLock().unlock();
+        }
+      }
+    }
+  }
+  
   private boolean isForceUDPCommunications() {
     Boolean forced = forceUseUDPMessaging.get();
     return forced == Boolean.TRUE;
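
checkAddressesForUUIDs above replaces any destination that lacks a UUID with the canonical member ID held in the latest view, taking the view read lock only around the lookup and releasing it in a finally block. A minimal sketch of that read-lock-per-lookup idiom, with illustrative types rather than Geode's InternalDistributedMember and NetView:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class CanonicalIdCache {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final Map<String, String> canonical = new HashMap<>();

      String canonicalize(String id) {
        lock.readLock().lock();
        try {
          // fall back to the caller's id when no canonical form is known
          String c = canonical.get(id);
          return (c != null) ? c : id;
        } finally {
          lock.readLock().unlock();
        }
      }
    }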

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
index 51771cb..1f411bb 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
@@ -19,9 +19,8 @@ package com.gemstone.gemfire.distributed.internal;
 import java.net.InetAddress;
 import java.util.Properties;
 
-import junit.framework.Assert;
-
 import org.apache.logging.log4j.Logger;
+import org.junit.Assert;
 
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.admin.AdminDistributedSystem;
@@ -43,7 +42,9 @@ import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
 import com.gemstone.gemfire.distributed.internal.membership.MembershipManager;
+import com.gemstone.gemfire.distributed.internal.membership.NetView;
 import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManagerHelper;
+import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.Manager;
 import com.gemstone.gemfire.distributed.internal.membership.gms.mgr.GMSMembershipManager;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.logging.LogService;
@@ -538,4 +539,42 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
     props.setProperty(DistributionConfig.BIND_ADDRESS_NAME, InetAddress.getLocalHost().getCanonicalHostName());
     getSystem().disconnect();
   }
+  
+  /**
+   * install a new view and show that waitForViewInstallation works as expected
+   */
+  public void testWaitForViewInstallation() {
+    getSystem(new Properties());
+    
+    MembershipManager mgr = system.getDM().getMembershipManager(); 
+
+    final NetView v = mgr.getView();
+    
+    final boolean[] passed = new boolean[1];
+    Thread t = new Thread("wait for view installation") {
+      public void run() {
+        try {
+          ((DistributionManager)system.getDM()).waitForViewInstallation(v.getViewId()+1);
+          synchronized(passed) {
+            passed[0] = true;
+          }
+        } catch (InterruptedException e) {
+          // failed
+        }
+      }
+    };
+    t.setDaemon(true);
+    t.start();
+    
+    pause(2000);
+
+    NetView newView = new NetView(v, v.getViewId()+1);
+    ((Manager)mgr).installView(newView);
+
+    pause(2000);
+    
+    synchronized(passed) {
+      Assert.assertTrue(passed[0]);
+    }
+  }
 }
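
testWaitForViewInstallation coordinates the waiting thread with pause(2000) calls and a shared boolean guarded by synchronized blocks. Assuming only the hand-off matters, the same shape can be written with a CountDownLatch and an explicit timeout; a sketch with illustrative names (assumes a Java 8 toolchain, and is not part of the test framework):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    public class LatchWaitSketch {
      public static void main(String[] args) throws InterruptedException {
        CountDownLatch installed = new CountDownLatch(1);
        Thread waiter = new Thread(() -> {
          // block here until the new view is installed, e.g.
          // dm.waitForViewInstallation(viewId + 1), then signal success
          installed.countDown();
        }, "wait for view installation");
        waiter.setDaemon(true);
        waiter.start();

        // ... trigger the view installation here ...

        // fail if the waiter does not finish within the timeout
        if (!installed.await(30, TimeUnit.SECONDS)) {
          throw new AssertionError("waitForViewInstallation never returned");
        }
      }
    }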

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
index bee2367..7a4971f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
@@ -16,28 +16,17 @@
  */
 package com.gemstone.gemfire.distributed.internal.membership;
 
-import static org.mockito.Mockito.isA;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Matchers.isA;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import java.io.File;
 import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
 import java.util.Properties;
-import java.util.Set;
-
-import junit.framework.TestCase;
 
 import org.apache.logging.log4j.Level;
 import org.junit.AfterClass;
@@ -47,12 +36,10 @@ import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.GemFireConfigException;
 import com.gemstone.gemfire.distributed.Locator;
-import com.gemstone.gemfire.distributed.internal.DM;
 import com.gemstone.gemfire.distributed.internal.DMStats;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.DistributionConfigImpl;
 import com.gemstone.gemfire.distributed.internal.DistributionManager;
-import com.gemstone.gemfire.distributed.internal.DistributionMessage;
 import com.gemstone.gemfire.distributed.internal.InternalLocator;
 import com.gemstone.gemfire.distributed.internal.SerialAckedMessage;
 import com.gemstone.gemfire.distributed.internal.membership.gms.GMSUtil;
@@ -90,70 +77,7 @@ public class MembershipJUnitTest {
 //    LogService.setBaseLogLevel(baseLogLevel);
   }
   
-  /**
-   * Test that failed weight calculations are correctly performed.  See bug #47342
-   * @throws Exception
-   */
-  public void testFailedWeight() throws Exception {
-    // in #47342 a new view was created that contained a member that was joining but
-    // was no longer reachable.  The member was included in the failed-weight and not
-    // in the previous view-weight, causing a spurious network partition to be declared
-    InternalDistributedMember members[] = new InternalDistributedMember[] {
-        new InternalDistributedMember("localhost", 1), new InternalDistributedMember("localhost", 2), new InternalDistributedMember("localhost", 3),
-        new InternalDistributedMember("localhost", 4), new InternalDistributedMember("localhost", 5), new InternalDistributedMember("localhost", 6)};
-    int i = 0;
-    // weight 3
-    members[i].setVmKind(DistributionManager.LOCATOR_DM_TYPE);
-    members[i++].getNetMember().setPreferredForCoordinator(true);
-    // weight 3
-    members[i].setVmKind(DistributionManager.LOCATOR_DM_TYPE);
-    members[i++].getNetMember().setPreferredForCoordinator(true);
-    // weight 15 (cache+leader)
-    members[i].setVmKind(DistributionManager.NORMAL_DM_TYPE);
-    members[i++].getNetMember().setPreferredForCoordinator(false);
-    // weight 0
-    members[i].setVmKind(DistributionManager.ADMIN_ONLY_DM_TYPE);
-    members[i++].getNetMember().setPreferredForCoordinator(false);
-    // weight 0
-    members[i].setVmKind(DistributionManager.ADMIN_ONLY_DM_TYPE);
-    members[i++].getNetMember().setPreferredForCoordinator(false);
-    // weight 10
-    members[i].setVmKind(DistributionManager.NORMAL_DM_TYPE);
-    members[i++].getNetMember().setPreferredForCoordinator(false);
-    
-    List<InternalDistributedMember> vmbrs = new ArrayList<>(members.length);
-    for (i=0; i<members.length; i++) {
-      vmbrs.add(members[i]);
-    }
-    Set<InternalDistributedMember> empty = Collections.emptySet();
-    NetView lastView = new NetView(members[0], 4, vmbrs, empty, empty);
-    InternalDistributedMember leader = members[2];
-    assertTrue(!leader.getNetMember().preferredForCoordinator());
-    
-    InternalDistributedMember joiningMember = new InternalDistributedMember("localhost", 7);
-    joiningMember.setVmKind(DistributionManager.NORMAL_DM_TYPE);
-    joiningMember.getNetMember().setPreferredForCoordinator(false);
-    
-    // have the joining member and another cache process (weight 10) in the failed members
-    // collection and check to make sure that the joining member is not included in failed
-    // weight calcs.
-    Set<InternalDistributedMember> failedMembers = new HashSet<>(3);
-    failedMembers.add(joiningMember);
-    failedMembers.add(members[members.length-1]); // cache
-    failedMembers.add(members[members.length-2]); // admin
-    List<InternalDistributedMember> newMbrs = new ArrayList<InternalDistributedMember>(lastView.getMembers());
-    newMbrs.removeAll(failedMembers);
-    NetView newView = new NetView(members[0], 5, newMbrs, empty, failedMembers);
-    
-    int failedWeight = newView.getCrashedMemberWeight(lastView);
-//    System.out.println("last view = " + lastView);
-//    System.out.println("failed mbrs = " + failedMembers);
-//    System.out.println("failed weight = " + failedWeight);
-    assertEquals("failure weight calculation is incorrect", 10, failedWeight);
-    Set<InternalDistributedMember> actual = newView.getActualCrashedMembers(lastView);
-    assertTrue(!actual.contains(members[members.length-2]));
-  }
-  
+ 
 //  @Test
 //  public void testRepeat() throws Exception {
 //    for (int i=0; i<50; i++) {
@@ -292,6 +216,48 @@ public class MembershipJUnitTest {
       }
     }
   }
+  
+  @Test
+  public void testJoinTimeoutSetting() throws Exception {
+    long timeout = 30000;
+    Properties nonDefault = new Properties();
+    nonDefault.put(DistributionConfig.MEMBER_TIMEOUT_NAME, ""+timeout);
+    DistributionConfigImpl config = new DistributionConfigImpl(nonDefault);
+    RemoteTransportConfig transport = new RemoteTransportConfig(config,
+        DistributionManager.NORMAL_DM_TYPE);
+    ServiceConfig sc = new ServiceConfig(transport, config);
+    assertEquals(2 * timeout + ServiceConfig.MEMBER_REQUEST_COLLECTION_INTERVAL, sc.getJoinTimeout());
+    
+    nonDefault.clear();
+    config = new DistributionConfigImpl(nonDefault);
+    transport = new RemoteTransportConfig(config,
+        DistributionManager.NORMAL_DM_TYPE);
+    sc = new ServiceConfig(transport, config);
+    assertEquals(24000, sc.getJoinTimeout());
+    
+
+    nonDefault.clear();
+    nonDefault.put(DistributionConfig.LOCATORS_NAME, SocketCreator.getLocalHost().getHostAddress()+"["+12345+"]");
+    config = new DistributionConfigImpl(nonDefault);
+    transport = new RemoteTransportConfig(config,
+        DistributionManager.NORMAL_DM_TYPE);
+    sc = new ServiceConfig(transport, config);
+    assertEquals(60000, sc.getJoinTimeout());
+    
+
+    timeout = 2000;
+    System.setProperty("p2p.joinTimeout", ""+timeout);
+    try {
+      config = new DistributionConfigImpl(nonDefault);
+      transport = new RemoteTransportConfig(config,
+          DistributionManager.NORMAL_DM_TYPE);
+      sc = new ServiceConfig(transport, config);
+      assertEquals(timeout, sc.getJoinTimeout());
+    } finally {
+      System.getProperties().remove("p2p.joinTimeout");
+    }
+    
+  }
 
   @Test
   public void testMulticastDiscoveryNotAllowed() {
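
Taken together, the assertions in testJoinTimeoutSetting pin down how the join timeout is derived: with member-timeout explicitly set to 30000 ms it is 2 * 30000 + ServiceConfig.MEMBER_REQUEST_COLLECTION_INTERVAL; with default settings it falls back to 24000 ms, and with locators configured to 60000 ms; and the p2p.joinTimeout system property (2000 ms in the last block) overrides all of these, with the property removed again in the finally block so later tests are unaffected.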

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/NetViewJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/NetViewJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/NetViewJUnitTest.java
index 603c7bf..9e39d0f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/NetViewJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/NetViewJUnitTest.java
@@ -191,4 +191,68 @@ public class NetViewJUnitTest {
     assertEquals(100, view.getNewMembers(copy).size());
   }
   
-}
+  /**
+   * Test that failed weight calculations are correctly performed.  See bug #47342
+   * @throws Exception
+   */
+  @Test
+  public void testFailedWeight() throws Exception {
+    // in #47342 a new view was created that contained a member that was joining but
+    // was no longer reachable.  The member was included in the failed-weight and not
+    // in the previous view-weight, causing a spurious network partition to be declared
+    InternalDistributedMember members[] = new InternalDistributedMember[] {
+        new InternalDistributedMember("localhost", 1), new InternalDistributedMember("localhost", 2), new InternalDistributedMember("localhost", 3),
+        new InternalDistributedMember("localhost", 4), new InternalDistributedMember("localhost", 5), new InternalDistributedMember("localhost", 6)};
+    int i = 0;
+    // weight 3
+    members[i].setVmKind(DistributionManager.LOCATOR_DM_TYPE);
+    members[i++].getNetMember().setPreferredForCoordinator(true);
+    // weight 3
+    members[i].setVmKind(DistributionManager.LOCATOR_DM_TYPE);
+    members[i++].getNetMember().setPreferredForCoordinator(true);
+    // weight 15 (cache+leader)
+    members[i].setVmKind(DistributionManager.NORMAL_DM_TYPE);
+    members[i++].getNetMember().setPreferredForCoordinator(false);
+    // weight 0
+    members[i].setVmKind(DistributionManager.ADMIN_ONLY_DM_TYPE);
+    members[i++].getNetMember().setPreferredForCoordinator(false);
+    // weight 0
+    members[i].setVmKind(DistributionManager.ADMIN_ONLY_DM_TYPE);
+    members[i++].getNetMember().setPreferredForCoordinator(false);
+    // weight 10
+    members[i].setVmKind(DistributionManager.NORMAL_DM_TYPE);
+    members[i++].getNetMember().setPreferredForCoordinator(false);
+    
+    List<InternalDistributedMember> vmbrs = new ArrayList<>(members.length);
+    for (i=0; i<members.length; i++) {
+      vmbrs.add(members[i]);
+    }
+    Set<InternalDistributedMember> empty = Collections.emptySet();
+    NetView lastView = new NetView(members[0], 4, vmbrs, empty, empty);
+    InternalDistributedMember leader = members[2];
+    assertTrue(!leader.getNetMember().preferredForCoordinator());
+    
+    InternalDistributedMember joiningMember = new InternalDistributedMember("localhost", 7);
+    joiningMember.setVmKind(DistributionManager.NORMAL_DM_TYPE);
+    joiningMember.getNetMember().setPreferredForCoordinator(false);
+    
+    // have the joining member and another cache process (weight 10) in the failed members
+    // collection and check to make sure that the joining member is not included in failed
+    // weight calcs.
+    Set<InternalDistributedMember> failedMembers = new HashSet<>(3);
+    failedMembers.add(joiningMember);
+    failedMembers.add(members[members.length-1]); // cache
+    failedMembers.add(members[members.length-2]); // admin
+    List<InternalDistributedMember> newMbrs = new ArrayList<InternalDistributedMember>(lastView.getMembers());
+    newMbrs.removeAll(failedMembers);
+    NetView newView = new NetView(members[0], 5, newMbrs, empty, failedMembers);
+    
+    int failedWeight = newView.getCrashedMemberWeight(lastView);
+//    System.out.println("last view = " + lastView);
+//    System.out.println("failed mbrs = " + failedMembers);
+//    System.out.println("failed weight = " + failedWeight);
+    assertEquals("failure weight calculation is incorrect", 10, failedWeight);
+    Set<InternalDistributedMember> actual = newView.getActualCrashedMembers(lastView);
+    assertTrue(!actual.contains(members[members.length-2]));
+  }
+ }
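
As a quick check of the arithmetic implied by the weight comments above: the failed set contains the joining member (not present in the last view, so it is excluded), one normal cache member (weight 10) and one admin member (weight 0), so getCrashedMemberWeight(lastView) should come to 10 + 0 = 10, which is exactly what the assertion expects.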

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
index 585ff17..c4ac3a6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
@@ -20,11 +20,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.isA;
-import static org.mockito.Mockito.atLeastOnce;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.*;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -46,8 +42,6 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
 
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.DistributionManager;
@@ -292,7 +286,6 @@ public class GMSHealthMonitorJUnitTest {
 
     NetView v = new NetView(mockMembers.get(0), 2, mockMembers, new HashSet<InternalDistributedMember>(), new HashSet<InternalDistributedMember>());
 
-    MethodExecuted messageSent = new MethodExecuted();
     // 3rd is current member
     when(messenger.getMemberID()).thenReturn(mockMembers.get(3));
 
@@ -300,15 +293,13 @@ public class GMSHealthMonitorJUnitTest {
 
     gmsHealthMonitor.suspect(mockMembers.get(1), "Not responding");
 
-    when(messenger.send(isA(SuspectMembersMessage.class))).thenAnswer(messageSent);
-
     try {
       // suspect thread timeout is 200 ms
       Thread.sleep(100l);
     } catch (InterruptedException e) {
     }
 
-    assertTrue("SuspectMembersMessage shouldn't have sent", !messageSent.isMethodExecuted());
+    verify(messenger, atLeastOnce()).send(isA(SuspectMembersMessage.class));
   }
 
   /***
@@ -544,6 +535,33 @@ public class GMSHealthMonitorJUnitTest {
     int byteReply = dis.read();
     Assert.assertEquals(expectedResult, byteReply);
   }
+  
+  @Test
+  public void testBeSickAndPlayDead() throws Exception {
+    NetView v = new NetView(mockMembers.get(0), 2, mockMembers, new HashSet<InternalDistributedMember>(), new HashSet<InternalDistributedMember>());
+    gmsHealthMonitor.installView(v);
+    gmsHealthMonitor.beSick();
+    
+    // a sick member will not respond to a heartbeat request
+    HeartbeatRequestMessage req = new HeartbeatRequestMessage(mockMembers.get(0), 10);
+    req.setSender(mockMembers.get(0));
+    gmsHealthMonitor.processMessage(req);
+    verify(messenger, never()).send(isA(HeartbeatMessage.class));
+    
+    // a sick member will not record a heartbeat from another member
+    HeartbeatMessage hb = new HeartbeatMessage(-1);
+    hb.setSender(mockMembers.get(0));
+    gmsHealthMonitor.processMessage(hb);
+    assertTrue(gmsHealthMonitor.memberTimeStamps.get(hb.getSender()) == null);
+    
+    // a sick member will not take action on a Suspect message from another member
+    SuspectMembersMessage smm = mock(SuspectMembersMessage.class);
+    Error err = new AssertionError("expected suspect message to be ignored");
+    when(smm.getMembers()).thenThrow(err);
+    when(smm.getSender()).thenThrow(err);
+    when(smm.getDSFID()).thenCallRealMethod();
+    gmsHealthMonitor.processMessage(smm);
+  }
 
   private GMSMember createGMSMember(short version, int viewId, long msb, long lsb) {
     GMSMember gmsMember = new GMSMember();
@@ -560,18 +578,4 @@ public class GMSHealthMonitorJUnitTest {
     return baos.toByteArray();
   }
 
-
-  private class MethodExecuted implements Answer {
-    private boolean methodExecuted = false;
-
-    public boolean isMethodExecuted() {
-      return methodExecuted;
-    }
-
-    @Override
-    public Object answer(InvocationOnMock invocation) throws Throwable {
-      methodExecuted = true;
-      return null;
-    }
-  }
 }
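
The SuspectMembersMessage stub at the end of testBeSickAndPlayDead is set up so that reading its member list or sender throws, which makes any unwanted processing of the message fail the test immediately. A stripped-down sketch of that "poisoned mock" idiom (the interface below is illustrative, not Geode's message class):

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    interface SuspectLikeMessage {
      String getSender();
    }

    class PoisonedMockSketch {
      static SuspectLikeMessage poisoned() {
        SuspectLikeMessage msg = mock(SuspectLikeMessage.class);
        // any code path that reads the sender blows up with this error
        when(msg.getSender()).thenThrow(new AssertionError("sender must not be read"));
        return msg;
      }
    }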

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
index 9895f68..01c0695 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
@@ -16,6 +16,7 @@
  */
 package com.gemstone.gemfire.distributed.internal.membership.gms.membership;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
@@ -43,12 +44,9 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.ArgumentCaptor;
 import org.mockito.internal.verification.Times;
-import org.mockito.internal.verification.api.VerificationData;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.mockito.verification.Timeout;
-import org.mockito.verification.VerificationMode;
-import org.mockito.verification.VerificationWithTimeout;
 
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
@@ -63,6 +61,7 @@ import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.Manag
 import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.Messenger;
 import com.gemstone.gemfire.distributed.internal.membership.gms.locator.FindCoordinatorResponse;
 import com.gemstone.gemfire.distributed.internal.membership.gms.membership.GMSJoinLeave.SearchState;
+import com.gemstone.gemfire.distributed.internal.membership.gms.membership.GMSJoinLeave.ViewCreator;
 import com.gemstone.gemfire.distributed.internal.membership.gms.membership.GMSJoinLeave.ViewReplyProcessor;
 import com.gemstone.gemfire.distributed.internal.membership.gms.messages.InstallViewMessage;
 import com.gemstone.gemfire.distributed.internal.membership.gms.messages.JoinRequestMessage;
@@ -72,8 +71,6 @@ import com.gemstone.gemfire.distributed.internal.membership.gms.messages.Network
 import com.gemstone.gemfire.distributed.internal.membership.gms.messages.RemoveMemberMessage;
 import com.gemstone.gemfire.distributed.internal.membership.gms.messages.ViewAckMessage;
 import com.gemstone.gemfire.internal.Version;
-import com.gemstone.gemfire.internal.admin.remote.AddStatListenerResponse;
-import com.gemstone.gemfire.internal.admin.remote.StatListenerMessage;
 import com.gemstone.gemfire.security.AuthenticationFailedException;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
@@ -297,7 +294,7 @@ public class GMSJoinLeaveJUnitTest {
     MethodExecuted removeMessageSent = new MethodExecuted();
     when(messenger.send(any(RemoveMemberMessage.class))).thenAnswer(removeMessageSent);
     gmsJoinLeave.remove(mockMembers[0], "removing for test");
-    Thread.sleep(GMSJoinLeave.MEMBER_REQUEST_COLLECTION_INTERVAL*2);
+    Thread.sleep(ServiceConfig.MEMBER_REQUEST_COLLECTION_INTERVAL*2);
     assertTrue(removeMessageSent.methodExecuted);
   }
   
@@ -655,9 +652,6 @@ public class GMSJoinLeaveJUnitTest {
   public void testNetworkPartitionMessageReceived() throws Exception {
     initMocks();
     gmsJoinLeave.becomeCoordinatorForTest();
-    List<InternalDistributedMember> members = Arrays.asList(mockMembers);
-    Set<InternalDistributedMember> empty = Collections.<InternalDistributedMember>emptySet();
-    NetView v = new NetView(mockMembers[0], 2, members, empty, empty);
     NetworkPartitionMessage message = new NetworkPartitionMessage();
     gmsJoinLeave.processMessage(message);
     verify(manager).forceDisconnect(any(String.class));
@@ -805,7 +799,7 @@ public class GMSJoinLeaveJUnitTest {
     gmsJoinLeave.memberShutdown(mockMembers[2], "Shutdown");
     
     //Install a view that still contains one of the left members (as if something like a new member, triggered a new view before coordinator leaves)
-    NetView netView = new NetView(mockMembers[0], 3/*new view id*/, createMemberList(mockMembers[0], gmsJoinLeaveMemberId, mockMembers[1], mockMembers[3]), new HashSet(), new HashSet());
+    NetView netView = new NetView(mockMembers[0], 3/*new view id*/, createMemberList(mockMembers[0], gmsJoinLeaveMemberId, mockMembers[1], mockMembers[3]), new HashSet<InternalDistributedMember>(), new HashSet<InternalDistributedMember>());
     InstallViewMessage installViewMessage = new InstallViewMessage(netView, credentials, false);
     gmsJoinLeave.processMessage(installViewMessage);
     
@@ -866,11 +860,11 @@ public class GMSJoinLeaveJUnitTest {
     int viewRequests = gmsJoinLeave.getViewRequests().size();
     
     assertTrue( "There should be 1 viewRequest but found " + viewRequests, viewRequests == 1);
-    Thread.sleep(2 * GMSJoinLeave.MEMBER_REQUEST_COLLECTION_INTERVAL);
+    Thread.sleep(2 * ServiceConfig.MEMBER_REQUEST_COLLECTION_INTERVAL);
     
     viewRequests = gmsJoinLeave.getViewRequests().size();
-    assertTrue( "There should be 0 viewRequest but found " + viewRequests, viewRequests == 0);
-    }finally {
+    assertEquals( "Found view requests: " + gmsJoinLeave.getViewRequests(), 0, viewRequests);
+    } finally {
       System.getProperties().remove(GMSJoinLeave.BYPASS_DISCOVERY_PROPERTY);
     }
   }
@@ -946,7 +940,7 @@ public class GMSJoinLeaveJUnitTest {
     InternalDistributedMember ids = new InternalDistributedMember("localhost", 97898);
     ids.getNetMember().setPreferredForCoordinator(true);
     gmsJoinLeave.processMessage(reqMsg);
-    ArgumentCaptor<JoinResponseMessage> ac = new ArgumentCaptor<>();
+    ArgumentCaptor<JoinResponseMessage> ac = ArgumentCaptor.forClass(JoinResponseMessage.class);
     verify(messenger).send(ac.capture());
     
     assertTrue("Should have asked for becoming a coordinator", ac.getValue().getBecomeCoordinator());
@@ -964,12 +958,75 @@ public class GMSJoinLeaveJUnitTest {
         msg.setSender(gmsJoinLeaveMemberId);
         gmsJoinLeave.processMessage(msg);
       }
-      Timeout to = new Timeout(2 * GMSJoinLeave.MEMBER_REQUEST_COLLECTION_INTERVAL, new Times(1));
+      Timeout to = new Timeout(2 * ServiceConfig.MEMBER_REQUEST_COLLECTION_INTERVAL, new Times(1));
       verify(messenger, to).send( isA(NetworkPartitionMessage.class));
                  
     }finally {
       System.getProperties().remove(GMSJoinLeave.BYPASS_DISCOVERY_PROPERTY);
     }    
   }
+  
+  @Test
+  public void testViewIgnoredAfterShutdown() throws Exception {
+    try {
+      initMocks(true);
+      System.setProperty(GMSJoinLeave.BYPASS_DISCOVERY_PROPERTY, "true");
+      gmsJoinLeave.join();
+      installView(1, gmsJoinLeaveMemberId, createMemberList(mockMembers[0], mockMembers[1], mockMembers[2], gmsJoinLeaveMemberId, mockMembers[3]));
+      gmsJoinLeave.stop();
+      for(int i = 1; i < 4; i++) {
+        RemoveMemberMessage msg = new RemoveMemberMessage(gmsJoinLeaveMemberId, mockMembers[i], "crashed");
+        msg.setSender(gmsJoinLeaveMemberId);
+        gmsJoinLeave.processMessage(msg);
+      }
+      Timeout to = new Timeout(2 * ServiceConfig.MEMBER_REQUEST_COLLECTION_INTERVAL, never());
+      verify(messenger, to).send( isA(NetworkPartitionMessage.class));
+                 
+    }finally {
+      System.getProperties().remove(GMSJoinLeave.BYPASS_DISCOVERY_PROPERTY);
+    }    
+  }
+  
+  @Test
+  public void testPreparedViewFoundDuringBecomeCoordinator() throws Exception {
+    initMocks(false);
+    prepareAndInstallView(mockMembers[0], createMemberList(mockMembers[0], gmsJoinLeaveMemberId));
+    
+    // a new member is joining
+    NetView preparedView = new NetView(gmsJoinLeave.getView(), gmsJoinLeave.getView().getViewId()+5);
+    mockMembers[1].setVmViewId(preparedView.getViewId());
+    preparedView.add(mockMembers[1]);
+    
+    InstallViewMessage msg = new InstallViewMessage(preparedView, null, true);
+    gmsJoinLeave.processMessage(msg);
+    
+    gmsJoinLeave.becomeCoordinatorForTest();
+
+    Thread.sleep(2000);
+    ViewCreator vc = gmsJoinLeave.getViewCreator();
+    
+    ViewAckMessage vack = new ViewAckMessage(gmsJoinLeaveMemberId, gmsJoinLeave.getPreparedView().getViewId(), true);
+    vack.setSender(mockMembers[0]);
+    gmsJoinLeave.processMessage(vack);
+    vack = new ViewAckMessage(gmsJoinLeaveMemberId, gmsJoinLeave.getPreparedView().getViewId(), true);
+    vack.setSender(mockMembers[1]);
+    gmsJoinLeave.processMessage(vack);
+    vack = new ViewAckMessage(gmsJoinLeaveMemberId, gmsJoinLeave.getPreparedView().getViewId(), true);
+    vack.setSender(gmsJoinLeaveMemberId);
+    gmsJoinLeave.processMessage(vack);
+    
+    int tries = 0;
+    while (!vc.waiting) {
+      if (tries > 30) {
+        Assert.fail("view creator never finished");
+      }
+      tries++;
+      Thread.sleep(1000);
+    }
+    NetView newView = gmsJoinLeave.getView();
+    System.out.println("new view is " + newView);
+    assertTrue(newView.contains(mockMembers[1]));
+    assertTrue(newView.getViewId() > preparedView.getViewId());
+  }
 }
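
testPreparedViewFoundDuringBecomeCoordinator polls vc.waiting once a second for up to 30 tries. A hypothetical helper of the kind sketched below (assuming a Java 8 toolchain; it is not part of the test sources) expresses the same wait with an explicit deadline:

    import java.util.function.BooleanSupplier;

    final class WaitFor {
      static void condition(BooleanSupplier condition, long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!condition.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("condition not met within " + timeoutMillis + "ms");
          }
          Thread.sleep(100);   // poll at a finer grain than one second
        }
      }
    }

It would be called as, for example, WaitFor.condition(() -> vc.waiting, 30000).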
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManagerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManagerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManagerJUnitTest.java
index 2b59ca5..e133625 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManagerJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManagerJUnitTest.java
@@ -25,9 +25,11 @@ import java.util.Date;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Properties;
+import java.util.Random;
 import java.util.Set;
 import java.util.Timer;
 
+import org.jgroups.util.UUID;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -35,16 +37,22 @@ import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.distributed.DistributedSystemDisconnectedException;
 import com.gemstone.gemfire.distributed.internal.AdminMessageType;
+import com.gemstone.gemfire.distributed.internal.DM;
+import com.gemstone.gemfire.distributed.internal.DMStats;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.DistributionConfigImpl;
 import com.gemstone.gemfire.distributed.internal.DistributionManager;
 import com.gemstone.gemfire.distributed.internal.DistributionMessage;
 import com.gemstone.gemfire.distributed.internal.HighPriorityAckedMessage;
 import com.gemstone.gemfire.distributed.internal.HighPriorityDistributionMessage;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.distributed.internal.MembershipListener;
+import com.gemstone.gemfire.distributed.internal.ReplyProcessor21;
 import com.gemstone.gemfire.distributed.internal.direct.DirectChannel;
 import com.gemstone.gemfire.distributed.internal.membership.DistributedMembershipListener;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
 import com.gemstone.gemfire.distributed.internal.membership.NetView;
+import com.gemstone.gemfire.distributed.internal.membership.gms.GMSMember;
 import com.gemstone.gemfire.distributed.internal.membership.gms.ServiceConfig;
 import com.gemstone.gemfire.distributed.internal.membership.gms.Services;
 import com.gemstone.gemfire.distributed.internal.membership.gms.Services.Stopper;
@@ -72,6 +80,7 @@ public class GMSMembershipManagerJUnitTest {
   private Services services;
   private ServiceConfig mockConfig;
   private DistributionConfig distConfig;
+  private Properties distProperties;
   private Authenticator authenticator;
   private HealthMonitor healthMonitor;
   private InternalDistributedMember myMemberId;
@@ -88,6 +97,8 @@ public class GMSMembershipManagerJUnitTest {
   @Before
   public void initMocks() throws Exception {
     Properties nonDefault = new Properties();
+    nonDefault.put(DistributionConfig.ACK_WAIT_THRESHOLD_NAME, "1");
+    nonDefault.put(DistributionConfig.ACK_SEVERE_ALERT_THRESHOLD_NAME, "10");
     nonDefault.put(DistributionConfig.DISABLE_TCP_NAME, "true");
     nonDefault.put(DistributionConfig.MCAST_PORT_NAME, "0");
     nonDefault.put(DistributionConfig.MCAST_TTL_NAME, "0");
@@ -96,6 +107,7 @@ public class GMSMembershipManagerJUnitTest {
     nonDefault.put(DistributionConfig.MEMBER_TIMEOUT_NAME, "2000");
     nonDefault.put(DistributionConfig.LOCATORS_NAME, "localhost[10344]");
     distConfig = new DistributionConfigImpl(nonDefault);
+    distProperties = nonDefault;
     RemoteTransportConfig tconfig = new RemoteTransportConfig(distConfig,
         DistributionManager.NORMAL_DM_TYPE);
     
@@ -129,9 +141,13 @@ public class GMSMembershipManagerJUnitTest {
     Timer t = new Timer(true);
     when(services.getTimer()).thenReturn(t);
     
+    Random r = new Random();
     mockMembers = new InternalDistributedMember[5];
     for (int i = 0; i < mockMembers.length; i++) {
       mockMembers[i] = new InternalDistributedMember("localhost", 8888 + i);
+      GMSMember m = (GMSMember)mockMembers[i].getNetMember();
+      UUID uuid = new UUID(r.nextLong(), r.nextLong());
+      m.setUUID(uuid);
     }
     members = new ArrayList<>(Arrays.asList(mockMembers));
 
@@ -366,5 +382,63 @@ public class GMSMembershipManagerJUnitTest {
     verify(dc).send(isA(GMSMembershipManager.class), isA(mockMembers.getClass()), isA(DistributionMessage.class), anyInt(), anyInt());
   }
   
+  /**
+   * This test ensures that the membership manager can accept an ID that
+   * does not have a UUID and replace it with one that does have a UUID
+   * from the current membership view.
+   */
+  @Test
+  public void testAddressesWithoutUUIDs() throws Exception {
+    manager.start();
+    manager.started();
+    manager.isJoining = true;
+
+    List<InternalDistributedMember> viewmembers = Arrays.asList(new InternalDistributedMember[] {mockMembers[0], mockMembers[1], myMemberId});
+    manager.installView(new NetView(myMemberId, 2, viewmembers, emptyMembersSet, emptyMembersSet));
+    
+    InternalDistributedMember[] destinations = new InternalDistributedMember[viewmembers.size()];
+    for (int i=0; i<destinations.length; i++) {
+      InternalDistributedMember id = viewmembers.get(i);
+      destinations[i] = new InternalDistributedMember(id.getHost(), id.getPort());
+    }
+    manager.checkAddressesForUUIDs(destinations);
+    // each destination w/o a UUID should have been replaced with the corresponding
+    // ID from the membership view
+    for (int i=0; i<destinations.length; i++) {
+      assertTrue(viewmembers.get(i) == destinations[i]);
+    }
+  }
+  
+  @Test
+  public void testReplyProcessorInitiatesSuspicion() throws Exception {
+    DM dm = mock(DM.class);
+    DMStats stats = mock(DMStats.class);
+    
+    InternalDistributedSystem system = InternalDistributedSystem.newInstanceForTesting(dm, distProperties);
+
+    when(dm.getStats()).thenReturn(stats);
+    when(dm.getSystem()).thenReturn(system);
+    when(dm.getCancelCriterion()).thenReturn(stopper);
+    when(dm.getMembershipManager()).thenReturn(manager);
+    when(dm.getViewMembers()).thenReturn(members);
+    when(dm.getDistributionManagerIds()).thenReturn(new HashSet(members));
+    when(dm.addMembershipListenerAndGetDistributionManagerIds(any(MembershipListener.class))).thenReturn(new HashSet(members));
+    
+    manager.start();
+    manager.started();
+    manager.isJoining = true;
+
+    List<InternalDistributedMember> viewmembers = Arrays.asList(new InternalDistributedMember[] {mockMembers[0], mockMembers[1], myMemberId});
+    manager.installView(new NetView(myMemberId, 2, viewmembers, emptyMembersSet, emptyMembersSet));
+
+    List<InternalDistributedMember> mbrs = new ArrayList<>(1);
+    mbrs.add(mockMembers[0]);
+    ReplyProcessor21 rp = new ReplyProcessor21(dm, mbrs);
+    rp.enableSevereAlertProcessing();
+    boolean result = rp.waitForReplies(2000);
+    assertFalse(result);  // the wait should have timed out
+    verify(healthMonitor, atLeastOnce()).checkIfAvailable(isA(InternalDistributedMember.class), isA(String.class), isA(Boolean.class));
+  }
+  
 }
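
testAddressesWithoutUUIDs exercises the checkAddressesForUUIDs change shown earlier in this series: each freshly constructed InternalDistributedMember (host and port only, no UUID) must come back as the identical canonical object from the installed view, which is why the final assertion compares with == rather than equals.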
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a5906e5b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/DataSerializableJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/DataSerializableJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/DataSerializableJUnitTest.java
index 91a3411..5f69393 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/DataSerializableJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/DataSerializableJUnitTest.java
@@ -3545,21 +3545,26 @@ public class DataSerializableJUnitTest extends TestCase
   }
 
   public void testObjectEnum() throws Exception {
-    System.getProperties().setProperty("DataSerializer.DEBUG", "true");
-    DAY_OF_WEEK e = DAY_OF_WEEK.SUN;
-    MONTH m = MONTH.FEB;
-    DataOutputStream out = getDataOutput();
-    DataSerializer.writeObject(e, out);
-    DataSerializer.writeObject(m, out);
-    out.flush();
-
-    DataInput in = getDataInput();
-    DAY_OF_WEEK e2 = (DAY_OF_WEEK)DataSerializer.readObject(in);
-    MONTH m2 = (MONTH)DataSerializer.readObject(in);
-    assertEquals(e, e2);
-    assertEquals(m, m2);
-    // Make sure there's nothing left in the stream
-    assertEquals(0, in.skipBytes(1));
+    final String propName = "DataSerializer.DEBUG";
+    System.setProperty(propName, "true");
+    try {
+      DAY_OF_WEEK e = DAY_OF_WEEK.SUN;
+      MONTH m = MONTH.FEB;
+      DataOutputStream out = getDataOutput();
+      DataSerializer.writeObject(e, out);
+      DataSerializer.writeObject(m, out);
+      out.flush();
+  
+      DataInput in = getDataInput();
+      DAY_OF_WEEK e2 = (DAY_OF_WEEK)DataSerializer.readObject(in);
+      MONTH m2 = (MONTH)DataSerializer.readObject(in);
+      assertEquals(e, e2);
+      assertEquals(m, m2);
+      // Make sure there's nothing left in the stream
+      assertEquals(0, in.skipBytes(1));
+    } finally {
+      System.getProperties().remove(propName);
+    }
   }
   
   /**
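
The fix above sets the DataSerializer.DEBUG flag with System.setProperty and removes it again in a finally block so the setting cannot leak into later tests. The same set-then-restore dance appears in several of these tests; a hypothetical reusable helper (not part of the Geode sources) could look like:

    public final class WithSystemProperty {
      public static void run(String key, String value, Runnable body) {
        String old = System.setProperty(key, value);
        try {
          body.run();
        } finally {
          // restore the previous value, or clear the key if there was none
          if (old == null) {
            System.clearProperty(key);
          } else {
            System.setProperty(key, old);
          }
        }
      }
    }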


[09/50] [abbrv] incubator-geode git commit: GEODE-563: Moving gfsh tests from closed

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/UserCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/UserCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/UserCommandsDUnitTest.java
new file mode 100644
index 0000000..3a8811d
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/UserCommandsDUnitTest.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.internal.ClassBuilder;
+import com.gemstone.gemfire.internal.ClassPathLoader;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.CommandManager;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Properties;
+
+/**
+ * Unit tests for configuring user commands.
+ *
+ * @author David Hoots
+ * @since 8.0
+ */
+public class UserCommandsDUnitTest extends CliCommandTestBase {
+  private static final long serialVersionUID = 1L;
+  final File jarDirectory = new File(
+      (new File(ClassPathLoader.class.getProtectionDomain().getCodeSource().getLocation().getPath())).getParent(),
+      "ext");
+  final File jarFile = new File(this.jarDirectory, "UserCommandsDUnit.jar");
+  boolean deleteJarDirectory = false;
+
+  public UserCommandsDUnitTest(String name) throws Exception {
+    super(name);
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    createUserCommandJarFile();
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    super.tearDown2();
+    if (this.deleteJarDirectory) {
+      FileUtil.delete(this.jarDirectory);
+    } else {
+      FileUtil.delete(this.jarFile);
+    }
+
+    System.clearProperty(CommandManager.USER_CMD_PACKAGES_PROPERTY);
+    ClassPathLoader.setLatestToDefault();
+    CommandManager.clearInstance();
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      private static final long serialVersionUID = 1L;
+
+      @Override
+      public void run() {
+        System.clearProperty(CommandManager.USER_CMD_PACKAGES_PROPERTY);
+        ClassPathLoader.setLatestToDefault();
+        CommandManager.clearInstance();
+      }
+    });
+  }
+
+  public void createUserCommandJarFile() throws IOException {
+    this.deleteJarDirectory = this.jarDirectory.mkdir();
+
+    StringBuffer stringBuffer = new StringBuffer();
+
+    stringBuffer.append("package junit.ucdunit;");
+    stringBuffer.append("import org.springframework.shell.core.CommandMarker;");
+    stringBuffer.append("import org.springframework.shell.core.annotation.CliAvailabilityIndicator;");
+    stringBuffer.append("import org.springframework.shell.core.annotation.CliCommand;");
+    stringBuffer.append("import org.springframework.shell.core.annotation.CliOption;");
+    stringBuffer.append("import com.gemstone.gemfire.management.cli.Result;");
+    stringBuffer.append("import com.gemstone.gemfire.management.internal.cli.CliUtil;");
+    stringBuffer.append("import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;");
+    stringBuffer.append("import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;");
+
+    stringBuffer.append("public final class UCDunitClass implements CommandMarker { public UCDunitClass() {}");
+    stringBuffer.append("@CliCommand(value = { \"ucdunitcmd\" }, help = \"ucdunitcmd help\")");
+    stringBuffer.append(
+        "public final Result ucdunitcmd(@CliOption(key = { \"name\" }, help = \"ucdunitcmd name help\") String name) {");
+    stringBuffer.append("return ResultBuilder.createInfoResult(\"ucdunitcmd \" + name); }");
+    stringBuffer.append("@CliAvailabilityIndicator({ \"ucdunitcmd\" })");
+    stringBuffer.append("public final boolean isAvailable() { return true; } }");
+
+    ClassBuilder classBuilder = new ClassBuilder();
+    final byte[] jarBytes = classBuilder.createJarFromClassContent("junit/ucdunit/UCDunitClass",
+        stringBuffer.toString());
+
+    final FileOutputStream outStream = new FileOutputStream(this.jarFile);
+    outStream.write(jarBytes);
+    outStream.close();
+  }
+
+  @Test
+  public void testCommandLineProperty() {
+    System.setProperty(CommandManager.USER_CMD_PACKAGES_PROPERTY, "junit.ucdunit");
+
+    ClassPathLoader.setLatestToDefault();
+    CommandManager.clearInstance();
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      private static final long serialVersionUID = 1L;
+
+      @Override
+      public void run() {
+        System.setProperty(CommandManager.USER_CMD_PACKAGES_PROPERTY, "junit.ucdunit");
+        ClassPathLoader.setLatestToDefault();
+        CommandManager.clearInstance();
+      }
+    });
+
+    createDefaultSetup(null);
+
+    CommandResult cmdResult = executeCommand("ucdunitcmd");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+  }
+
+  @Test
+  public void testGemFireProperty() {
+    System.setProperty(CommandManager.USER_CMD_PACKAGES_PROPERTY, "junit.ucdunit");
+
+    ClassPathLoader.setLatestToDefault();
+    CommandManager.clearInstance();
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      private static final long serialVersionUID = 1L;
+
+      @Override
+      public void run() {
+        ClassPathLoader.setLatestToDefault();
+        CommandManager.clearInstance();
+      }
+    });
+
+    Properties properties = new Properties();
+    properties.setProperty(DistributionConfig.USER_COMMAND_PACKAGES, "junit.ucdunit");
+    createDefaultSetup(properties);
+
+    CommandResult cmdResult = executeCommand("ucdunitcmd");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+  }
+}
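
For readability, the command class that the StringBuffer in createUserCommandJarFile assembles is equivalent to the following source, reconstructed verbatim from the string fragments (including two imports the class never uses):

    package junit.ucdunit;

    import org.springframework.shell.core.CommandMarker;
    import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
    import org.springframework.shell.core.annotation.CliCommand;
    import org.springframework.shell.core.annotation.CliOption;
    import com.gemstone.gemfire.management.cli.Result;
    import com.gemstone.gemfire.management.internal.cli.CliUtil;
    import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
    import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;

    public final class UCDunitClass implements CommandMarker {
      public UCDunitClass() {}

      @CliCommand(value = { "ucdunitcmd" }, help = "ucdunitcmd help")
      public final Result ucdunitcmd(
          @CliOption(key = { "name" }, help = "ucdunitcmd name help") String name) {
        return ResultBuilder.createInfoResult("ucdunitcmd " + name);
      }

      @CliAvailabilityIndicator({ "ucdunitcmd" })
      public final boolean isAvailable() {
        return true;
      }
    }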


[25/50] [abbrv] incubator-geode git commit: GEODE-623: add unit test for RefCountChangeInfo

Posted by kl...@apache.org.
GEODE-623: add unit test for RefCountChangeInfo


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/68dfcab1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/68dfcab1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/68dfcab1

Branch: refs/heads/feature/GEODE-291
Commit: 68dfcab10d68b1babb2035bc4c87c93acf52077c
Parents: 476c6cd
Author: Scott Jewell <sj...@pivotal.io>
Authored: Fri Dec 4 13:06:58 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Tue Dec 8 15:45:57 2015 -0800

----------------------------------------------------------------------
 .../internal/offheap/RefCountChangeInfo.java    |  43 +++--
 .../offheap/RefCountChangeInfoJUnitTest.java    | 159 +++++++++++++++++++
 2 files changed, 185 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/68dfcab1/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfo.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfo.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfo.java
index 56cab97..67688ed 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfo.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfo.java
@@ -68,20 +68,11 @@ public class RefCountChangeInfo extends Throwable {
       ps.print("@");
       ps.print(System.identityHashCode(this.owner));
     }
+    
     ps.println(": ");
-    StackTraceElement[] trace = getStackTrace();
-    // skip the initial elements from SimpleMemoryAllocatorImpl
-    int skip=0;
-    for (int i=0; i < trace.length; i++) {
-      if (!trace[i].getClassName().contains("SimpleMemoryAllocatorImpl")) {
-        skip = i;
-        break;
-      }
-    }
-    for (int i=skip; i < trace.length; i++) {
-      ps.println("\tat " + trace[i]);
-    }
+    cleanStackTrace(ps); 
     ps.flush();
+    
     return baos.toString();
   }
   
@@ -99,14 +90,32 @@ public class RefCountChangeInfo extends Throwable {
   }
 
   private String stackTraceString;
-  private String getStackTraceString() {
+  String getStackTraceString() {
     String result = this.stackTraceString;
     if (result == null) {
-      StringPrintWriter spr = new StringPrintWriter();
-      printStackTrace(spr);
-      result = spr.getBuilder().toString();
-      this.stackTraceString = result;
+      ByteArrayOutputStream baos = new ByteArrayOutputStream(64*1024);
+      PrintStream spr = new PrintStream(baos);
+
+      cleanStackTrace(spr);
+      result = baos.toString();
+      this.stackTraceString = result;
     }
     return result;
   }
+  
+  private void cleanStackTrace(PrintStream ps) {
+    StackTraceElement[] trace = getStackTrace();
+    // skip the initial elements from the offheap package
+    int skip=0;
+    for (int i=0; i < trace.length; i++) {
+      if (!trace[i].getClassName().contains("com.gemstone.gemfire.internal.offheap")) {
+        skip = i;
+        break;
+      }
+    }
+    for (int i=skip; i < trace.length; i++) {
+      ps.println("\tat " + trace[i]);
+    }
+  }
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/68dfcab1/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfoJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfoJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfoJUnitTest.java
new file mode 100644
index 0000000..fc726ce
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfoJUnitTest.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.internal.offheap;
+
+import static org.junit.Assert.*;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.test.junit.categories.UnitTest;
+
+@Category(UnitTest.class)
+public class RefCountChangeInfoJUnitTest {
+
+  @Before
+  public void setUp() throws Exception {
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void testGetOwner() {
+
+    String owner1 = new String("Info1");
+    String notOwner1 = new String("notInfo1");
+
+    RefCountChangeInfo refInfo1 = new RefCountChangeInfo(true, 1, owner1);
+
+    assertEquals(owner1, refInfo1.getOwner());
+
+    try {
+      assertEquals(owner1, notOwner1);
+      fail("Expected owner1 != notOwner1");
+    } catch (AssertionError e) {
+      // Ignore expected error
+    }
+
+  }
+
+  @Test
+  public void testGetDupCount() {
+
+    String owner1 = new String("Info1");
+    String owner2 = new String("Info2");
+
+    RefCountChangeInfo refInfo1 = new RefCountChangeInfo(true, 1, owner1);
+    assertEquals(0, refInfo1.getDupCount());
+
+    RefCountChangeInfo refInfo2 = new RefCountChangeInfo(true, 1, owner1);
+    assertTrue(refInfo1.isDuplicate(refInfo2));
+    assertEquals(1, refInfo1.getDupCount());
+
+    // owner not used in isDup
+    RefCountChangeInfo refInfo3 = new RefCountChangeInfo(true, 1, owner2);
+    assertTrue(refInfo1.isDuplicate(refInfo3));
+    assertEquals(2, refInfo1.getDupCount());
+
+    RefCountChangeInfo refInfo4 = new RefCountChangeInfo(false, 1, owner2);
+    assertFalse(refInfo1.isDuplicate(refInfo4));
+    assertEquals(2, refInfo1.getDupCount());
+
+  }
+
+  @Test
+  public void testDecDupCount() {
+
+    String owner1 = new String("Info1");
+    String owner2 = new String("Info2");
+
+    RefCountChangeInfo refInfo1 = new RefCountChangeInfo(true, 1, owner1);
+    assertEquals(0, refInfo1.getDupCount());
+
+    RefCountChangeInfo refInfo2 = new RefCountChangeInfo(true, 1, owner1);
+    assertTrue(refInfo1.isDuplicate(refInfo2));
+    assertEquals(1, refInfo1.getDupCount());
+
+    // owner not used in isDuplicate check
+    RefCountChangeInfo refInfo3 = new RefCountChangeInfo(true, 1, owner2);
+    assertTrue(refInfo1.isDuplicate(refInfo3));
+    assertEquals(2, refInfo1.getDupCount());
+
+    refInfo1.decDupCount();
+    assertEquals(1, refInfo1.getDupCount());
+
+    refInfo1.decDupCount();
+    assertEquals(0, refInfo1.getDupCount());
+
+  }
+
+  @Test
+  public void testToString() {
+
+    String owner1 = new String("Info1");
+
+    RefCountChangeInfo refInfo1 = new RefCountChangeInfo(true, 1, owner1);
+
+    RefCountChangeInfo refInfo2 = new RefCountChangeInfo(true, 1, owner1);
+    assertEquals(refInfo1.toString(), refInfo2.toString());
+
+    RefCountChangeInfo refInfo3 = new RefCountChangeInfo(false, 1, owner1);
+    try {
+      assertEquals(refInfo1.toString(), refInfo3.toString());
+      fail("expected refInfo1.toString() != refInfo3.toString()");
+    } catch (AssertionError e) {
+      // ignore expected AssertionError
+    }
+
+    RefCountChangeInfo refInfo4 = new RefCountChangeInfo(true, 2, owner1);
+    try {
+      assertEquals(refInfo1.toString(), refInfo4.toString());
+      fail("expected refInfo1.toString() != refInfo4.toString()");
+    } catch (AssertionError e) {
+      // ignore expected AssertionError
+    }
+
+  }
+
+  @Test
+  public void testIsDuplicate() {
+
+    String owner1 = new String("Info1");
+    String owner2 = new String("Info2");
+
+    RefCountChangeInfo refInfo1 = new RefCountChangeInfo(true, 1, owner1);
+    assertEquals(0, refInfo1.getDupCount());
+
+    RefCountChangeInfo refInfo2 = new RefCountChangeInfo(true, 1, owner1);
+    assertTrue(refInfo1.isDuplicate(refInfo2));
+    assertEquals(1, refInfo1.getDupCount());
+
+    RefCountChangeInfo refInfo3 = new RefCountChangeInfo(false, 1, owner1);
+    assertFalse(refInfo1.isDuplicate(refInfo3));
+    assertEquals(1, refInfo1.getDupCount());
+
+    RefCountChangeInfo refInfo4 = new RefCountChangeInfo(true, 1, owner2);
+    assertTrue(refInfo1.isDuplicate(refInfo4));
+    assertEquals(2, refInfo1.getDupCount());
+
+  }
+
+}


[37/50] [abbrv] incubator-geode git commit: GEODE-18: Added missing source headers

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gradle/dependency-versions.properties
----------------------------------------------------------------------
diff --git a/gradle/dependency-versions.properties b/gradle/dependency-versions.properties
index 3e6b6a5..d02110c 100644
--- a/gradle/dependency-versions.properties
+++ b/gradle/dependency-versions.properties
@@ -1,3 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # Buildscript Dependencies
 gradle-maven-publish-auth.version = 2.0.1
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gradle/rat.gradle
----------------------------------------------------------------------
diff --git a/gradle/rat.gradle b/gradle/rat.gradle
index ab2da4c..361aba9 100644
--- a/gradle/rat.gradle
+++ b/gradle/rat.gradle
@@ -59,6 +59,9 @@ rat {
     'gemfire-spark-connector/project/build.properties',
     '**/log4j2*.xml',
     'gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.xml',
+
+    // TODO - go through all the gemfire-site files!!
+    '**/gemfire-site/**',
  
     // ANTLR generated files
     'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexer.java',

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/settings.gradle
----------------------------------------------------------------------
diff --git a/settings.gradle b/settings.gradle
index 50b7e40..c57c8df 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 rootProject.name = 'gemfire'
 
 include 'gemfire-common'


[31/50] [abbrv] incubator-geode git commit: GEODE-608: Expand the exclude directives

Posted by kl...@apache.org.
GEODE-608: Expand the exclude directives

GEODE-18 has a RAT excludes file attached. This change adds the
excludes in that file to the gradle build.
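
For readers scanning the large diff below: the exclude directives are Ant-style path patterns supplied to the rat { } block of the org.nosphere.apache.rat Gradle plugin in the root build.gradle. A minimal sketch of the shape of that configuration, using a few entries taken from the diff (the complete list appears in the patch itself):

    apply plugin: "org.nosphere.apache.rat"

    rat {
      excludes = [
        // version control metadata and generated build output
        '.git/**',
        '**/build/**',
        // plain text formats that carry no Apache license header
        '**/*.txt',
        '**/*.md'
      ]
    }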


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/fddd33f3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/fddd33f3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/fddd33f3

Branch: refs/heads/feature/GEODE-291
Commit: fddd33f3cbced1369edd95caedd7f7b98ac509cc
Parents: f133ff1
Author: Anthony Baker <ab...@pivotal.io>
Authored: Sat Nov 28 11:33:42 2015 -0800
Committer: Anthony Baker <ab...@pivotal.io>
Committed: Thu Dec 10 09:44:56 2015 -0800

----------------------------------------------------------------------
 build.gradle | 136 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 134 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fddd33f3/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 6f5c2a3..4d0216b 100755
--- a/build.gradle
+++ b/build.gradle
@@ -15,11 +15,22 @@ apply plugin: "org.nosphere.apache.rat"
 
 rat {
   excludes = [
+    // git
     '.git/**',
     '**/.gitignore',
+    
+    // gradle
     '**/.gradle/**',
-    '.gradle',
+    '**/build.gradle',
+    'gradlew',
+    'gradlew.bat',
+    'gradle.properties',
+    'settings.gradle',
+    'gradle/dependency-versions.properties',
+    'gradle/wrapper/gradle-wrapper.properties',
     '**/build/**',
+    
+    // IDE
     '**/.project',
     '**/.classpath',
     '**/.settings/**',
@@ -27,7 +38,128 @@ rat {
     '*.iml',
     '.idea/**',
 
-    '**/doc-files/*.fig'
+    // text files
+    '**/*.fig',
+    '**/*.txt',
+    '**/*.md',
+    '**/*.json',
+    '**/*.tx0',
+    '**/*.txo',
+    
+    // binary files
+    '**/*.cer',
+    '**/*.gfs',
+    '**/keystore',
+    '**/*.ser',
+    '**/*.xls',
+    
+    // other text files
+    'gemfire-assembly/src/main/dist/bin/gfsh',
+    'gemfire-assembly/src/main/dist/bin/gfsh-completion.bash',
+    'gemfire-assembly/src/main/dist/bin/gfsh.bat',
+    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/fixantlr.sh',
+    'gemfire-spark-connector/project/plugins.sbt',
+    'gemfire-spark-connector/project/build.properties',
+    'gemfire-web-api/src/main/webapp/docs/o2c.html',
+    'gemfire-web-api/src/main/webapp/docs/index.html',
+    
+    // XML
+    'etc/eclipseFormatterProfile.xml',
+    'etc/intellijIdeaCodeStyle.xml',
+    'log4j2*.xml',
+    '**/example*cache.xml',
+    'gemfire-core/src/test/resources/**/*JUnitTest*.xml',
+    'gemfire-core/src/test/resources/**/IndexCreation.xml',
+    'gemfire-core/src/test/resources/**/PRIndexCreation.xml',
+    'gemfire-core/src/test/resources/**/PartitionRegionCacheExample*.xml',
+    'gemfire-core/src/test/resources/**/attributesUnordered.xml',
+    'gemfire-core/src/test/resources/**/bad*.xml',
+    'gemfire-core/src/test/resources/**/bug44710.xml',
+    'gemfire-core/src/test/resources/**/cachejta.xml',
+    'gemfire-core/src/test/resources/**/cachequeryindex*.xml',
+    'gemfire-core/src/test/resources/**/callback*.xml',
+    'gemfire-core/src/test/resources/**/coLocation*.xml',
+    'gemfire-core/src/test/resources/**/ewtest.xml',
+    'gemfire-core/src/test/resources/**/incorrect*.xml',
+    'gemfire-core/src/test/resources/**/index-creation-*.xml',
+    'gemfire-core/src/test/resources/**/index-recovery-overflow.xml',
+    'gemfire-core/src/test/resources/**/loaderNotLoader.xml',
+    'gemfire-core/src/test/resources/**/malformed.xml',
+    'gemfire-core/src/test/resources/**/mixed_diskstore_disk*.xml',
+    'gemfire-core/src/test/resources/**/namedAttributes.xml',
+    'gemfire-core/src/test/resources/**/partitioned*.xml',
+    'gemfire-core/src/test/resources/**/same*.xml',
+    'gemfire-core/src/test/resources/**/spring-gemfire-context.xml',
+    'gemfire-core/src/test/resources/**/test*.xml',
+    'gemfire-core/src/test/resources/**/unknownNamedAttributes.xml',
+    
+    // ANTLR generated files
+    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexer.java',
+    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexerTokenTypes.java',
+    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLParser.java',
+    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexerTokenTypes.txt',
+    'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/oql.g',
+    
+    // Service META-INF
+    '**/META-INF/services/org.xml.sax.ext.EntityResolver2',
+    '**/META-INF/services/com.gemstone.gemfire.internal.cache.CacheService',
+    '**/META-INF/services/com.gemstone.gemfire.internal.cache.xmlcache.XmlParser',
+    '**/META-INF/services/org.springframework.shell.core.CommandMarker',
+
+    // --- Other Licenses ---
+    
+    // Copied from other ASF projects 
+    'gemfire-core/src/main/resources/com/gemstone/gemfire/admin/jmx/internal/doc-files/mbeans-descriptors.dtd',
+    'gemfire-core/src/main/resources/com/gemstone/gemfire/admin/jmx/mbeans-descriptors.xml',
+
+    // Public Domain http://meyerweb.com/eric/tools/css/reset/
+    'gemfire-web-api/src/main/webapp/docs/css/reset.css',
+
+    // Public Domain - http://creativecommons.org/licenses/publicdomain
+    'SynchronousQueueNoSpin.java',
+
+    // JSON License - permissive, used for Good, not Evil
+    'gemfire-json/src/main/java/org/json/CDL.java',
+    'gemfire-json/src/main/java/org/json/Cookie.java',
+    'gemfire-json/src/main/java/org/json/CookieList.java',
+    'gemfire-json/src/main/java/org/json/CDL.java',
+    'gemfire-json/src/main/java/org/json/Cookie.java',
+    'gemfire-json/src/main/java/org/json/CookieList.java',
+    'gemfire-json/src/main/java/org/json/HTTP.java',
+    'gemfire-json/src/main/java/org/json/HTTPTokener.java',
+    'gemfire-json/src/main/java/org/json/JSONArray.java',
+    'gemfire-json/src/main/java/org/json/JSONException.java',
+    'gemfire-json/src/main/java/org/json/JSONML.java',
+    'gemfire-json/src/main/java/org/json/JSONObject.java',
+    'gemfire-json/src/main/java/org/json/JSONString.java',
+    'gemfire-json/src/main/java/org/json/JSONStringer.java',
+    'gemfire-json/src/main/java/org/json/JSONTokener.java',
+    'gemfire-json/src/main/java/org/json/JSONWriter.java',
+    'gemfire-json/src/main/java/org/json/XML.java',
+    'gemfire-json/src/main/java/org/json/XMLTokener.java',
+
+    // MIT License
+    'gemfire-web-api/src/main/webapp/docs/lib/backbone-min.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/jquery-1.8.0.min.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/jquery.ba-bbq.min.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/jquery.slideto.min.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/jquery.wiggle.min.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/underscore-min.js',
+    'gemfire-site/src/jbake/**',
+
+    // MIT or ISC
+    'gemfire-web-api/src/main/webapp/docs/lib/shred.bundle.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/shred/content.js',
+
+    // BSD License
+    'gemfire-web-api/src/main/webapp/docs/lib/highlight.7.3.pack.js',
+
+    // Apache License
+    'gemfire-web-api/src/main/webapp/docs/lib/swagger-oauth.js',
+    'gemfire-web-api/src/main/webapp/docs/lib/swagger.js',
+    'gemfire-web-api/src/main/webapp/docs/css/screen.css',
+    'gemfire-web-api/src/main/webapp/docs/swagger-ui.js',
+    'gemfire-web-api/src/main/webapp/docs/swagger-ui.min.js'
   ]
 }
 


[40/50] [abbrv] incubator-geode git commit: Add unit tests for GMSHealthMonitor tcp check

Posted by kl...@apache.org.
Add unit tests for GMSHealthMonitor tcp check


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7cbb5db0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7cbb5db0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7cbb5db0

Branch: refs/heads/feature/GEODE-291
Commit: 7cbb5db091107267da87243259c09d9d047ffbbf
Parents: 35394ef
Author: Jason Huynh <hu...@gmail.com>
Authored: Thu Dec 10 10:02:24 2015 -0800
Committer: Jason Huynh <hu...@gmail.com>
Committed: Thu Dec 10 10:17:00 2015 -0800

----------------------------------------------------------------------
 .../membership/gms/fd/GMSHealthMonitor.java     | 34 ++++++++----
 .../gms/fd/GMSHealthMonitorJUnitTest.java       | 57 ++++++++++++++++++--
 2 files changed, 76 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7cbb5db0/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
index cc64f9b..7709114 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
@@ -472,6 +472,7 @@ public class GMSHealthMonitor implements HealthMonitor, MessageHandler {
     TimeStamp ts = memberTimeStamps.get(suspectMember);
     return (ts != null && (System.currentTimeMillis() - ts.getTime()) <= memberTimeout);
   }
+  
 
   /**
    * During final check, establish TCP connection between current member and suspect member.
@@ -486,6 +487,26 @@ public class GMSHealthMonitor implements HealthMonitor, MessageHandler {
       logger.debug("Checking member {} with TCP socket connection {}:{}.", suspectMember, suspectMember.getInetAddress(), port);
       clientSocket = SocketCreator.getDefaultInstance().connect(suspectMember.getInetAddress(), port,
           (int)memberTimeout, new ConnectTimeoutTask(services.getTimer(), memberTimeout), false, -1, false);
+      return doTCPCheckMember(suspectMember, clientSocket);
+    }
+    catch (IOException e) {
+      logger.debug("Unexpected exception", e);
+    } 
+    finally {
+      try {
+        if (clientSocket != null) {
+          clientSocket.close();
+        }
+      } catch (IOException e) {
+        logger.trace("Unexpected exception", e);
+      }
+    }
+    return false;
+  }
+
+  //Package protected for testing purposes
+  boolean doTCPCheckMember(InternalDistributedMember suspectMember, Socket clientSocket) {
+    try {
       if (clientSocket.isConnected()) {
         clientSocket.setSoTimeout((int) services.getConfig().getMemberTimeout());
         InputStream in = clientSocket.getInputStream();
@@ -513,17 +534,8 @@ public class GMSHealthMonitor implements HealthMonitor, MessageHandler {
       logger.debug("tcp/ip connection timed out");
       return false;
     } catch (IOException e) {
-      logger.debug("Unexpected exception", e);
-    } finally {
-      try {
-        if (clientSocket != null) {
-          clientSocket.close();
-        }
-      } catch (IOException e) {
-        logger.trace("Unexpected exception", e);
-      }
-    }
-
+      logger.trace("Unexpected exception", e);
+    } 
     return false;
   }
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7cbb5db0/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
index c4ac3a6..eb17ca8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitorJUnitTest.java
@@ -79,7 +79,7 @@ public class GMSHealthMonitorJUnitTest {
 
   @Before
   public void initMocks() throws UnknownHostException {
-    System.setProperty("gemfire.bind-address", "localhost");
+    //System.setProperty("gemfire.bind-address", "localhost");
     mockDistConfig = mock(DistributionConfig.class);
     mockConfig = mock(ServiceConfig.class);
     messenger = mock(Messenger.class);
@@ -121,7 +121,7 @@ public class GMSHealthMonitorJUnitTest {
   @After
   public void tearDown() {
     gmsHealthMonitor.stop();
-    System.getProperties().remove("gemfire.bind-address");
+    //System.getProperties().remove("gemfire.bind-address");
   }
 
   @Test
@@ -562,12 +562,61 @@ public class GMSHealthMonitorJUnitTest {
     when(smm.getDSFID()).thenCallRealMethod();
     gmsHealthMonitor.processMessage(smm);
   }
-
-  private GMSMember createGMSMember(short version, int viewId, long msb, long lsb) {
+  
+  @Test
+  public void testDoTCPCheckMemberWithOkStatus() throws Exception {
+    executeTestDoTCPCheck(GMSHealthMonitor.OK, true);
+  }
+  
+  @Test
+  public void testDoTCPCheckMemberWithErrorStatus() throws Exception {
+    executeTestDoTCPCheck(GMSHealthMonitor.ERROR, false);
+  }
+  
+  @Test
+  public void testDoTCPCheckMemberWithUnknownStatus() throws Exception {
+    executeTestDoTCPCheck(GMSHealthMonitor.ERROR + 100, false);
+  }
+  
+  private void executeTestDoTCPCheck(int receivedStatus, boolean expectedResult) throws Exception {
+    InternalDistributedMember otherMember = createInternalDistributedMember(Version.CURRENT_ORDINAL, 0, 1, 1);
+    InternalDistributedMember gmsMember = createInternalDistributedMember(Version.CURRENT_ORDINAL, 0, 1, 1);
+    
+    //Set up the incoming/received bytes: write the status byte to a byte array and wrap it in an input stream for the fake socket
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    baos.write(receivedStatus);
+    
+    byte[] receivedBytes = baos.toByteArray();
+    InputStream mockInputStream = new ByteArrayInputStream(receivedBytes);
+    
+    Socket fakeSocket = mock(Socket.class);
+    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+    when(fakeSocket.getInputStream()).thenReturn(mockInputStream);
+    when(fakeSocket.getOutputStream()).thenReturn(outputStream);
+    when(fakeSocket.isConnected()).thenReturn(true);
+    
+    Assert.assertEquals(expectedResult, gmsHealthMonitor.doTCPCheckMember(otherMember, fakeSocket));
+    
+    //Verify that the gms member information was written out by the tcp check
+    byte[] bytesWritten = outputStream.toByteArray();
+    Assert.assertArrayEquals(writeMemberToBytes((GMSMember)gmsMember.getNetMember()), bytesWritten);
+  }
+  
+  private InternalDistributedMember createInternalDistributedMember(short version, int viewId, long msb, long lsb) throws UnknownHostException{
+    GMSMember gmsMember = createGMSMember(version, viewId, msb, lsb);
+    InternalDistributedMember idm = new InternalDistributedMember("localhost", 9000, Version.CURRENT, gmsMember);
+    //Set the expected test viewId on the IDM and also reset it on the gms member
+    idm.setVmViewId(viewId);
+    gmsMember.setBirthViewId(viewId);
+    return idm;
+  }
+  
+  private GMSMember createGMSMember(short version, int viewId, long msb, long lsb) throws UnknownHostException{
     GMSMember gmsMember = new GMSMember();
     gmsMember.setVersionOrdinal(version);
     gmsMember.setBirthViewId(viewId);
     gmsMember.setUUID(new UUID(msb, lsb));
+    gmsMember.setInetAddr(InetAddress.getLocalHost());
     return gmsMember;
   }
   


[39/50] [abbrv] incubator-geode git commit: GEODE-18: Added missing source headers

Posted by kl...@apache.org.
GEODE-18: Added missing source headers


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/35394efd
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/35394efd
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/35394efd

Branch: refs/heads/feature/GEODE-291
Commit: 35394efd5221dff5c99dd7021ce1dfa0083ae828
Parents: c9f677b
Author: Anthony Baker <ab...@pivotal.io>
Authored: Wed Dec 9 14:59:29 2015 -0800
Committer: Anthony Baker <ab...@pivotal.io>
Committed: Thu Dec 10 09:44:57 2015 -0800

----------------------------------------------------------------------
 build.gradle                                    | 17 ++++++++++++++
 gemfire-assembly/build.gradle                   | 17 ++++++++++++++
 gemfire-assembly/src/main/dist/bin/gfsh         | 14 ++++++++++++
 .../src/main/dist/bin/gfsh-completion.bash      | 15 ++++++++++++
 gemfire-assembly/src/main/dist/bin/gfsh.bat     | 14 ++++++++++++
 .../SharedConfigurationEndToEndDUnitTest.java   | 21 ++++++++++++-----
 gemfire-common/build.gradle                     | 17 ++++++++++++++
 gemfire-core/build.gradle                       | 17 ++++++++++++++
 gemfire-core/src/jca/ra.xml                     | 17 +++++++++++++-
 .../client/doc-files/example-client-cache.xml   | 16 +++++++++++++
 .../gemfire/cache/doc-files/example-cache.xml   | 16 +++++++++++++
 .../gemfire/cache/doc-files/example2-cache.xml  | 17 ++++++++++++++
 .../gemfire/cache/doc-files/example3-cache.xml  | 16 +++++++++++++
 .../cache/query/internal/parse/fixantlr.sh      | 15 ++++++++++++
 .../gemfire/cache/query/internal/parse/oql.g    | 17 ++++++++++++++
 .../internal/direct/DirectChannelListener.java  | 16 +++++++++++++
 .../tools/gfsh/app/windowsbindings.properties   | 15 ++++++++++++
 .../internal/membership/NetViewJUnitTest.java   | 16 +++++++++++++
 .../membership/gms/GMSMemberJUnitTest.java      | 16 +++++++++++++
 .../InstantiatorPropagationDUnitTest.java       | 16 +++++++++++++
 .../cache/wan/AsyncEventQueueTestBase.java      | 20 +++++++++++-----
 .../asyncqueue/AsyncEventListenerDUnitTest.java | 21 ++++++++++++-----
 .../AsyncEventListenerOffHeapDUnitTest.java     | 16 +++++++++++++
 .../AsyncEventQueueStatsDUnitTest.java          | 21 ++++++++++++-----
 .../ConcurrentAsyncEventQueueDUnitTest.java     | 24 ++++++++++++--------
 ...ncurrentAsyncEventQueueOffHeapDUnitTest.java | 16 +++++++++++++
 .../CommonParallelAsyncEventQueueDUnitTest.java | 24 ++++++++++++--------
 ...ParallelAsyncEventQueueOffHeapDUnitTest.java | 16 +++++++++++++
 .../ClientCacheFactoryJUnitTest_single_pool.xml | 17 ++++++++++++++
 .../gemfire/cache/query/dunit/IndexCreation.xml | 17 ++++++++++++++
 .../functional/index-creation-with-eviction.xml | 17 ++++++++++++++
 .../index-creation-without-eviction.xml         | 17 ++++++++++++++
 .../functional/index-recovery-overflow.xml      | 17 ++++++++++++++
 .../query/internal/index/cachequeryindex.xml    | 17 ++++++++++++++
 .../internal/index/cachequeryindexwitherror.xml | 17 ++++++++++++++
 .../cache/query/partitioned/PRIndexCreation.xml | 17 ++++++++++++++
 .../gemfire/cache30/attributesUnordered.xml     | 17 ++++++++++++++
 .../com/gemstone/gemfire/cache30/badFloat.xml   | 17 ++++++++++++++
 .../com/gemstone/gemfire/cache30/badInt.xml     | 17 ++++++++++++++
 .../gemfire/cache30/badKeyConstraintClass.xml   | 17 ++++++++++++++
 .../com/gemstone/gemfire/cache30/badScope.xml   | 17 ++++++++++++++
 .../com/gemstone/gemfire/cache30/bug44710.xml   | 17 ++++++++++++++
 .../gemfire/cache30/callbackNotDeclarable.xml   | 17 ++++++++++++++
 .../gemfire/cache30/callbackWithException.xml   | 17 ++++++++++++++
 .../com/gemstone/gemfire/cache30/coLocation.xml | 17 ++++++++++++++
 .../gemstone/gemfire/cache30/coLocation3.xml    | 17 ++++++++++++++
 .../com/gemstone/gemfire/cache30/ewtest.xml     | 17 ++++++++++++++
 .../cache30/examples_3_0/example-cache.xml      | 16 +++++++++++++
 .../cache30/examples_4_0/example-cache.xml      | 16 +++++++++++++
 .../gemfire/cache30/loaderNotLoader.xml         | 17 ++++++++++++++
 .../com/gemstone/gemfire/cache30/malformed.xml  | 17 ++++++++++++++
 .../gemfire/cache30/namedAttributes.xml         | 17 ++++++++++++++
 .../gemfire/cache30/partitionedRegion.xml       | 17 ++++++++++++++
 .../gemfire/cache30/partitionedRegion51.xml     | 17 ++++++++++++++
 .../gemstone/gemfire/cache30/sameRootRegion.xml | 17 ++++++++++++++
 .../gemstone/gemfire/cache30/sameSubregion.xml  | 17 ++++++++++++++
 .../gemfire/cache30/unknownNamedAttributes.xml  | 17 ++++++++++++++
 .../internal/SharedConfigurationJUnitTest.xml   | 17 ++++++++++++++
 .../internal/cache/BackupJUnitTest.cache.xml    | 17 ++++++++++++++
 .../internal/cache/DiskRegCacheXmlJUnitTest.xml | 16 +++++++++++++
 .../cache/PartitionRegionCacheExample1.xml      | 17 ++++++++++++++
 .../cache/PartitionRegionCacheExample2.xml      | 17 ++++++++++++++
 .../incorrect_bytes_threshold.xml               | 17 ++++++++++++++
 .../faultyDiskXMLsForTesting/incorrect_dir.xml  | 17 ++++++++++++++
 .../incorrect_dir_size.xml                      | 17 ++++++++++++++
 .../incorrect_max_oplog_size.xml                | 17 ++++++++++++++
 .../incorrect_roll_oplogs_value.xml             | 17 ++++++++++++++
 .../incorrect_sync_value.xml                    | 17 ++++++++++++++
 .../incorrect_time_interval.xml                 | 17 ++++++++++++++
 .../mixed_diskstore_diskdir.xml                 | 17 ++++++++++++++
 .../mixed_diskstore_diskwriteattrs.xml          | 17 ++++++++++++++
 .../tier/sockets/RedundancyLevelJUnitTest.xml   | 16 +++++++++++++
 ...testDTDFallbackWithNonEnglishLocal.cache.xml | 17 ++++++++++++++
 .../gemstone/gemfire/internal/jta/cachejta.xml  | 17 ++++++++++++++
 ...dNewNodeJUnitTest.testAddNewNodeNewNamed.xml | 17 ++++++++++++++
 ...ewNodeJUnitTest.testAddNewNodeNewUnnamed.xml | 17 ++++++++++++++
 ...itTest.testAddNewNodeNewUnnamedExtension.xml | 17 ++++++++++++++
 ...NodeJUnitTest.testAddNewNodeReplaceNamed.xml | 17 ++++++++++++++
 ...deJUnitTest.testAddNewNodeReplaceUnnamed.xml | 17 ++++++++++++++
 ...st.testAddNewNodeReplaceUnnamedExtension.xml | 17 ++++++++++++++
 ...sAddNewNodeJUnitTest.testDeleteNodeNamed.xml | 17 ++++++++++++++
 ...ddNewNodeJUnitTest.testDeleteNodeUnnamed.xml | 17 ++++++++++++++
 ...JUnitTest.testDeleteNodeUnnamedExtension.xml | 17 ++++++++++++++
 .../utils/XmlUtilsAddNewNodeJUnitTest.xml       | 17 ++++++++++++++
 ...Test.testBuildSchemaLocationMapAttribute.xml | 17 ++++++++++++++
 ...testBuildSchemaLocationMapEmptyAttribute.xml | 17 ++++++++++++++
 ...ationMapMapOfStringListOfStringAttribute.xml | 17 ++++++++++++++
 ....testBuildSchemaLocationMapNullAttribute.xml | 17 ++++++++++++++
 ...XmlUtilsJUnitTest.testQuerySingleElement.xml | 17 ++++++++++++++
 .../src/test/resources/jta/cachejta.xml         | 17 ++++++++++++++
 .../resources/spring/spring-gemfire-context.xml | 17 ++++++++++++++
 gemfire-lucene/build.gradle                     | 17 ++++++++++++++
 ...erIntegrationJUnitTest.createIndex.cache.xml | 17 ++++++++++++++
 ...serIntegrationJUnitTest.parseIndex.cache.xml | 17 ++++++++++++++
 gemfire-rebalancer/build.gradle                 | 17 ++++++++++++++
 .../src/it/resources/test-regions.xml           | 17 ++++++++++++++
 .../src/it/resources/test-retrieve-regions.xml  | 17 ++++++++++++++
 gemfire-web-api/build.gradle                    | 17 ++++++++++++++
 gemfire-web/build.gradle                        | 17 ++++++++++++++
 gradle.properties                               | 14 ++++++++++++
 gradle/dependency-versions.properties           | 15 ++++++++++++
 gradle/rat.gradle                               |  3 +++
 settings.gradle                                 | 16 +++++++++++++
 103 files changed, 1691 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 9042976..9c8ac44 100755
--- a/build.gradle
+++ b/build.gradle
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 buildscript {
   repositories {
     maven {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-assembly/build.gradle
----------------------------------------------------------------------
diff --git a/gemfire-assembly/build.gradle b/gemfire-assembly/build.gradle
index 514b4a1..373d77a 100755
--- a/gemfire-assembly/build.gradle
+++ b/gemfire-assembly/build.gradle
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 buildscript {
     repositories {
         mavenCentral()

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-assembly/src/main/dist/bin/gfsh
----------------------------------------------------------------------
diff --git a/gemfire-assembly/src/main/dist/bin/gfsh b/gemfire-assembly/src/main/dist/bin/gfsh
index bcb5a3d..d08e0d4 100755
--- a/gemfire-assembly/src/main/dist/bin/gfsh
+++ b/gemfire-assembly/src/main/dist/bin/gfsh
@@ -1,4 +1,18 @@
 #!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 #
 # Environment variables:

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-assembly/src/main/dist/bin/gfsh-completion.bash
----------------------------------------------------------------------
diff --git a/gemfire-assembly/src/main/dist/bin/gfsh-completion.bash b/gemfire-assembly/src/main/dist/bin/gfsh-completion.bash
index 6db7ef1..d81e5f3 100755
--- a/gemfire-assembly/src/main/dist/bin/gfsh-completion.bash
+++ b/gemfire-assembly/src/main/dist/bin/gfsh-completion.bash
@@ -1,4 +1,19 @@
 #!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 #
 # Auto completion script for GemFire's gfsh script
 #

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-assembly/src/main/dist/bin/gfsh.bat
----------------------------------------------------------------------
diff --git a/gemfire-assembly/src/main/dist/bin/gfsh.bat b/gemfire-assembly/src/main/dist/bin/gfsh.bat
index 10c2726..770e0b9 100755
--- a/gemfire-assembly/src/main/dist/bin/gfsh.bat
+++ b/gemfire-assembly/src/main/dist/bin/gfsh.bat
@@ -1,4 +1,18 @@
 @echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements.  See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License.  You may obtain a copy of the License at
+REM
+REM      http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
 
 REM
 REM Environment variables:

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java
index 383012e..26ada92 100644
--- a/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java
+++ b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java
@@ -1,9 +1,18 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package com.gemstone.gemfire.management.internal.configuration;
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-common/build.gradle
----------------------------------------------------------------------
diff --git a/gemfire-common/build.gradle b/gemfire-common/build.gradle
index aa8adcb..e28ba44 100755
--- a/gemfire-common/build.gradle
+++ b/gemfire-common/build.gradle
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 dependencies {
   provided project(path: ':gemfire-junit', configuration: 'testOutput')
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/build.gradle
----------------------------------------------------------------------
diff --git a/gemfire-core/build.gradle b/gemfire-core/build.gradle
index dd3b765..19aeee1 100755
--- a/gemfire-core/build.gradle
+++ b/gemfire-core/build.gradle
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 sourceSets {
   jca {
     compileClasspath += configurations.compile

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/jca/ra.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/jca/ra.xml b/gemfire-core/src/jca/ra.xml
index 0638ed0..78b4579 100644
--- a/gemfire-core/src/jca/ra.xml
+++ b/gemfire-core/src/jca/ra.xml
@@ -1,5 +1,20 @@
-
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 
 <!DOCTYPE connector PUBLIC '-//Sun Microsystems, Inc.//DTD Connector 1.0//EN' 'http://java.sun.com/j2ee/dtds/connector_1_0.dtd'>
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/main/java/com/gemstone/gemfire/cache/client/doc-files/example-client-cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/client/doc-files/example-client-cache.xml b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/client/doc-files/example-client-cache.xml
index edff479..bddeb9f 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/client/doc-files/example-client-cache.xml
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/client/doc-files/example-client-cache.xml
@@ -1,4 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 
 <!-- A sample client GemFire declarative caching XML File -->
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example-cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example-cache.xml b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example-cache.xml
index 6f99a58..0061b5c 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example-cache.xml
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example-cache.xml
@@ -1,4 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 
 <!-- A sample GemFire declarative caching XML File -->
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example2-cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example2-cache.xml b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example2-cache.xml
index 79d98b0..b65c285 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example2-cache.xml
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example2-cache.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <cache
     xmlns="http://schema.pivotal.io/gemfire/cache"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example3-cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example3-cache.xml b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example3-cache.xml
index 4d3a97a..803e72e 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example3-cache.xml
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/doc-files/example3-cache.xml
@@ -1,4 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 
 <!-- A sample GemFire declarative caching XML File that demonstrates
      the "named region attributes" feature.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/fixantlr.sh
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/fixantlr.sh b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/fixantlr.sh
index b514b5f..533f5fa 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/fixantlr.sh
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/fixantlr.sh
@@ -1,3 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 set -e
 sed -e 's/(char)//' OQLLexer.java >x.java
 mv x.java OQLLexer.java

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/oql.g
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/oql.g b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/oql.g
index e18e480..caf0ec0 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/oql.g
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/oql.g
@@ -1,4 +1,21 @@
 /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
 **  oql.g
 **
 ** Built with Antlr 2.7.4

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/direct/DirectChannelListener.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/direct/DirectChannelListener.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/direct/DirectChannelListener.java
index 28f481b..a97676d 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/direct/DirectChannelListener.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/direct/DirectChannelListener.java
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package com.gemstone.gemfire.distributed.internal.direct;
 
 import com.gemstone.gemfire.distributed.internal.DistributionManager;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/main/resources/com/gemstone/gemfire/internal/tools/gfsh/app/windowsbindings.properties
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/resources/com/gemstone/gemfire/internal/tools/gfsh/app/windowsbindings.properties b/gemfire-core/src/main/resources/com/gemstone/gemfire/internal/tools/gfsh/app/windowsbindings.properties
index 1fc6136..4291d0b 100644
--- a/gemfire-core/src/main/resources/com/gemstone/gemfire/internal/tools/gfsh/app/windowsbindings.properties
+++ b/gemfire-core/src/main/resources/com/gemstone/gemfire/internal/tools/gfsh/app/windowsbindings.properties
@@ -1,3 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # This is a file that comes with jline distro with key bindings for 'ESC' key 
 # commented. For associated bug see: 
 # http://sourceforge.net/tracker/index.php?func=detail&aid=1982039&group_id=64033&atid=506056

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/NetViewJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/NetViewJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/NetViewJUnitTest.java
index 9e39d0f..f21ac4c 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/NetViewJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/NetViewJUnitTest.java
@@ -30,6 +30,22 @@ import com.gemstone.gemfire.distributed.internal.membership.NetView;
 import com.gemstone.gemfire.internal.SocketCreator;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 @Category(UnitTest.class)
 public class NetViewJUnitTest {
   List<InternalDistributedMember> members;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/GMSMemberJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/GMSMemberJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/GMSMemberJUnitTest.java
index 0b75d3d..606ae1a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/GMSMemberJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/GMSMemberJUnitTest.java
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package com.gemstone.gemfire.distributed.internal.membership.gms;
 
 import static org.mockito.Mockito.mock;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java
index 063112a..7c5cf53 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java
@@ -41,6 +41,22 @@ import dunit.DistributedTestCase;
 import dunit.Host;
 import dunit.VM;
 
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
   private static Cache cache = null;
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
index a800118..ec38649 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
@@ -1,10 +1,18 @@
 /*
- * =========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved. This
- * product is protected by U.S. and international copyright and intellectual
- * property laws. Pivotal products are covered by one or more patents listed at
- * http://www.pivotal.io/patents.
- * =========================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package com.gemstone.gemfire.internal.cache.wan;
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
index 1eafbb0..e696248 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
@@ -1,9 +1,18 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package com.gemstone.gemfire.internal.cache.wan.asyncqueue;
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerOffHeapDUnitTest.java
index b050ef5..7feec9a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerOffHeapDUnitTest.java
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package com.gemstone.gemfire.internal.cache.wan.asyncqueue;
 
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
index cf4a184..30123a3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
@@ -1,9 +1,18 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package com.gemstone.gemfire.internal.cache.wan.asyncqueue;
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
index 2fb7496..fdcc6f6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
@@ -1,12 +1,18 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-/**
- * 
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package com.gemstone.gemfire.internal.cache.wan.concurrent;
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueOffHeapDUnitTest.java
index 41eb22d..ac6472e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueOffHeapDUnitTest.java
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package com.gemstone.gemfire.internal.cache.wan.concurrent;
 
 @SuppressWarnings("serial")

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
index 425d1a6..5b9d3bd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
@@ -1,12 +1,18 @@
-/*=========================================================================
- * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
- * This product is protected by U.S. and international copyright
- * and intellectual property laws. Pivotal products are covered by
- * one or more patents listed at http://www.pivotal.io/patents.
- *=========================================================================
- */
-/**
- * 
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package com.gemstone.gemfire.internal.cache.wan.misc;
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueOffHeapDUnitTest.java
index 8ab77b9..bceae58 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueOffHeapDUnitTest.java
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package com.gemstone.gemfire.internal.cache.wan.misc;
 
 @SuppressWarnings("serial")

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/client/ClientCacheFactoryJUnitTest_single_pool.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/client/ClientCacheFactoryJUnitTest_single_pool.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/client/ClientCacheFactoryJUnitTest_single_pool.xml
index 0bef14f..92454c4 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/client/ClientCacheFactoryJUnitTest_single_pool.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/client/ClientCacheFactoryJUnitTest_single_pool.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!-- MultiuserSecurityClient.xml
      Configures a region as a client region in a Security cache. The 
      region's pool connects to a server listening on port 40404.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/dunit/IndexCreation.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/dunit/IndexCreation.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/dunit/IndexCreation.xml
index 894a762..aa46ce8 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/dunit/IndexCreation.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/dunit/IndexCreation.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
   "-//GemStone Systems, Inc.//GemFire Declarative Cache 7.0//EN" "http://www.gemstone.com/dtd/cache7_0.dtd">
   <cache>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-creation-with-eviction.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-creation-with-eviction.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-creation-with-eviction.xml
index 4160b8d..861bb23 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-creation-with-eviction.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-creation-with-eviction.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.6//EN"
     "http://www.gemstone.com/dtd/cache6_6.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-creation-without-eviction.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-creation-without-eviction.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-creation-without-eviction.xml
index cc4edbe..f8bdc78 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-creation-without-eviction.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-creation-without-eviction.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.6//EN"
     "http://www.gemstone.com/dtd/cache6_6.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-recovery-overflow.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-recovery-overflow.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-recovery-overflow.xml
index 4476e9b..4ca5e2f 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-recovery-overflow.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/functional/index-recovery-overflow.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.6//EN"
     "http://www.gemstone.com/dtd/cache6_6.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/internal/index/cachequeryindex.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/internal/index/cachequeryindex.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/internal/index/cachequeryindex.xml
index 95067a9..4a97ba1 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/internal/index/cachequeryindex.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/internal/index/cachequeryindex.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.6//EN" "http://www.gemstone.com/dtd/cache6_6.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300">
   <vm-root-region name="root">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/internal/index/cachequeryindexwitherror.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/internal/index/cachequeryindexwitherror.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/internal/index/cachequeryindexwitherror.xml
index c2f20fc..f39c330 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/internal/index/cachequeryindexwitherror.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/internal/index/cachequeryindexwitherror.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.6//EN" "http://www.gemstone.com/dtd/cache6_6.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300">
   <vm-root-region name="root">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/partitioned/PRIndexCreation.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/partitioned/PRIndexCreation.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/partitioned/PRIndexCreation.xml
index 313158d..5d0ba46 100755
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/partitioned/PRIndexCreation.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache/query/partitioned/PRIndexCreation.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 5.1//EN" "http://www.gemstone.com/dtd/cache5_1.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300" is-server="false" copy-on-read="false">
   <cache-transaction-manager/>

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/attributesUnordered.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/attributesUnordered.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/attributesUnordered.xml
index f3c39af..aa8c3f4 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/attributesUnordered.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/attributesUnordered.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.0//EN"
     "http://www.gemstone.com/dtd/cache4_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badFloat.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badFloat.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badFloat.xml
index c0f3957..883216c 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badFloat.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badFloat.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.0//EN"
     "http://www.gemstone.com/dtd/cache4_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badInt.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badInt.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badInt.xml
index 3544290..17f9aa1 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badInt.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badInt.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.0//EN"
     "http://www.gemstone.com/dtd/cache4_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badKeyConstraintClass.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badKeyConstraintClass.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badKeyConstraintClass.xml
index 91e2bc4..9aa8cb3 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badKeyConstraintClass.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badKeyConstraintClass.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.0//EN"
     "http://www.gemstone.com/dtd/cache4_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badScope.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badScope.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badScope.xml
index ec683e5..0744a72 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badScope.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/badScope.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.0//EN"
     "http://www.gemstone.com/dtd/cache4_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/bug44710.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/bug44710.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/bug44710.xml
index ed6ea67..fa94d5d 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/bug44710.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/bug44710.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Cache 7.0//EN" "http://www.gemstone.com/dtd/cache7_0.dtd">
 <cache>
   <region name="r1">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/callbackNotDeclarable.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/callbackNotDeclarable.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/callbackNotDeclarable.xml
index 7ecaa3f..eccaf7c 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/callbackNotDeclarable.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/callbackNotDeclarable.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.0//EN"
     "http://www.gemstone.com/dtd/cache4_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/callbackWithException.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/callbackWithException.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/callbackWithException.xml
index 1a41012..964a497 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/callbackWithException.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/callbackWithException.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC
     "-//GemStone Systems, Inc.//GemFire Declarative Caching 4.0//EN"
     "http://www.gemstone.com/dtd/cache4_0.dtd">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/coLocation.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/coLocation.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/coLocation.xml
index f57e6ad..6c23d5f 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/coLocation.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/coLocation.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Cache 5.8//EN" "http://www.gemstone.com/dtd/cache5_8.dtd">
 <cache>
   <region name="Customer">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/coLocation3.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/coLocation3.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/coLocation3.xml
index ed42012..20fae74 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/coLocation3.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/coLocation3.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Cache 7.0//EN" "http://www.gemstone.com/dtd/cache7_0.dtd">
 <cache>
   <region-attributes id="ORDER_ATTS" data-policy="partition">

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/ewtest.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/ewtest.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/ewtest.xml
index 7b53fae..1f40036 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/ewtest.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/ewtest.xml
@@ -1,4 +1,21 @@
 <?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
 <!DOCTYPE cache PUBLIC "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN" "http://www.gemstone.com/dtd/cache6_5.dtd">
 <cache lock-lease="120" lock-timeout="60" search-timeout="300">
 	<!-- Disk store declarations -->

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/examples_3_0/example-cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/examples_3_0/example-cache.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/examples_3_0/example-cache.xml
index aabdc94..f7bdc8e 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/examples_3_0/example-cache.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/examples_3_0/example-cache.xml
@@ -1,4 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 
 <!-- A sample GemFire declarative caching XML File -->
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/35394efd/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/examples_4_0/example-cache.xml
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/examples_4_0/example-cache.xml b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/examples_4_0/example-cache.xml
index 82cecf7..4b21e8f 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/examples_4_0/example-cache.xml
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/cache30/examples_4_0/example-cache.xml
@@ -1,4 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 
 <!-- A sample GemFire declarative caching XML File -->
 



[49/50] [abbrv] incubator-geode git commit: added null check in checkAddressesForUUIDs method

Posted by kl...@apache.org.
added null check in checkAddressesForUUIDs method


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/949507ff
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/949507ff
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/949507ff

Branch: refs/heads/feature/GEODE-291
Commit: 949507ffad3b8a6237474f7ab302a118f06f6218
Parents: 7dfce7c
Author: Hitesh Khamesra <hi...@yahoo.com>
Authored: Thu Dec 10 13:58:23 2015 -0800
Committer: Hitesh Khamesra <hi...@yahoo.com>
Committed: Thu Dec 10 15:31:15 2015 -0800

----------------------------------------------------------------------
 .../membership/gms/mgr/GMSMembershipManager.java   | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/949507ff/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
index 93c14e2..e5835c4 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
@@ -1995,13 +1995,16 @@ public class GMSMembershipManager implements MembershipManager, Manager
   
   void checkAddressesForUUIDs(InternalDistributedMember[] addresses) {
     for (int i=0; i<addresses.length; i++) {
-      GMSMember id = (GMSMember)addresses[i].getNetMember();
-      if (!id.hasUUID()) {
-        latestViewLock.readLock().lock();
-        try {
-          addresses[i] = latestView.getCanonicalID(addresses[i]);
-        } finally {
-          latestViewLock.readLock().unlock();
+      InternalDistributedMember m = addresses[i];
+      if(m != null) {
+        GMSMember id = (GMSMember)m.getNetMember();
+        if (!id.hasUUID()) {
+          latestViewLock.readLock().lock();
+          try {
+            addresses[i] = latestView.getCanonicalID(addresses[i]);
+          } finally {
+            latestViewLock.readLock().unlock();
+          }
         }
       }
     }

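For quick reference, the patched loop can be pictured as the sketch below. This is a reading aid reconstructed from the hunk above, not the verbatim committed code: the surrounding fields (latestView, latestViewLock) are assumed from context, and the null guard is written as an early continue, which is behaviorally equivalent to the nested if (m != null) block in the actual diff.

  void checkAddressesForUUIDs(InternalDistributedMember[] addresses) {
    for (int i = 0; i < addresses.length; i++) {
      InternalDistributedMember m = addresses[i];
      if (m == null) {
        continue;  // new guard: skip empty slots instead of dereferencing null
      }
      GMSMember id = (GMSMember) m.getNetMember();
      if (!id.hasUUID()) {
        // resolve the member to its canonical ID under the view read lock
        latestViewLock.readLock().lock();
        try {
          addresses[i] = latestView.getCanonicalID(addresses[i]);
        } finally {
          latestViewLock.readLock().unlock();
        }
      }
    }
  }
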

[42/50] [abbrv] incubator-geode git commit: Revision 56b3757129fb20ab047d8c164fa965eff50099c4 closed #50

Posted by kl...@apache.org.
Revision 56b3757129fb20ab047d8c164fa965eff50099c4 closed #50


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/aa27c6a7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/aa27c6a7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/aa27c6a7

Branch: refs/heads/feature/GEODE-291
Commit: aa27c6a77734d417e4f369a32dd709978ecff63d
Parents: 56b3757
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Thu Dec 10 11:28:24 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Thu Dec 10 11:28:24 2015 -0800

----------------------------------------------------------------------

----------------------------------------------------------------------



[23/50] [abbrv] incubator-geode git commit: GEODE-637: Additional tests for AsyncEventQueues

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/476c6cd3/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
new file mode 100644
index 0000000..1eafbb0
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
@@ -0,0 +1,1911 @@
+/*=========================================================================
+ * Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
+ * This product is protected by U.S. and international copyright
+ * and intellectual property laws. Pivotal products are covered by
+ * one or more patents listed at http://www.pivotal.io/patents.
+ *=========================================================================
+ */
+package com.gemstone.gemfire.internal.cache.wan.asyncqueue;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import org.junit.Ignore;
+
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
+import com.gemstone.gemfire.cache.wan.GatewaySender.OrderPolicy;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueTestBase;
+
+public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
+
+  private static final long serialVersionUID = 1L;
+
+  public AsyncEventListenerDUnitTest(String name) {
+    super(name);
+  }
+
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+
+  /**
+   * Test to verify that an AsyncEventQueue cannot be created when a null
+   * listener is passed.
+   */
+  public void testCreateAsyncEventQueueWithNullListener() {
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    Properties props = new Properties();
+    props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    InternalDistributedSystem ds = test.getSystem(props);
+    cache = CacheFactory.create(ds);
+
+    AsyncEventQueueFactory asyncQueueFactory = cache
+        .createAsyncEventQueueFactory();
+    try {
+      asyncQueueFactory.create("testId", null);
+      fail("AsyncQueueFactory should not allow to create AsyncEventQueue with null listener");
+    }
+    catch (IllegalArgumentException e) {
+      // expected
+    }
+
+  }
+
+  public void testSerialAsyncEventQueueAttributes() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 150, true, true, "testDS", true });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventQueueAttributes",
+        new Object[] { "ln", 100, 150, AsyncEventQueueFactoryImpl.DEFAULT_BATCH_TIME_INTERVAL, true, "testDS", true, true });
+  }
+  
+  public void testSerialAsyncEventQueueSize() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm5
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm6
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm7
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    pause(1000);// pause at least for the batchTimeInterval
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        1000 });
+
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventQueueSize", new Object[] { "ln" });
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventQueueSize", new Object[] { "ln" });
+    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm4size);
+    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm5size);
+  }
+  
+  /**
+   * Added to reproduce defect #50366: 
+   * NullPointerException with AsyncEventQueue#size() when number of dispatchers is more than 1
+   */
+  public void testConcurrentSerialAsyncEventQueueSize() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 150, true, false, null, false, 2, OrderPolicy.KEY });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 150, true, false, null, false, 2, OrderPolicy.KEY });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+        new Object[] { "ln" });
+
+    pause(1000);// pause at least for the batchTimeInterval
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        1000 });
+
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventQueueSize", new Object[] { "ln" });
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventQueueSize", new Object[] { "ln" });
+    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm4size);
+    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm5size);
+  }
+  
+  /**
+   * Test configuration::
+   * 
+   * Region: Replicated
+   * WAN: Serial
+   * Region persistence enabled: false
+   * Async channel persistence enabled: false
+   */
+
+  public void testReplicatedSerialAsyncEventQueue() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        1000 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1000 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+  }
+  
+  /**
+   * Verify that the events loaded by CacheLoader reach the AsyncEventListener
+   * with correct operation detail (added for defect #50237).
+   */
+  public void testReplicatedSerialAsyncEventQueueWithCacheLoader() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithCacheLoaderAndAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithCacheLoaderAndAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithCacheLoaderAndAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithCacheLoaderAndAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln" });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doGets", new Object[] { testName + "_RR",
+        10 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventForOperationDetail",
+        new Object[] { "ln", 10, true, false });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventForOperationDetail",
+        new Object[] { "ln", 0, true, false });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventForOperationDetail",
+        new Object[] { "ln", 0, true, false });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventForOperationDetail",
+        new Object[] { "ln", 0, true, false });// secondary
+  }
+  
+  /**
+   * Test configuration::
+   * 
+   * Region: Replicated 
+   * WAN: Serial 
+   * Region persistence enabled: false 
+   * Async queue persistence enabled: false
+   * 
+   * Error is thrown from AsyncEventListener implementation while processing the batch.
+   * Added to test the fix done for defect #45152.
+   */
+
+  public void testReplicatedSerialAsyncEventQueue_ExceptionScenario() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithCustomListener", new Object[] { "ln",
+        false, 100, 100, false, false, null, false, 1 });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithCustomListener", new Object[] { "ln",
+        false, 100, 100, false, false, null, false, 1 });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithCustomListener", new Object[] { "ln",
+        false, 100, 100, false, false, null, false, 1 });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithCustomListener", new Object[] { "ln",
+        false, 100, 100, false, false, null, false, 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    
+    vm4
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm5
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm6
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm7
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    pause(2000);// pause at least for the batchTimeInterval
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        100 });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateCustomAsyncEventListener",
+        new Object[] { "ln", 100 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateCustomAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateCustomAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateCustomAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+  }
+
+  /**
+   * Test configuration::
+   * 
+   * Region: Replicated
+   * WAN: Serial
+   * Region persistence enabled: false
+   * Async channel persistence enabled: false
+   * AsyncEventQueue conflation enabled: true
+   */
+  public void testReplicatedSerialAsyncEventQueueWithConflationEnabled() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm5
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm6
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm7
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    pause(1000);// pause at least for the batchTimeInterval
+
+    final Map keyValues = new HashMap();
+    final Map updateKeyValues = new HashMap();
+    for (int i = 0; i < 1000; i++) {
+      keyValues.put(i, i);
+    }
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+        testName + "_RR", keyValues });
+
+    pause(1000);
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
+        "ln", keyValues.size() });
+
+    for (int i = 0; i < 500; i++) {
+      updateKeyValues.put(i, i + "_updated");
+    }
+
+    // Put the update events and check the queue size.
+    // There should be no conflation with the previous create events.
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+        testName + "_RR", updateKeyValues });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
+        "ln", keyValues.size() + updateKeyValues.size() });
+
+    // Put the update events again and check the queue size.
+    // There should be conflation with the previous update events.
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+        testName + "_RR", updateKeyValues });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
+        "ln", keyValues.size() + updateKeyValues.size() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1000 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+  }
+
+  
+
+  /**
+   * Test configuration::
+   * 
+   * Region: Replicated
+   * WAN: Serial
+   * Region persistence enabled: false
+   * Async event queue persistence enabled: false
+   * 
+   * Note: The test doesn't create a locator but uses MCAST port instead.
+   */
+  @Ignore("Disabled until I can sort out the hydra dependencies - see bug 52214")
+  public void DISABLED_testReplicatedSerialAsyncEventQueueWithoutLocator() {
+    int mPort = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCacheWithoutLocator",
+        new Object[] { mPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCacheWithoutLocator",
+        new Object[] { mPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCacheWithoutLocator",
+        new Object[] { mPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCacheWithoutLocator",
+        new Object[] { mPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        1000 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1000 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+  }
+
+  /**
+   * Test configuration::
+   * 
+   * Region: Replicated
+   * WAN: Serial
+   * Region persistence enabled: false
+   * Async channel persistence enabled: true
+   * 
+   * No VM is restarted.
+   */
+
+  public void testReplicatedSerialAsyncEventQueueWithPeristenceEnabled() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        1000 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1000 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+  }
+
+  /**
+   * Test configuration::
+   * 
+   * Region: Replicated
+   * WAN: Serial
+   * Region persistence enabled: false
+   * Async channel persistence enabled: true
+   * 
+   * There is only one VM in the site and that VM is restarted.
+   */
+
+  @Ignore("Disabled for 52351")
+  public void DISABLED_testReplicatedSerialAsyncEventQueueWithPeristenceEnabled_Restart() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    String firstDStore = (String)vm4.invoke(AsyncEventQueueTestBase.class,
+        "createAsyncEventQueueWithDiskStore", new Object[] { "ln", false, 100,
+            100, true, null });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    // pause async channel and then do the puts
+    vm4
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        1000 });
+
+    // ------------------ KILL VM4 AND REBUILD
+    // ------------------------------------------
+    vm4.invoke(AsyncEventQueueTestBase.class, "killSender", new Object[] {});
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithDiskStore",
+        new Object[] { "ln", false, 100, 100, true, firstDStore });
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    // -----------------------------------------------------------------------------------
+
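+    // The queue is persistent and the member is recreated with the same disk store
+    // (firstDStore), so the 1000 events put while the queue was paused should be
+    // recovered from disk and delivered to the listener after the restart.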
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1000 });// primary sender
+  }
+
+  /**
+   * Test configuration::
+   * 
+   * Region: Replicated
+   * WAN: Serial
+   * Region persistence enabled: false
+   * Async channel persistence enabled: true
+   * 
+   * There are 3 VMs in the site and the VM with primary sender is shut down.
+   */
+  @Ignore("Disabled for 52351")
+  public void DISABLED_testReplicatedSerialAsyncEventQueueWithPeristenceEnabled_Restart2() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithDiskStore",
+        new Object[] { "ln", false, 100, 100, true, null });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithDiskStore",
+        new Object[] { "ln", false, 100, 100, true, null });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithDiskStore",
+        new Object[] { "ln", false, 100, 100, true, null });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm4.invoke(AsyncEventQueueTestBase.class, "addCacheListenerAndCloseCache",
+        new Object[] { testName + "_RR" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm5.invoke(AsyncEventQueueTestBase.class, "doPuts",
+        new Object[] { testName + "_RR", 2000 });
+
+    // -----------------------------------------------------------------------------------
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForSenderToBecomePrimary",
+        new Object[] { AsyncEventQueueImpl
+            .getSenderIdFromAsyncEventQueueId("ln") });
+    
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventListenerMapSize", new Object[] { "ln" });
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventListenerMapSize", new Object[] { "ln" });
+
+    getLogWriter().info("vm4 size is: " + vm4size);
+    getLogWriter().info("vm5 size is: " + vm5size);
+    // verify that there is no event loss
+    assertTrue(
+        "Total number of entries in events map on vm4 and vm5 should be at least 2000",
+        (vm4size + vm5size) >= 2000);
+  }
+  
+  /**
+   * Test configuration::
+   * 
+   * Region: Replicated 
+   * WAN: Serial 
+   * Dispatcher threads: more than 1
+   * Order policy: key based ordering
+   */
+  public void testConcurrentSerialAsyncEventQueueWithReplicatedRegion() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, OrderPolicy.KEY });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, OrderPolicy.KEY });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, OrderPolicy.KEY });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, OrderPolicy.KEY });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        1000 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 1000 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 0 });// secondary
+  }
+  
+  /**
+   * Test configuration::
+   * 
+   * Region: Replicated 
+   * WAN: Serial 
+   * Region persistence enabled: false 
+   * Async queue persistence enabled: false
+   */
+  public void testConcurrentSerialAsyncEventQueueWithReplicatedRegion_2() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, OrderPolicy.THREAD });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, OrderPolicy.THREAD });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, OrderPolicy.THREAD });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, OrderPolicy.THREAD });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4.invokeAsync(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        500 });
+    vm4.invokeAsync(AsyncEventQueueTestBase.class, "doNextPuts", new Object[] { testName + "_RR",
+      500, 1000 });
+    vm4.invokeAsync(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+      1000, 1500 });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 1000 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 0 });// secondary
+  }
+  
+  /**
+   * Dispatcher threads set to more than 1 but no order policy set.
+   * Added for defect #50514.
+   */
+  public void testConcurrentSerialAsyncEventQueueWithoutOrderPolicy() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, null });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, null });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, null });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false, 3, null });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        1000 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 1000 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] {"ln", 0 });// secondary
+  }
+
+  /**
+   * Test configuration::
+   * 
+   * Region: Partitioned
+   * WAN: Serial
+   * Region persistence enabled: false
+   * Async channel persistence enabled: false
+   */
+  public void testPartitionedSerialAsyncEventQueue() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+        500 });
+    vm5.invoke(AsyncEventQueueTestBase.class, "doPutsFrom", new Object[] {
+        testName + "_PR", 500, 1000 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1000 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+  }
+
+  /**
+   * Test configuration::
+   * 
+   * Region: Partitioned
+   * WAN: Serial
+   * Region persistence enabled: false
+   * Async channel persistence enabled: false
+   * AsyncEventQueue conflation enabled: true
+   */
+  public void testPartitionedSerialAsyncEventQueueWithConflationEnabled() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, true, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    vm4
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm5
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm6
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm7
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    
+    pause(2000);
+
+    final Map keyValues = new HashMap();
+    final Map updateKeyValues = new HashMap();
+    for (int i = 0; i < 1000; i++) {
+      keyValues.put(i, i);
+    }
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+        testName + "_PR", keyValues });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
+        "ln", keyValues.size() });
+
+    for (int i = 0; i < 500; i++) {
+      updateKeyValues.put(i, i + "_updated");
+    }
+
+    // Put the update events and check the queue size.
+    // There should be no conflation with the previous create events.
+    vm5.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+        testName + "_PR", updateKeyValues });
+
+    vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
+        "ln", keyValues.size() + updateKeyValues.size() });
+
+    // Put the update events again and check the queue size.
+    // There should be conflation with the previous update events.
+    vm5.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+      testName + "_PR", updateKeyValues });
+
+    vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
+      "ln", keyValues.size() + updateKeyValues.size() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1000 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+  }
+
+  /**
+   * Test configuration::
+   * 
+   * Region: Partitioned
+   * WAN: Serial
+   * Region persistence enabled: false
+   * Async channel persistence enabled: true
+   * 
+   * No VM is restarted.
+   */
+  public void testPartitionedSerialAsyncEventQueueWithPeristenceEnabled() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, true, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, true, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, true, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        false, 100, 100, false, true, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+        500 });
+    vm5.invoke(AsyncEventQueueTestBase.class, "doPutsFrom", new Object[] {
+        testName + "_PR", 500, 1000 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1000 });// primary sender
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });// secondary
+  }
+
+  /**
+   * Test configuration::
+   * 
+   * Region: Partitioned
+   * WAN: Serial
+   * Region persistence enabled: false
+   * Async channel persistence enabled: true
+   * 
+   * There is only one VM in the site and that VM is restarted.
+   */
+  public void testPartitionedSerialAsyncEventQueueWithPeristenceEnabled_Restart() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    String firstDStore = (String)vm4.invoke(AsyncEventQueueTestBase.class,
+        "createAsyncEventQueueWithDiskStore", new Object[] { "ln", false, 100,
+            100, true, null });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    // pause async channel and then do the puts
+    vm4
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueueAndWaitForDispatcherToPause",
+            new Object[] { "ln" });
+  
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+        1000 });
+
+    // ------------------ KILL VM4 AND REBUILD
+    // ------------------------------------------
+    vm4.invoke(AsyncEventQueueTestBase.class, "killSender", new Object[] {});
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithDiskStore",
+        new Object[] { "ln", false, 100, 100, true, firstDStore });
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    // -----------------------------------------------------------------------------------
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 1000 });// primary sender
+  }
+
+  public void testParallelAsyncEventQueueWithReplicatedRegion() {
+    try {
+      Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+          "createFirstLocatorWithDSId", new Object[] { 1 });
+
+      vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+      vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+      vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+      vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+      vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] {
+          "ln", true, 100, 100, true, false, null, false });
+      vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] {
+          "ln", true, 100, 100, true, false, null, false });
+      vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] {
+          "ln", true, 100, 100, true, false, null, false });
+      vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] {
+          "ln", true, 100, 100, true, false, null, false });
+
+      vm4.invoke(AsyncEventQueueTestBase.class,
+          "createReplicatedRegionWithAsyncEventQueue", new Object[] {
+              testName + "_RR", "ln", isOffHeap() });
+      fail("Expected GatewaySenderConfigException where parallel async event queue can not be used with replicated region");
+    }
+    catch (Exception e) {
+      if (!e.getCause().getMessage()
+          .contains("can not be used with replicated region")) {
+        fail("Expected GatewaySenderConfigException where parallel async event queue can not be used with replicated region");
+      }
+    }
+  }
+  
+  public void testParallelAsyncEventQueue() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+        256 });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm6size = (Integer)vm6.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm7size = (Integer)vm7.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    
+    assertEquals(vm4size + vm5size + vm6size + vm7size, 256);
+  }
+  
+  /**
+   * Verify that the events reaching the AsyncEventListener have correct operation detail.
+   * (added for defect #50237).
+   */
+  public void testParallelAsyncEventQueueWithCacheLoader() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+	  "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+    	true, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+    	true, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+    	true, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+    	true, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithCacheLoaderAndAsyncQueue",
+    	new Object[] { testName + "_PR", "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithCacheLoaderAndAsyncQueue",
+    	new Object[] { testName + "_PR", "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithCacheLoaderAndAsyncQueue",
+    	new Object[] { testName + "_PR", "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithCacheLoaderAndAsyncQueue",
+    	new Object[] { testName + "_PR", "ln" });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPutAll", new Object[] { testName + "_PR",
+    	100, 10 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventForOperationDetail",
+    	new Object[] { "ln", 250, false, true });
+    vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventForOperationDetail",
+    	new Object[] { "ln", 250, false, true });
+    vm6.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventForOperationDetail",
+    	new Object[] { "ln", 250, false, true });
+    vm7.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventForOperationDetail",
+    	new Object[] { "ln", 250, false, true });
+  }
+  
+  public void testParallelAsyncEventQueueSize() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    vm4
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm5
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm6
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm7
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    pause(1000);// pause at least for the batchTimeInterval
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+        1000 });
+
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventQueueSize", new Object[] { "ln" });
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventQueueSize", new Object[] { "ln" });
+    
+    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm4size);
+    assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm5size);
+  }
+  
+  /**
+   * Added to reproduce defect #50366: 
+   * NullPointerException with AsyncEventQueue#size() when number of dispatchers is more than 1
+   */
+  public void testConcurrentParallelAsyncEventQueueSize() {
+	Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+	  "createFirstLocatorWithDSId", new Object[] { 1 });
+
+	vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+	vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+	vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+	vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+	vm4.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+	  true, 100, 100, false, false, null, false, 2, OrderPolicy.KEY });
+	vm5.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+	  true, 100, 100, false, false, null, false, 2, OrderPolicy.KEY });
+	vm6.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+	  true, 100, 100, false, false, null, false, 2, OrderPolicy.KEY });
+	vm7.invoke(AsyncEventQueueTestBase.class, "createConcurrentAsyncEventQueue", new Object[] { "ln",
+	  true, 100, 100, false, false, null, false, 2, OrderPolicy.KEY });
+
+	vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+	  new Object[] { testName + "_PR", "ln", isOffHeap() });
+	vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+	  new Object[] { testName + "_PR", "ln", isOffHeap() });
+	vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+	  new Object[] { testName + "_PR", "ln", isOffHeap() });
+	vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+	  new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+	vm4
+	  .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+		new Object[] { "ln" });
+	vm5
+	  .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+		new Object[] { "ln" });
+	vm6
+	  .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+		new Object[] { "ln" });
+	vm7
+	  .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+		new Object[] { "ln" });
+	pause(1000);// pause at least for the batchTimeInterval
+
+	vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+	  1000 });
+
+	int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class,
+	  "getAsyncEventQueueSize", new Object[] { "ln" });
+	int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class,
+	  "getAsyncEventQueueSize", new Object[] { "ln" });
+	    
+	assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm4size);
+	assertEquals("Size of AsyncEventQueue is incorrect", 1000, vm5size);
+  }
+  
+  public void testParallelAsyncEventQueueWithConflationEnabled() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, true, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, true, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, true, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, true, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    vm4
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm5
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm6
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm7
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+
+    pause(2000);// pause for the batchTimeInterval to ensure that all the
+    // senders are paused
+
+    final Map keyValues = new HashMap();
+    final Map updateKeyValues = new HashMap();
+    for (int i = 0; i < 1000; i++) {
+      keyValues.put(i, i);
+    }
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+        testName + "_PR", keyValues });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
+        "ln", keyValues.size() });
+
+    for (int i = 0; i < 500; i++) {
+      updateKeyValues.put(i, i + "_updated");
+    }
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+        testName + "_PR", updateKeyValues });
+
+ 
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncEventQueueSize", new Object[] {
+        "ln", keyValues.size() + updateKeyValues.size() }); // no conflation of creates
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+        testName + "_PR", updateKeyValues });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncEventQueueSize", new Object[] {
+        "ln", keyValues.size() + updateKeyValues.size() }); // conflation of updates
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm6size = (Integer)vm6.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm7size = (Integer)vm7.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    
+    assertEquals(vm4size + vm5size + vm6size + vm7size, keyValues.size());
+  }
+
+  /**
+   * Added to reproduce defect #47213
+   */
+  public void testParallelAsyncEventQueueWithConflationEnabled_bug47213() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, true, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, true, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, true, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, true, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPRWithRedundantCopyWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPRWithRedundantCopyWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPRWithRedundantCopyWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPRWithRedundantCopyWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    vm4
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm5
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm6
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm7
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+
+    pause(2000);// pause for the batchTimeInterval to ensure that all the
+    // senders are paused
+
+    final Map keyValues = new HashMap();
+    final Map updateKeyValues = new HashMap();
+    for (int i = 0; i < 1000; i++) {
+      keyValues.put(i, i);
+    }
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+        testName + "_PR", keyValues });
+
+    pause(2000);
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
+        "ln", keyValues.size() });
+
+    for (int i = 0; i < 500; i++) {
+      updateKeyValues.put(i, i + "_updated");
+    }
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+        testName + "_PR", updateKeyValues });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
+        testName + "_PR", updateKeyValues });
+
+    // pause to ensure that events have been conflated.
+    pause(2000);
+    vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
+        "ln", keyValues.size() + updateKeyValues.size() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm6size = (Integer)vm6.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm7size = (Integer)vm7.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    
+    assertEquals(vm4size + vm5size + vm6size + vm7size, keyValues.size());
+    
+  }
+
+  public void testParallelAsyncEventQueueWithOneAccessor() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm3.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm3.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+
+    vm3.invoke(AsyncEventQueueTestBase.class,
+        "createPartitionedRegionAccessorWithAsyncEventQueue", new Object[] {
+            testName + "_PR", "ln" });
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    vm3.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+        256 });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    
+    vm3.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
+        new Object[] { "ln", 0 });
+    
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm6size = (Integer)vm6.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm7size = (Integer)vm7.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    
+    assertEquals(vm4size + vm5size + vm6size + vm7size, 256);
+
+  }
+
+  public void testParallelAsyncEventQueueWithPersistence() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, true, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, true, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, true, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, true, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_PR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+        256 });
+    
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm6size = (Integer)vm6.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    int vm7size = (Integer)vm7.invoke(AsyncEventQueueTestBase.class, "getAsyncEventListenerMapSize",
+        new Object[] { "ln"});
+    
+    assertEquals(vm4size + vm5size + vm6size + vm7size, 256);
+  }
+  
+  /**
+   * Below test is disabled intentionally. A replicated region with a parallel async
+   * event queue is not supported; testParallelAsyncEventQueueWithReplicatedRegion
+   * covers the same configuration.
+   * 
+   * We are going to support this configuration in upcoming releases.
+   */
+  
+  public void DISABLED_DUETO_BUG51491_testReplicatedParallelAsyncEventQueue() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+        "createFirstLocatorWithDSId", new Object[] { 1 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
+        true, 100, 100, false, false, null, false });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+    vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
+        new Object[] { testName + "_RR", "ln", isOffHeap() });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+        1000 });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm5.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm6.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+    vm7.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { "ln" });
+
+    int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventListenerMapSize", new Object[] { "ln" });
+    int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventListenerMapSize", new Object[] { "ln" });
+    int vm6size = (Integer)vm6.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventListenerMapSize", new Object[] { "ln" });
+    int vm7size = (Integer)vm7.invoke(AsyncEventQueueTestBase.class,
+        "getAsyncEventListenerMapSize", new Object[] { "ln" });
+
+    assertEquals(vm4size + vm5size + vm6size + vm7size, 1000);
+  }
+  
+  /**
+   * Test case for possibleDuplicates: vm4 and vm5 host the PR; vm5 is killed,
+   * so the buckets it hosted fail over to vm4.
+   */
+  @Ignore("Disabled for 52349")
+  public void DISABLED_testParallelAsyncEventQueueHA_Scenario1() {
+    Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
+      "createFirstLocatorWithDSId", new Object[] { 1 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
+
+    getLogWriter().info("Created the cache");
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithListener2",
+        new Object[] { "ln", true, 100, 5, false, null });
+    vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithListener2",
+        new Object[] { "ln", true, 100, 5, false, null });
+
+    getLogWriter().info("Created the AsyncEventQueue");
+
+    vm4.invoke(AsyncEventQueueTestBase.class,
+        "createPRWithRedundantCopyWithAsyncEventQueue", new Object[] {
+            testName + "_PR", "ln", isOffHeap() });
+    vm5.invoke(AsyncEventQueueTestBase.class,
+        "createPRWithRedundantCopyWithAsyncEventQueue", new Object[] {
+            testName + "_PR", "ln", isOffHeap() });
+
+    getLogWriter().info("Created PR with AsyncEventQueue");
+
+    vm4
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    vm5
+        .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
+            new Object[] { "ln" });
+    pause(1000);// pause for the batchTimeInterval to make sure the AsyncQueue
+                // is paused
+
+    getLogWriter().info("Paused the AsyncEventQueue");
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts",
+        new Object[] { testName + "_PR", 80 });
+
+    getLogWriter().info("Done puts");
+
+    Set<Integer> primaryBucketsVm5 = (Set<Integer>)vm5.invoke(
+        AsyncEventQueueTestBase.class, "getAllPrimaryBucketsOnTheNode",
+        new Object[] { testName + "_PR" });
+
+    getLogWriter().info("Primary buckets on vm5: " + primaryBucketsVm5);
+    // ---------------------------- Kill vm5 --------------------------
+    vm5.invoke(AsyncEventQueueTestBase.class, "killSender", new Object[] {});
+
+    pause(1000);// give some time for rebalancing to happen
+    vm4.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
+        new Object[] { "ln" });
+
+    vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
+        new Object[] { 

<TRUNCATED>


[35/50] [abbrv] incubator-geode git commit: GEODE-608: Exclude cache element xml to prevent failing test

Posted by kl...@apache.org.
GEODE-608: Exclude cache element xml to prevent failing test


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/c9f677b4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/c9f677b4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/c9f677b4

Branch: refs/heads/feature/GEODE-291
Commit: c9f677b49deed0aac6741f731f3b842821a7d512
Parents: 3da5bcc
Author: Anthony Baker <ab...@pivotal.io>
Authored: Wed Dec 9 14:30:30 2015 -0800
Committer: Anthony Baker <ab...@pivotal.io>
Committed: Thu Dec 10 09:44:57 2015 -0800

----------------------------------------------------------------------
 gradle/rat.gradle | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c9f677b4/gradle/rat.gradle
----------------------------------------------------------------------
diff --git a/gradle/rat.gradle b/gradle/rat.gradle
index 96d1944..ab2da4c 100644
--- a/gradle/rat.gradle
+++ b/gradle/rat.gradle
@@ -58,6 +58,7 @@ rat {
     'gemfire-spark-connector/project/plugins.sbt',
     'gemfire-spark-connector/project/build.properties',
     '**/log4j2*.xml',
+    'gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.xml',
  
     // ANTLR generated files
     'gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/parse/OQLLexer.java',
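For context on the change above: each entry inside the rat { ... } block of gradle/rat.gradle is a path or glob pattern that the Apache RAT license audit skips, so excluding CacheElementJUnitTest.xml simply means appending its path to that list. A minimal sketch of such an exclusion list follows; the property name "excludes" is an assumption made for illustration (this hunk shows only the entries themselves), and the two paths are copied from the diff.

    rat {
      // Hypothetical property name -- only the entry format is taken from the hunk above.
      excludes = [
        '**/log4j2*.xml',
        'gemfire-core/src/test/resources/com/gemstone/gemfire/management/internal/configuration/domain/CacheElementJUnitTest.xml',
      ]
    }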


[15/50] [abbrv] incubator-geode git commit: GEODE-563: Moving gfsh tests from closed

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java
new file mode 100644
index 0000000..18dfe67
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java
@@ -0,0 +1,1148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEvent;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;
+import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;
+import com.gemstone.gemfire.compression.SnappyCompressor;
+import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalLocator;
+import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
+import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.internal.ClassBuilder;
+import com.gemstone.gemfire.internal.FileUtil;
+import com.gemstone.gemfire.internal.cache.RegionEntryContext;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.MBeanJMXAdapter;
+import com.gemstone.gemfire.management.internal.ManagementConstants;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableCallable;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.text.MessageFormat;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBase {
+  private static final long serialVersionUID = 1L;
+
+  final String alterRegionName = "testAlterRegionRegion";
+  final String alterAsyncEventQueueId1 = "testAlterRegionQueue1";
+  final String alterAsyncEventQueueId2 = "testAlterRegionQueue2";
+  final String alterAsyncEventQueueId3 = "testAlterRegionQueue3";
+  final String alterGatewaySenderId1 = "testAlterRegionSender1";
+  final String alterGatewaySenderId2 = "testAlterRegionSender2";
+  final String alterGatewaySenderId3 = "testAlterRegionSender3";
+  final String region46391 = "region46391";
+  VM alterVm1;
+  String alterVm1Name;
+  VM alterVm2;
+  String alterVm2Name;
+
+  final List<String> filesToBeDeleted = new CopyOnWriteArrayList<String>();
+
+  public CreateAlterDestroyRegionCommandsDUnitTest(String name) {
+    super(name);
+  }
+
+  /**
+   * Asserts that the "compressor" option for the "create region" command succeeds for a recognized compressor.
+   */
+  public void testCreateRegionWithGoodCompressor() {
+    createDefaultSetup(null);
+    VM vm = Host.getHost(0).getVM(1);
+
+    // Create a cache in vm 1
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        assertNotNull(getCache());
+      }
+    });
+
+    // Run create region command with compression
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_REGION);
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGION, "compressedRegion");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT, "REPLICATE");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__COMPRESSOR,
+        RegionEntryContext.DEFAULT_COMPRESSION_PROVIDER);
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Make sure our region exists with compression enabled
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Region region = getCache().getRegion("compressedRegion");
+        assertNotNull(region);
+        assertTrue(SnappyCompressor.getDefaultInstance().equals(region.getAttributes().getCompressor()));
+      }
+    });
+
+    // cleanup
+    commandStringBuilder = new CommandStringBuilder(CliStrings.DESTROY_REGION);
+    commandStringBuilder.addOption(CliStrings.DESTROY_REGION__REGION, "compressedRegion");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+  }
+
+  /**
+   * Asserts that the "compressor" option for the "create region" command fails for an unrecognized compressor.
+   */
+  public void testCreateRegionWithBadCompressor() {
+    createDefaultSetup(null);
+
+    VM vm = Host.getHost(0).getVM(1);
+
+    // Create a cache in vm 1
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        assertNotNull(getCache());
+      }
+    });
+
+    // Create a region with an unrecognized compressor
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_REGION);
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGION, "compressedRegion");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT, "REPLICATE");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__COMPRESSOR, "BAD_COMPRESSOR");
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.ERROR, cmdResult.getStatus());
+
+    // Assert that our region was not created
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Region region = getCache().getRegion("compressedRegion");
+        assertNull(region);
+      }
+    });
+  }
+
+  /**
+   * Asserts that a missing "compressor" option for the "create region" command results in a region with no
+   * compression.
+   */
+  public void testCreateRegionWithNoCompressor() {
+    createDefaultSetup(null);
+
+    VM vm = Host.getHost(0).getVM(1);
+
+    // Create a cache in vm 1
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        assertNotNull(getCache());
+      }
+    });
+
+    // Create a region with no compression
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_REGION);
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGION, "testRegion");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT, "REPLICATE");
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Assert that our newly created region has no compression
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Region region = getCache().getRegion("testRegion");
+        assertNotNull(region);
+        assertNull(region.getAttributes().getCompressor());
+      }
+    });
+
+    // Cleanup
+    commandStringBuilder = new CommandStringBuilder(CliStrings.DESTROY_REGION);
+    commandStringBuilder.addOption(CliStrings.DESTROY_REGION__REGION, "testRegion");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+  }
+
+  public void testDestroyRegion() {
+    createDefaultSetup(null);
+
+    for (int i = 1; i <= 2; i++) {
+      Host.getHost(0).getVM(i).invoke(new SerializableRunnable() {
+        @Override
+        public void run() {
+          final Cache cache = getCache();
+
+          RegionFactory<Object, Object> factory = cache.createRegionFactory(RegionShortcut.PARTITION);
+          factory.create("Customer");
+
+          PartitionAttributesFactory paFactory = new PartitionAttributesFactory();
+          paFactory.setColocatedWith("Customer");
+          factory.setPartitionAttributes(paFactory.create());
+          factory.create("Order");
+        }
+      });
+    }
+
+    // Make sure that the region has been registered with the Manager MXBean
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          @Override
+          public boolean done() {
+            try {
+              MBeanServer mbeanServer = MBeanJMXAdapter.mbeanServer;
+              String queryExp = MessageFormat.format(ManagementConstants.OBJECTNAME__REGION_MXBEAN,
+                  new Object[]{"/Customer", "*"});
+              ObjectName queryExpON = new ObjectName(queryExp);
+              return !(mbeanServer.queryNames(null, queryExpON).isEmpty());
+            } catch (MalformedObjectNameException mone) {
+              getLogWriter().error(mone);
+              fail(mone.getMessage());
+              return false;
+            }
+          }
+
+          @Override
+          public String description() {
+            return "Waiting for the region to be registed with the MXBean";
+          }
+        };
+
+        DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+      }
+    });
+
+    // Test failure when region not found
+    String command = "destroy region --name=DOESNOTEXIST";
+    getLogWriter().info("testDestroyRegion command=" + command);
+    CommandResult cmdResult = executeCommand(command);
+    String strr = commandResultToString(cmdResult);
+    getLogWriter().info("testDestroyRegion strr=" + strr);
+    assertTrue(stringContainsLine(strr, "Could not find.*\"DOESNOTEXIST\".*"));
+    assertEquals(Result.Status.ERROR, cmdResult.getStatus());
+
+    // Test unable to destroy with co-location
+    command = "destroy region --name=/Customer";
+    getLogWriter().info("testDestroyRegion command=" + command);
+    cmdResult = executeCommand(command);
+    strr = commandResultToString(cmdResult);
+    getLogWriter().info("testDestroyRegion strr=" + strr);
+    assertEquals(Result.Status.ERROR, cmdResult.getStatus());
+
+    // Test success
+    command = "destroy region --name=/Order";
+    getLogWriter().info("testDestroyRegion command=" + command);
+    cmdResult = executeCommand(command);
+    strr = commandResultToString(cmdResult);
+    assertTrue(stringContainsLine(strr, ".*Order.*destroyed successfully.*"));
+    getLogWriter().info("testDestroyRegion strr=" + strr);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    command = "destroy region --name=/Customer";
+    getLogWriter().info("testDestroyRegion command=" + command);
+    cmdResult = executeCommand(command);
+    strr = commandResultToString(cmdResult);
+    assertTrue(stringContainsLine(strr, ".*Customer.*destroyed successfully.*"));
+    getLogWriter().info("testDestroyRegion strr=" + strr);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+  }
+
+
+  public void testCreateRegion46391() throws IOException {
+    createDefaultSetup(null);
+    String command = CliStrings.CREATE_REGION + " --" + CliStrings.CREATE_REGION__REGION + "=" + this.region46391 + " --" + CliStrings.CREATE_REGION__REGIONSHORTCUT + "=REPLICATE";
+
+    getLogWriter().info("testCreateRegion46391 create region command=" + command);
+
+    CommandResult cmdResult = executeCommand(command);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    command = CliStrings.PUT + " --" + CliStrings.PUT__KEY + "=k1" + " --" + CliStrings.PUT__VALUE + "=k1" + " --" + CliStrings.PUT__REGIONNAME + "=" + this.region46391;
+
+    getLogWriter().info("testCreateRegion46391 put command=" + command);
+
+    CommandResult cmdResult2 = executeCommand(command);
+    assertEquals(Result.Status.OK, cmdResult2.getStatus());
+
+    getLogWriter().info("testCreateRegion46391  cmdResult2=" + commandResultToString(cmdResult2));
+    String str1 = "Result      : true";
+    String str2 = "Key         : k1";
+    String str3 = "Key Class   : java.lang.String";
+    String str4 = "Value Class : java.lang.String";
+    String str5 = "Old Value   : <NULL>";
+
+    assertTrue(commandResultToString(cmdResult).contains("Region \"/" + this.region46391 + "\" created on"));
+
+    assertTrue(commandResultToString(cmdResult2).contains(str1));
+    assertTrue(commandResultToString(cmdResult2).contains(str2));
+    assertTrue(commandResultToString(cmdResult2).contains(str3));
+    assertTrue(commandResultToString(cmdResult2).contains(str4));
+    assertTrue(commandResultToString(cmdResult2).contains(str5));
+  }
+
+  public void bug51924_testAlterRegion() throws IOException {
+    createDefaultSetup(null);
+
+    CommandResult cmdResult = executeCommand(CliStrings.LIST_REGION);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertTrue(commandResultToString(cmdResult).contains("No Regions Found"));
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        cache.createRegionFactory(RegionShortcut.PARTITION).setStatisticsEnabled(true).create(alterRegionName);
+      }
+    });
+
+    this.alterVm1 = Host.getHost(0).getVM(1);
+    this.alterVm1Name = "VM" + this.alterVm1.getPid();
+    this.alterVm1.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, alterVm1Name);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+        getSystem(localProps);
+        Cache cache = getCache();
+
+        // Setup queues and gateway senders to be used by all tests
+        cache.createRegionFactory(RegionShortcut.PARTITION).setStatisticsEnabled(true).create(alterRegionName);
+        AsyncEventListener listener = new AsyncEventListener() {
+          @Override
+          public void close() {
+            // Nothing to do
+          }
+
+          @Override
+          public boolean processEvents(List<AsyncEvent> events) {
+            return true;
+          }
+        };
+        cache.createAsyncEventQueueFactory().create(alterAsyncEventQueueId1, listener);
+        cache.createAsyncEventQueueFactory().create(alterAsyncEventQueueId2, listener);
+        cache.createAsyncEventQueueFactory().create(alterAsyncEventQueueId3, listener);
+
+        GatewaySenderFactory gatewaySenderFactory = cache.createGatewaySenderFactory();
+        gatewaySenderFactory.setManualStart(true);
+        gatewaySenderFactory.create(alterGatewaySenderId1, 2);
+        gatewaySenderFactory.create(alterGatewaySenderId2, 3);
+        gatewaySenderFactory.create(alterGatewaySenderId3, 4);
+      }
+    });
+
+    this.alterVm2 = Host.getHost(0).getVM(2);
+    this.alterVm2Name = "VM" + this.alterVm2.getPid();
+    this.alterVm2.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.NAME_NAME, alterVm2Name);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, "Group1,Group2");
+        getSystem(localProps);
+        Cache cache = getCache();
+
+        cache.createRegionFactory(RegionShortcut.PARTITION).setStatisticsEnabled(true).create(alterRegionName);
+      }
+    });
+
+    deployJarFilesForRegionAlter();
+    regionAlterGroupTest();
+    regionAlterSetAllTest();
+    regionAlterNoChangeTest();
+    regionAlterSetDefaultsTest();
+    regionAlterManipulatePlugInsTest();
+
+    this.alterVm1.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        getCache().getRegion(alterRegionName).destroyRegion();
+      }
+    });
+  }
+
+  private void regionAlterGroupTest() {
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.ALTER_REGION);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGION, this.alterRegionName);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GROUP, "Group1");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__EVICTIONMAX, "5764");
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    String stringResult = commandResultToString(cmdResult);
+    assertEquals(4, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm1Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm2Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+
+    this.alterVm1.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        RegionAttributes attributes = getCache().getRegion(alterRegionName).getAttributes();
+        assertEquals(5764, attributes.getEvictionAttributes().getMaximum());
+      }
+    });
+
+    this.alterVm2.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        RegionAttributes attributes = getCache().getRegion(alterRegionName).getAttributes();
+        assertEquals(5764, attributes.getEvictionAttributes().getMaximum());
+      }
+    });
+
+    commandStringBuilder = new CommandStringBuilder(CliStrings.ALTER_REGION);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGION, "/" + this.alterRegionName);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GROUP, "Group2");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__EVICTIONMAX, "6963");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertFalse(
+        stringContainsLine(stringResult, this.alterVm1Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm2Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+
+    this.alterVm1.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        RegionAttributes attributes = getCache().getRegion(alterRegionName).getAttributes();
+        assertEquals(5764, attributes.getEvictionAttributes().getMaximum());
+      }
+    });
+
+    this.alterVm2.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        RegionAttributes attributes = getCache().getRegion(alterRegionName).getAttributes();
+        assertEquals(6963, attributes.getEvictionAttributes().getMaximum());
+      }
+    });
+  }
+
+  private void regionAlterSetAllTest() {
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.ALTER_REGION);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGION, "/" + this.alterRegionName);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__EVICTIONMAX, "35464");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CLONINGENABLED, "true");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ASYNCEVENTQUEUEID, this.alterAsyncEventQueueId1);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ENTRYEXPIRATIONIDLETIME, "3453");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ENTRYEXPIRATIONIDLETIMEACTION, "DESTROY");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ENTRYEXPIRATIONTIMETOLIVE, "7563");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ENTRYEXPIRATIONTTLACTION, "DESTROY");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELISTENER, "com.cadrdunit.RegionAlterCacheListenerA");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELOADER, "com.cadrdunit.RegionAlterCacheLoader");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHEWRITER, "com.cadrdunit.RegionAlterCacheWriter");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GATEWAYSENDERID, this.alterGatewaySenderId1);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGIONEXPIRATIONIDLETIME, "6234");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGIONEXPIRATIONIDLETIMEACTION, "DESTROY");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGIONEXPIRATIONTTL, "4562");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGIONEXPIRATIONTTLACTION, "DESTROY");
+
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    String stringResult = commandResultToString(cmdResult);
+    assertEquals(5, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(stringContainsLine(stringResult, "Manager.*Region \"/" + this.alterRegionName + "\" altered.*"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm1Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm2Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+
+    this.alterVm1.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        RegionAttributes attributes = getCache().getRegion(alterRegionName).getAttributes();
+        assertEquals(35464, attributes.getEvictionAttributes().getMaximum());
+        assertEquals(3453, attributes.getEntryIdleTimeout().getTimeout());
+        assertTrue(attributes.getEntryIdleTimeout().getAction().isDestroy());
+        assertEquals(7563, attributes.getEntryTimeToLive().getTimeout());
+        assertTrue(attributes.getEntryTimeToLive().getAction().isDestroy());
+        assertEquals(6234, attributes.getRegionIdleTimeout().getTimeout());
+        assertTrue(attributes.getRegionIdleTimeout().getAction().isDestroy());
+        assertEquals(4562, attributes.getRegionTimeToLive().getTimeout());
+        assertTrue(attributes.getRegionTimeToLive().getAction().isDestroy());
+        assertEquals(1, attributes.getAsyncEventQueueIds().size());
+        assertTrue(attributes.getAsyncEventQueueIds().contains(alterAsyncEventQueueId1));
+        assertEquals(1, attributes.getGatewaySenderIds().size());
+        assertTrue(attributes.getGatewaySenderIds().contains(alterGatewaySenderId1));
+        assertEquals(1, attributes.getCacheListeners().length);
+        assertEquals("com.cadrdunit.RegionAlterCacheListenerA", attributes.getCacheListeners()[0].getClass().getName());
+        assertEquals("com.cadrdunit.RegionAlterCacheWriter", attributes.getCacheWriter().getClass().getName());
+        assertEquals("com.cadrdunit.RegionAlterCacheLoader", attributes.getCacheLoader().getClass().getName());
+      }
+    });
+  }
+
+  private void regionAlterNoChangeTest() {
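+    // Only the cloning flag is touched here; the attribute values applied by
+    // the previous alter should remain unchanged on the members.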
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.ALTER_REGION);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGION, "/" + this.alterRegionName);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GROUP, "Group1");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CLONINGENABLED, "true");
+
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    String stringResult = commandResultToString(cmdResult);
+    assertEquals(4, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm1Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm2Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+
+    this.alterVm2.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        RegionAttributes attributes = getCache().getRegion(alterRegionName).getAttributes();
+        assertEquals(35464, attributes.getEvictionAttributes().getMaximum());
+        assertEquals(3453, attributes.getEntryIdleTimeout().getTimeout());
+        assertTrue(attributes.getEntryIdleTimeout().getAction().isDestroy());
+        assertEquals(7563, attributes.getEntryTimeToLive().getTimeout());
+        assertTrue(attributes.getEntryTimeToLive().getAction().isDestroy());
+        assertEquals(6234, attributes.getRegionIdleTimeout().getTimeout());
+        assertTrue(attributes.getRegionIdleTimeout().getAction().isDestroy());
+        assertEquals(4562, attributes.getRegionTimeToLive().getTimeout());
+        assertTrue(attributes.getRegionTimeToLive().getAction().isDestroy());
+        assertEquals(1, attributes.getAsyncEventQueueIds().size());
+        assertTrue(attributes.getAsyncEventQueueIds().contains(alterAsyncEventQueueId1));
+        assertEquals(1, attributes.getGatewaySenderIds().size());
+        assertTrue(attributes.getGatewaySenderIds().contains(alterGatewaySenderId1));
+        assertEquals(1, attributes.getCacheListeners().length);
+        assertEquals("com.cadrdunit.RegionAlterCacheListenerA", attributes.getCacheListeners()[0].getClass().getName());
+        assertEquals("com.cadrdunit.RegionAlterCacheWriter", attributes.getCacheWriter().getClass().getName());
+        assertEquals("com.cadrdunit.RegionAlterCacheLoader", attributes.getCacheLoader().getClass().getName());
+      }
+    });
+  }
+
+  private void regionAlterSetDefaultsTest() {
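+    // Each option below is passed without a value, which resets that
+    // attribute back to its default.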
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.ALTER_REGION);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGION, "/" + this.alterRegionName);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GROUP, "Group1");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__EVICTIONMAX);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CLONINGENABLED);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ASYNCEVENTQUEUEID);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ENTRYEXPIRATIONIDLETIME);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ENTRYEXPIRATIONTTLACTION);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELISTENER);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELOADER);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHEWRITER);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GATEWAYSENDERID);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGIONEXPIRATIONIDLETIME);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGIONEXPIRATIONIDLETIMEACTION);
+
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    String stringResult = commandResultToString(cmdResult);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertEquals(4, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm1Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm2Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+
+    this.alterVm1.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        RegionAttributes attributes = getCache().getRegion(alterRegionName).getAttributes();
+        assertEquals(0, attributes.getEvictionAttributes().getMaximum());
+        assertEquals(0, attributes.getEntryIdleTimeout().getTimeout());
+        assertTrue(attributes.getEntryIdleTimeout().getAction().isDestroy());
+        assertEquals(7563, attributes.getEntryTimeToLive().getTimeout());
+        assertTrue(attributes.getEntryTimeToLive().getAction().isInvalidate());
+        assertEquals(0, attributes.getRegionIdleTimeout().getTimeout());
+        assertTrue(attributes.getRegionIdleTimeout().getAction().isInvalidate());
+        assertEquals(4562, attributes.getRegionTimeToLive().getTimeout());
+        assertTrue(attributes.getRegionTimeToLive().getAction().isDestroy());
+        assertEquals(0, attributes.getAsyncEventQueueIds().size());
+        assertEquals(0, attributes.getGatewaySenderIds().size());
+        assertEquals(0, attributes.getCacheListeners().length);
+      }
+    });
+  }
+
+  private void regionAlterManipulatePlugInsTest() {
+
+    // Start out by putting 3 entries into each of the plug-in sets
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.ALTER_REGION);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGION, "/" + this.alterRegionName);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GROUP, "Group1");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ASYNCEVENTQUEUEID, this.alterAsyncEventQueueId1);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ASYNCEVENTQUEUEID, this.alterAsyncEventQueueId2);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ASYNCEVENTQUEUEID, this.alterAsyncEventQueueId3);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GATEWAYSENDERID, this.alterGatewaySenderId1);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GATEWAYSENDERID, this.alterGatewaySenderId2);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GATEWAYSENDERID, this.alterGatewaySenderId3);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELISTENER, "com.cadrdunit.RegionAlterCacheListenerA");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELISTENER, "com.cadrdunit.RegionAlterCacheListenerB");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELISTENER, "com.cadrdunit.RegionAlterCacheListenerC");
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    String stringResult = commandResultToString(cmdResult);
+
+    assertEquals(4, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm1Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm2Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+
+    this.alterVm1.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        RegionAttributes attributes = getCache().getRegion(alterRegionName).getAttributes();
+        assertEquals(3, attributes.getAsyncEventQueueIds().size());
+        assertTrue(attributes.getAsyncEventQueueIds().contains(alterAsyncEventQueueId1));
+        assertTrue(attributes.getAsyncEventQueueIds().contains(alterAsyncEventQueueId2));
+        assertTrue(attributes.getAsyncEventQueueIds().contains(alterAsyncEventQueueId3));
+        assertEquals(3, attributes.getGatewaySenderIds().size());
+        assertTrue(attributes.getGatewaySenderIds().contains(alterGatewaySenderId1));
+        assertTrue(attributes.getGatewaySenderIds().contains(alterGatewaySenderId2));
+        assertTrue(attributes.getGatewaySenderIds().contains(alterGatewaySenderId3));
+        assertEquals(3, attributes.getCacheListeners().length);
+        assertEquals("com.cadrdunit.RegionAlterCacheListenerA", attributes.getCacheListeners()[0].getClass().getName());
+        assertEquals("com.cadrdunit.RegionAlterCacheListenerB", attributes.getCacheListeners()[1].getClass().getName());
+        assertEquals("com.cadrdunit.RegionAlterCacheListenerC", attributes.getCacheListeners()[2].getClass().getName());
+      }
+    });
+
+    // Now take 1 entry out of each of the sets
+    commandStringBuilder = new CommandStringBuilder(CliStrings.ALTER_REGION);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGION, "/" + this.alterRegionName);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GROUP, "Group1");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ASYNCEVENTQUEUEID, this.alterAsyncEventQueueId1);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ASYNCEVENTQUEUEID, this.alterAsyncEventQueueId2);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GATEWAYSENDERID, this.alterGatewaySenderId1);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GATEWAYSENDERID, this.alterGatewaySenderId3);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELISTENER, "com.cadrdunit.RegionAlterCacheListenerB");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELISTENER, "com.cadrdunit.RegionAlterCacheListenerC");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(4, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm1Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm2Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+
+    this.alterVm2.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        RegionAttributes attributes = getCache().getRegion(alterRegionName).getAttributes();
+        assertEquals(2, attributes.getAsyncEventQueueIds().size());
+        Iterator iterator = attributes.getAsyncEventQueueIds().iterator();
+        assertEquals(alterAsyncEventQueueId1, iterator.next());
+        assertEquals(alterAsyncEventQueueId2, iterator.next());
+        assertEquals(2, attributes.getGatewaySenderIds().size());
+        iterator = attributes.getGatewaySenderIds().iterator();
+        assertEquals(alterGatewaySenderId1, iterator.next());
+        assertEquals(alterGatewaySenderId3, iterator.next());
+        assertEquals(2, attributes.getCacheListeners().length);
+        assertEquals("com.cadrdunit.RegionAlterCacheListenerB", attributes.getCacheListeners()[0].getClass().getName());
+        assertEquals("com.cadrdunit.RegionAlterCacheListenerC", attributes.getCacheListeners()[1].getClass().getName());
+      }
+    });
+
+    // Add 1 back to each of the sets
+    commandStringBuilder = new CommandStringBuilder(CliStrings.ALTER_REGION);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGION, "/" + this.alterRegionName);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GROUP, "Group1");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ASYNCEVENTQUEUEID, this.alterAsyncEventQueueId1);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ASYNCEVENTQUEUEID, this.alterAsyncEventQueueId2);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ASYNCEVENTQUEUEID, this.alterAsyncEventQueueId3);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GATEWAYSENDERID, this.alterGatewaySenderId1);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GATEWAYSENDERID, this.alterGatewaySenderId3);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GATEWAYSENDERID, this.alterGatewaySenderId2);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELISTENER, "com.cadrdunit.RegionAlterCacheListenerB");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELISTENER, "com.cadrdunit.RegionAlterCacheListenerC");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__CACHELISTENER, "com.cadrdunit.RegionAlterCacheListenerA");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(4, countLinesInString(stringResult, false));
+    assertEquals(false, stringResult.contains("ERROR"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm1Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+    assertTrue(
+        stringContainsLine(stringResult, this.alterVm2Name + ".*Region \"/" + this.alterRegionName + "\" altered.*"));
+
+    this.alterVm1.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        RegionAttributes attributes = getCache().getRegion(alterRegionName).getAttributes();
+        assertEquals(3, attributes.getAsyncEventQueueIds().size());
+        assertTrue(attributes.getAsyncEventQueueIds().contains(alterAsyncEventQueueId1));
+        assertTrue(attributes.getAsyncEventQueueIds().contains(alterAsyncEventQueueId2));
+        assertTrue(attributes.getAsyncEventQueueIds().contains(alterAsyncEventQueueId3));
+        assertEquals(3, attributes.getGatewaySenderIds().size());
+        assertTrue(attributes.getGatewaySenderIds().contains(alterGatewaySenderId1));
+        assertTrue(attributes.getGatewaySenderIds().contains(alterGatewaySenderId3));
+        assertTrue(attributes.getGatewaySenderIds().contains(alterGatewaySenderId2));
+        assertEquals(3, attributes.getCacheListeners().length);
+        assertEquals("com.cadrdunit.RegionAlterCacheListenerB", attributes.getCacheListeners()[0].getClass().getName());
+        assertEquals("com.cadrdunit.RegionAlterCacheListenerC", attributes.getCacheListeners()[1].getClass().getName());
+        assertEquals("com.cadrdunit.RegionAlterCacheListenerA", attributes.getCacheListeners()[2].getClass().getName());
+      }
+    });
+  }
+
+  /**
+   * Asserts that creating, altering and destroying regions correctly updates the shared configuration.
+   */
+  public void testCreateAlterDestroyUpdatesSharedConfig() {
+    disconnectAllFromDS();
+
+    final String regionName = "testRegionSharedConfigRegion";
+    final String groupName = "testRegionSharedConfigGroup";
+
+    // Start the Locator and wait for shared configuration to be available
+    final int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+
+        final File locatorLogFile = new File("locator-" + locatorPort + ".log");
+        final Properties locatorProps = new Properties();
+        locatorProps.setProperty(DistributionConfig.NAME_NAME, "Locator");
+        locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        locatorProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "fine");
+        locatorProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+        try {
+          final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
+              locatorProps);
+
+          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+            @Override
+            public boolean done() {
+              return locator.isSharedConfigurationRunning();
+            }
+
+            @Override
+            public String description() {
+              return "Waiting for shared configuration to be started";
+            }
+          };
+          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+        } catch (IOException ioex) {
+          fail("Unable to create a locator with a shared configuration");
+        }
+      }
+    });
+
+    // Start the default manager
+    Properties managerProps = new Properties();
+    managerProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    managerProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+    createDefaultSetup(managerProps);
+
+    // Create a cache in VM 1
+    VM vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        getSystem(localProps);
+        assertNotNull(getCache());
+      }
+    });
+
+    // Test creating the region
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_REGION);
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGION, regionName);
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT, "REPLICATE");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__STATISTICSENABLED, "true");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__GROUP, groupName);
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Make sure that the region has been registered with the Manager MXBean
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          @Override
+          public boolean done() {
+            try {
+              MBeanServer mbeanServer = MBeanJMXAdapter.mbeanServer;
+              String queryExp = MessageFormat.format(ManagementConstants.OBJECTNAME__REGION_MXBEAN,
+                  new Object[]{"/" + regionName, "*"});
+              ObjectName queryExpON = new ObjectName(queryExp);
+              return !(mbeanServer.queryNames(null, queryExpON).isEmpty());
+            } catch (MalformedObjectNameException mone) {
+              getLogWriter().error(mone);
+              fail(mone.getMessage());
+              return false;
+            }
+          }
+
+          @Override
+          public String description() {
+            return "Waiting for the region to be registed with the MXBean";
+          }
+        };
+
+        DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+      }
+    });
+
+    // Make sure the region exists in the shared config
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        try {
+          assertTrue(sharedConfig.getConfiguration(groupName).getCacheXmlContent().contains(regionName));
+        } catch (Exception e) {
+          fail("Error in cluster configuration service", e);
+        }
+      }
+    });
+
+    // Restart the data VM to make sure the changes are in place
+    vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        assertNotNull(cache);
+        cache.close();
+        assertTrue(cache.isClosed());
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        localProps.setProperty(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "true");
+        getSystem(localProps);
+        cache = getCache();
+        assertNotNull(cache);
+        Region region = cache.getRegion(regionName);
+        assertNotNull(region);
+      }
+    });
+
+
+    // Test altering the region
+    commandStringBuilder = new CommandStringBuilder(CliStrings.ALTER_REGION);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__REGION, regionName);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__GROUP, groupName);
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ENTRYEXPIRATIONTIMETOLIVE, "45635");
+    commandStringBuilder.addOption(CliStrings.ALTER_REGION__ENTRYEXPIRATIONTTLACTION, "DESTROY");
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Make sure the region was altered in the shared config
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        try {
+          assertTrue(sharedConfig.getConfiguration(groupName).getCacheXmlContent().contains("45635"));
+        } catch (Exception e) {
+          fail("Error in cluster configuration service");
+        }
+      }
+    });
+
+    // Restart the data VM to make sure the changes are in place
+    vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableCallable() {
+      @Override
+      public Object call() {
+        Cache cache = getCache();
+        assertNotNull(cache);
+        cache.close();
+        assertTrue(cache.isClosed());
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        localProps.setProperty(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "true");
+        getSystem(localProps);
+        cache = getCache();
+        assertNotNull(cache);
+        Region region = cache.getRegion(regionName);
+        assertNotNull(region);
+
+        return null;
+      }
+    });
+
+  }
+
+  public void testDestroyRegionWithSharedConfig() {
+
+    disconnectAllFromDS();
+
+    final String regionName = "testRegionSharedConfigRegion";
+    final String groupName = "testRegionSharedConfigGroup";
+
+    // Start the Locator and wait for shared configuration to be available
+    final int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+
+        final File locatorLogFile = new File("locator-" + locatorPort + ".log");
+        final Properties locatorProps = new Properties();
+        locatorProps.setProperty(DistributionConfig.NAME_NAME, "Locator");
+        locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        locatorProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "fine");
+        locatorProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+        try {
+          final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
+              locatorProps);
+
+          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+            @Override
+            public boolean done() {
+              return locator.isSharedConfigurationRunning();
+            }
+
+            @Override
+            public String description() {
+              return "Waiting for shared configuration to be started";
+            }
+          };
+          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+        } catch (IOException ioex) {
+          fail("Unable to create a locator with a shared configuration");
+        }
+      }
+    });
+
+    // Start the default manager
+    Properties managerProps = new Properties();
+    managerProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    managerProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+    createDefaultSetup(managerProps);
+
+    // Create a cache in VM 1
+    VM vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        getSystem(localProps);
+        assertNotNull(getCache());
+      }
+    });
+
+    // Test creating the region
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder(CliStrings.CREATE_REGION);
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGION, regionName);
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT, "REPLICATE");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__STATISTICSENABLED, "true");
+    commandStringBuilder.addOption(CliStrings.CREATE_REGION__GROUP, groupName);
+    CommandResult cmdResult = executeCommand(commandStringBuilder.toString());
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Make sure that the region has been registered with the Manager MXBean
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          @Override
+          public boolean done() {
+            try {
+              MBeanServer mbeanServer = MBeanJMXAdapter.mbeanServer;
+              String queryExp = MessageFormat.format(ManagementConstants.OBJECTNAME__REGION_MXBEAN,
+                  new Object[]{"/" + regionName, "*"});
+              ObjectName queryExpON = new ObjectName(queryExp);
+              return !(mbeanServer.queryNames(null, queryExpON).isEmpty());
+            } catch (MalformedObjectNameException mone) {
+              getLogWriter().error(mone);
+              fail(mone.getMessage());
+              return false;
+            }
+          }
+
+          @Override
+          public String description() {
+            return "Waiting for the region to be registed with the MXBean";
+          }
+        };
+
+        DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+      }
+    });
+
+    // Make sure the region exists in the shared config
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        try {
+          assertTrue(sharedConfig.getConfiguration(groupName).getCacheXmlContent().contains(regionName));
+        } catch (Exception e) {
+          fail("Error occurred in cluster configuration service");
+        }
+      }
+    });
+
+    // Test destroying the region
+    commandStringBuilder = new CommandStringBuilder(CliStrings.DESTROY_REGION);
+    commandStringBuilder.addOption(CliStrings.DESTROY_REGION__REGION, regionName);
+    cmdResult = executeCommand(commandStringBuilder.toString());
+    getLogWriter().info("#SB" + commandResultToString(cmdResult));
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    // Make sure the region was removed from the shared config
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        try {
+          assertFalse(sharedConfig.getConfiguration(groupName).getCacheXmlContent().contains(regionName));
+        } catch (Exception e) {
+          fail("Error occurred in cluster configuration service");
+        }
+      }
+    });
+
+
+    // Restart the data VM to make sure the region no longer exists
+    vm = Host.getHost(0).getVM(1);
+    vm.invoke(new SerializableCallable() {
+      @Override
+      public Object call() {
+        Cache cache = getCache();
+        assertNotNull(cache);
+        cache.close();
+        assertTrue(cache.isClosed());
+
+        Properties localProps = new Properties();
+        localProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        localProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+        localProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+        localProps.setProperty(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "true");
+        getSystem(localProps);
+        cache = getCache();
+        assertNotNull(cache);
+        Region region = cache.getRegion(regionName);
+        assertNull(region);
+
+        return null;
+      }
+    });
+  }
+
+  @Override
+  public void tearDown2() throws Exception {
+    for (String path : this.filesToBeDeleted) {
+      try {
+        final File fileToDelete = new File(path);
+        FileUtil.delete(fileToDelete);
+        if (path.endsWith(".jar")) {
+          executeCommand("undeploy --jar=" + fileToDelete.getName());
+        }
+      } catch (IOException e) {
+        getLogWriter().error("Unable to delete file", e);
+      }
+    }
+    this.filesToBeDeleted.clear();
+    super.tearDown2();
+  }
+
+  /**
+   * Deploys JAR files which contain classes to be instantiated by the "alter region" test.
+   */
+  private void deployJarFilesForRegionAlter() throws IOException {
+    ClassBuilder classBuilder = new ClassBuilder();
+    final File jarFile1 = new File(new File(".").getAbsolutePath(), "testAlterRegion1.jar");
+    this.filesToBeDeleted.add(jarFile1.getAbsolutePath());
+    final File jarFile2 = new File(new File(".").getAbsolutePath(), "testAlterRegion2.jar");
+    this.filesToBeDeleted.add(jarFile2.getAbsolutePath());
+    final File jarFile3 = new File(new File(".").getAbsolutePath(), "testAlterRegion3.jar");
+    this.filesToBeDeleted.add(jarFile3.getAbsolutePath());
+    final File jarFile4 = new File(new File(".").getAbsolutePath(), "testAlterRegion4.jar");
+    this.filesToBeDeleted.add(jarFile4.getAbsolutePath());
+    final File jarFile5 = new File(new File(".").getAbsolutePath(), "testAlterRegion5.jar");
+    this.filesToBeDeleted.add(jarFile5.getAbsolutePath());
+
+    byte[] jarBytes = classBuilder.createJarFromClassContent("com/cadrdunit/RegionAlterCacheListenerA",
+        "package com.cadrdunit;" + "import com.gemstone.gemfire.cache.util.CacheListenerAdapter;" + "public class RegionAlterCacheListenerA extends CacheListenerAdapter {}");
+    writeJarBytesToFile(jarFile1, jarBytes);
+    CommandResult cmdResult = executeCommand("deploy --jar=testAlterRegion1.jar");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    jarBytes = classBuilder.createJarFromClassContent("com/cadrdunit/RegionAlterCacheListenerB",
+        "package com.cadrdunit;" + "import com.gemstone.gemfire.cache.util.CacheListenerAdapter;" + "public class RegionAlterCacheListenerB extends CacheListenerAdapter {}");
+    writeJarBytesToFile(jarFile2, jarBytes);
+    cmdResult = executeCommand("deploy --jar=testAlterRegion2.jar");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    jarBytes = classBuilder.createJarFromClassContent("com/cadrdunit/RegionAlterCacheListenerC",
+        "package com.cadrdunit;" + "import com.gemstone.gemfire.cache.util.CacheListenerAdapter;" + "public class RegionAlterCacheListenerC extends CacheListenerAdapter {}");
+    writeJarBytesToFile(jarFile3, jarBytes);
+    cmdResult = executeCommand("deploy --jar=testAlterRegion3.jar");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    jarBytes = classBuilder.createJarFromClassContent("com/cadrdunit/RegionAlterCacheLoader",
+        "package com.cadrdunit;" + "import com.gemstone.gemfire.cache.CacheLoader;" + "import com.gemstone.gemfire.cache.CacheLoaderException;" + "import com.gemstone.gemfire.cache.LoaderHelper;" + "public class RegionAlterCacheLoader implements CacheLoader {" + "public void close() {}" + "public Object load(LoaderHelper helper) throws CacheLoaderException {return null;}}");
+    writeJarBytesToFile(jarFile4, jarBytes);
+    cmdResult = executeCommand("deploy --jar=testAlterRegion4.jar");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    jarBytes = classBuilder.createJarFromClassContent("com/cadrdunit/RegionAlterCacheWriter",
+        "package com.cadrdunit;" + "import com.gemstone.gemfire.cache.util.CacheWriterAdapter;" + "public class RegionAlterCacheWriter extends CacheWriterAdapter {}");
+    writeJarBytesToFile(jarFile5, jarBytes);
+    cmdResult = executeCommand("deploy --jar=testAlterRegion5.jar");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+  }
+
+  private void writeJarBytesToFile(File jarFile, byte[] jarBytes) throws IOException {
+    // try-with-resources ensures the stream is closed even if the write fails
+    try (OutputStream outStream = new FileOutputStream(jarFile)) {
+      outStream.write(jarBytes);
+      outStream.flush();
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eddef322/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DeployCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DeployCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DeployCommandsDUnitTest.java
new file mode 100644
index 0000000..eeb9896
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DeployCommandsDUnitTest.java
@@ -0,0 +1,480 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.management.internal.cli.commands;
+
+import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.DistributionManager;
+import com.gemstone.gemfire.distributed.internal.InternalLocator;
+import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
+import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.internal.ClassBuilder;
+import com.gemstone.gemfire.internal.JarDeployer;
+import com.gemstone.gemfire.management.cli.Result;
+import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
+import com.gemstone.gemfire.management.internal.cli.remote.CommandExecutionContext;
+import com.gemstone.gemfire.management.internal.cli.remote.CommandProcessor;
+import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
+import dunit.DistributedTestCase;
+import dunit.Host;
+import dunit.SerializableRunnable;
+import dunit.VM;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.Properties;
+import java.util.regex.Pattern;
+
+/**
+ * Distributed tests for the DeployCommands class
+ *
+ * @author David Hoots
+ * @since 7.0
+ */
+public class DeployCommandsDUnitTest extends CliCommandTestBase {
+  private static final long serialVersionUID = 1L;
+
+  File newDeployableJarFile = new File("DeployCommandsDUnit1.jar");
+
+  transient private ClassBuilder classBuilder = new ClassBuilder();
+  transient private CommandProcessor commandProcessor;
+
+  public DeployCommandsDUnitTest(String name) {
+    super(name);
+  }
+
+  @SuppressWarnings("serial")
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+
+    this.commandProcessor = new CommandProcessor();
+    assertFalse(this.commandProcessor.isStopped());
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        deleteSavedJarFiles();
+      }
+    });
+    deleteSavedJarFiles();
+  }
+
+  @SuppressWarnings("serial")
+  @Override
+  public void tearDown2() throws Exception {
+    Host.getHost(0).getVM(1).invoke(new SerializableRunnable() {
+      public void run() {
+        DistributionManager.isDedicatedAdminVM = false;
+      }
+    });
+
+    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
+      public void run() {
+        deleteSavedJarFiles();
+      }
+    });
+    deleteSavedJarFiles();
+
+    super.tearDown2();
+  }
+
+  @SuppressWarnings("serial")
+  public void testDeploy() throws IOException {
+    final Properties props = new Properties();
+    final Host host = Host.getHost(0);
+    final VM vm = host.getVM(0);
+    final String vmName = "VM" + vm.getPid();
+
+    // Create the cache in this VM
+    props.setProperty(DistributionConfig.NAME_NAME, "Controller");
+    props.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+    getSystem(props);
+    getCache();
+
+    // Create the cache in the other VM
+    vm.invoke(new SerializableRunnable() {
+      public void run() {
+        props.setProperty(DistributionConfig.NAME_NAME, vmName);
+        props.setProperty(DistributionConfig.GROUPS_NAME, "Group2");
+        getSystem(props);
+        getCache();
+      }
+    });
+
+    DeployCommands deployCommands = new DeployCommands();
+
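+    // setBytesFromShell stages what the shell would send with a deploy: pairs
+    // of entries, each a file name (as bytes) followed by that file's contents.
+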
+    // Single JAR all members
+    CommandExecutionContext.setBytesFromShell(
+        new byte[][]{"DeployCommandsDUnit1.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitA")});
+    Result result = deployCommands.deploy(null, "DeployCommandsDUnit1.jar", null);
+
+    assertEquals(true, result.hasNextLine());
+
+    String resultString = result.nextLine();
+    assertEquals(false, resultString.contains("ERROR"));
+    assertEquals(1, countMatchesInString(resultString, "Controller"));
+    assertEquals(1, countMatchesInString(resultString, vmName));
+    assertEquals(4, countMatchesInString(resultString, "DeployCommandsDUnit1.jar"));
+
+    // Single JAR with group
+    CommandExecutionContext.setBytesFromShell(
+        new byte[][]{"DeployCommandsDUnit2.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitB")});
+    result = deployCommands.deploy(new String[]{"Group2"}, "DeployCommandsDUnit2.jar", null);
+
+    assertEquals(true, result.hasNextLine());
+
+    resultString = result.nextLine();
+    assertEquals(false, resultString.contains("ERROR"));
+    assertEquals(false, resultString.contains("Controller"));
+    assertEquals(1, countMatchesInString(resultString, vmName));
+    assertEquals(2, countMatchesInString(resultString, "DeployCommandsDUnit2.jar"));
+
+    // Multiple JARs to all members
+    CommandExecutionContext.setBytesFromShell(
+        new byte[][]{"DeployCommandsDUnit3.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitC"), "DeployCommandsDUnit4.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitD")});
+    result = deployCommands.deploy(null, null, "AnyDirectory");
+
+    assertEquals(true, result.hasNextLine());
+
+    resultString = result.nextLine();
+    assertEquals(false, resultString.contains("ERROR"));
+    assertEquals(2, countMatchesInString(resultString, "Controller"));
+    assertEquals(2, countMatchesInString(resultString, vmName));
+    assertEquals(4, countMatchesInString(resultString, "DeployCommandsDUnit3.jar"));
+    assertEquals(4, countMatchesInString(resultString, "DeployCommandsDUnit4.jar"));
+
+    // Multiple JARs to a group
+    CommandExecutionContext.setBytesFromShell(
+        new byte[][]{"DeployCommandsDUnit5.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitE"), "DeployCommandsDUnit6.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitF")});
+    result = deployCommands.deploy(new String[]{"Group1"}, null, "AnyDirectory");
+
+    assertEquals(true, result.hasNextLine());
+
+    resultString = result.nextLine();
+    assertEquals(false, resultString.contains("ERROR"));
+    assertEquals(2, countMatchesInString(resultString, "Controller"));
+    assertEquals(false, resultString.contains(vmName));
+    assertEquals(2, countMatchesInString(resultString, "DeployCommandsDUnit5.jar"));
+    assertEquals(2, countMatchesInString(resultString, "DeployCommandsDUnit6.jar"));
+  }
+
+  @SuppressWarnings("serial")
+  public void testUndeploy() throws IOException {
+    final Properties props = new Properties();
+    final Host host = Host.getHost(0);
+    final VM vm = host.getVM(0);
+    final String vmName = "VM" + vm.getPid();
+
+    // Create the cache in this VM
+    props.setProperty(DistributionConfig.NAME_NAME, "Controller");
+    props.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+    getSystem(props);
+    getCache();
+
+    // Create the cache in the other VM
+    vm.invoke(new SerializableRunnable() {
+      public void run() {
+        props.setProperty(DistributionConfig.NAME_NAME, vmName);
+        props.setProperty(DistributionConfig.GROUPS_NAME, "Group2");
+        getSystem(props);
+        getCache();
+      }
+    });
+
+    DeployCommands deployCommands = new DeployCommands();
+
+    // Deploy a couple of JAR files which can be undeployed
+    CommandExecutionContext.setBytesFromShell(
+        new byte[][]{"DeployCommandsDUnit1.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitA")});
+    deployCommands.deploy(new String[]{"Group1"}, "DeployCommandsDUnit1.jar", null);
+    CommandExecutionContext.setBytesFromShell(
+        new byte[][]{"DeployCommandsDUnit2.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitB")});
+    deployCommands.deploy(new String[]{"Group2"}, "DeployCommandsDUnit2.jar", null);
+    CommandExecutionContext.setBytesFromShell(
+        new byte[][]{"DeployCommandsDUnit3.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitC")});
+    deployCommands.deploy(null, "DeployCommandsDUnit3.jar", null);
+    CommandExecutionContext.setBytesFromShell(
+        new byte[][]{"DeployCommandsDUnit4.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitD")});
+    deployCommands.deploy(null, "DeployCommandsDUnit4.jar", null);
+    CommandExecutionContext.setBytesFromShell(
+        new byte[][]{"DeployCommandsDUnit5.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitE")});
+    deployCommands.deploy(null, "DeployCommandsDUnit5.jar", null);
+
+    // Undeploy for 1 group
+    Result result = deployCommands.undeploy(new String[]{"Group1"}, "DeployCommandsDUnit1.jar");
+    assertEquals(true, result.hasNextLine());
+    String resultString = result.nextLine();
+    assertEquals(false, resultString.contains("ERROR"));
+    assertEquals(1, countMatchesInString(resultString, "Controller"));
+    assertEquals(false, resultString.contains(vmName));
+    assertEquals(2, countMatchesInString(resultString, "DeployCommandsDUnit1.jar"));
+
+    // Multiple Undeploy for all members
+    result = deployCommands.undeploy(null, "DeployCommandsDUnit2.jar, DeployCommandsDUnit3.jar");
+    assertEquals(true, result.hasNextLine());
+    resultString = result.nextLine();
+    assertEquals(false, resultString.contains("ERROR"));
+    assertEquals(2, countMatchesInString(resultString, "Controller"));
+    assertEquals(2, countMatchesInString(resultString, vmName));
+    assertEquals(3, countMatchesInString(resultString, "DeployCommandsDUnit2.jar"));
+    assertEquals(4, countMatchesInString(resultString, "DeployCommandsDUnit3.jar"));
+
+    // Undeploy all (no JAR specified)
+    result = deployCommands.undeploy(null, null);
+    assertEquals(true, result.hasNextLine());
+    resultString = result.nextLine();
+    assertEquals(false, resultString.contains("ERROR"));
+    assertEquals(2, countMatchesInString(resultString, "Controller"));
+    assertEquals(2, countMatchesInString(resultString, vmName));
+    assertEquals(4, countMatchesInString(resultString, "DeployCommandsDUnit4.jar"));
+    assertEquals(4, countMatchesInString(resultString, "DeployCommandsDUnit5.jar"));
+  }
+
+  @SuppressWarnings("serial")
+  public void testListDeployed() throws IOException {
+    final Properties props = new Properties();
+    final Host host = Host.getHost(0);
+    final VM vm = host.getVM(0);
+    final String vmName = "VM" + vm.getPid();
+
+    // Create the cache in this VM
+    props.setProperty(DistributionConfig.NAME_NAME, "Controller");
+    props.setProperty(DistributionConfig.GROUPS_NAME, "Group1");
+    getSystem(props);
+    getCache();
+
+    // Create the cache in the other VM
+    vm.invoke(new SerializableRunnable() {
+      public void run() {
+        props.setProperty(DistributionConfig.NAME_NAME, vmName);
+        props.setProperty(DistributionConfig.GROUPS_NAME, "Group2");
+        getSystem(props);
+        getCache();
+      }
+    });
+
+    DeployCommands deployCommands = new DeployCommands();
+
+    // Deploy a couple of JAR files which can be listed
+    CommandExecutionContext.setBytesFromShell(
+        new byte[][]{"DeployCommandsDUnit1.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitA")});
+    deployCommands.deploy(new String[]{"Group1"}, "DeployCommandsDUnit1.jar", null);
+    CommandExecutionContext.setBytesFromShell(
+        new byte[][]{"DeployCommandsDUnit2.jar".getBytes(), this.classBuilder.createJarFromName(
+            "DeployCommandsDUnitB")});
+    deployCommands.deploy(new String[]{"Group2"}, "DeployCommandsDUnit2.jar", null);
+
+    // List for all members
+    Result result = deployCommands.listDeployed(null);
+    assertEquals(true, result.hasNextLine());
+    String resultString = result.nextLine();
+    assertEquals(false, resultString.contains("ERROR"));
+    assertEquals(1, countMatchesInString(resultString, "Controller"));
+    assertEquals(1, countMatchesInString(resultString, vmName));
+    assertEquals(2, countMatchesInString(resultString, "DeployCommandsDUnit1.jar"));
+    assertEquals(2, countMatchesInString(resultString, "DeployCommandsDUnit2.jar"));
+
+    // List for members in Group1
+    result = deployCommands.listDeployed("Group1");
+    assertEquals(true, result.hasNextLine());
+    resultString = result.nextLine();
+    assertEquals(false, resultString.contains("ERROR"));
+    assertEquals(1, countMatchesInString(resultString, "Controller"));
+    assertEquals(false, resultString.contains(vmName));
+    assertEquals(2, countMatchesInString(resultString, "DeployCommandsDUnit1.jar"));
+    assertEquals(false, resultString.contains("DeployCommandsDUnit2.jar"));
+
+    // List for members in Group2
+    result = deployCommands.listDeployed("Group2");
+    assertEquals(true, result.hasNextLine());
+    resultString = result.nextLine();
+    assertEquals(false, resultString.contains("ERROR"));
+    assertEquals(false, resultString.contains("Controller"));
+    assertEquals(1, countMatchesInString(resultString, vmName));
+    assertEquals(false, resultString.contains("DeployCommandsDUnit1.jar"));
+    assertEquals(2, countMatchesInString(resultString, "DeployCommandsDUnit2.jar"));
+  }
+
+  /**
+   * Does an end-to-end test using the complete CLI framework while ensuring that the shared configuration is updated.
+   */
+  public void testEndToEnd() throws IOException {
+    disconnectAllFromDS();
+
+    final String groupName = "testDeployEndToEndGroup";
+
+    // Start the Locator and wait for shared configuration to be available
+    final int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+
+        final File locatorLogFile = new File("locator-" + locatorPort + ".log");
+        final Properties locatorProps = new Properties();
+        locatorProps.setProperty(DistributionConfig.NAME_NAME, "Locator");
+        locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+        locatorProps.setProperty(DistributionConfig.LOG_LEVEL_NAME, "fine");
+        locatorProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+        try {
+          final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
+              locatorProps);
+
+          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+            @Override
+            public boolean done() {
+              return locator.isSharedConfigurationRunning();
+            }
+
+            @Override
+            public String description() {
+              return "Waiting for shared configuration to be started";
+            }
+          };
+          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+        } catch (IOException ioex) {
+          fail("Unable to create a locator with a shared configuration");
+        }
+      }
+    });
+
+    // Start the default manager
+    Properties managerProps = new Properties();
+    managerProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
+    managerProps.setProperty(DistributionConfig.GROUPS_NAME, groupName);
+    managerProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost:" + locatorPort);
+    createDefaultSetup(managerProps);
+
+    // Create a JAR file
+    this.classBuilder.writeJarFromName("DeployCommandsDUnitA", this.newDeployableJarFile);
+
+    // Deploy the JAR
+    CommandResult cmdResult = executeCommand("deploy --jar=DeployCommandsDUnit1.jar");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    String stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, "Member.*JAR.*JAR Location"));
+    assertTrue(stringContainsLine(stringResult,
+        "Manager.*DeployCommandsDUnit1.jar.*" + JarDeployer.JAR_PREFIX + "DeployCommandsDUnit1.jar#1"));
+
+    // Undeploy the JAR
+    cmdResult = executeCommand("undeploy --jar=DeployCommandsDUnit1.jar");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, "Member.*JAR.*Un-Deployed From JAR Location"));
+    assertTrue(stringContainsLine(stringResult,
+        "Manager.*DeployCommandsDUnit1.jar.*" + JarDeployer.JAR_PREFIX + "DeployCommandsDUnit1.jar#1"));
+
+    // Deploy the JAR to a group
+    cmdResult = executeCommand("deploy --jar=DeployCommandsDUnit1.jar --group=" + groupName);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, "Member.*JAR.*JAR Location"));
+    assertTrue(stringContainsLine(stringResult,
+        "Manager.*DeployCommandsDUnit1.jar.*" + JarDeployer.JAR_PREFIX + "DeployCommandsDUnit1.jar#1"));
+
+    // Make sure the deployed jar is in the shared config
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        try {
+          assertTrue(sharedConfig.getConfiguration(groupName).getJarNames().contains("DeployCommandsDUnit1.jar"));
+        } catch (Exception e) {
+          fail("Error occurred in cluster configuration service", e);
+        }
+      }
+    });
+
+    // List deployed for group
+    cmdResult = executeCommand("list deployed --group=" + groupName);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, "Member.*JAR.*JAR Location"));
+    assertTrue(stringContainsLine(stringResult,
+        "Manager.*DeployCommandsDUnit1.jar.*" + JarDeployer.JAR_PREFIX + "DeployCommandsDUnit1.jar#1"));
+
+    // Undeploy for group
+    cmdResult = executeCommand("undeploy --group=" + groupName);
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+
+    stringResult = commandResultToString(cmdResult);
+    assertEquals(3, countLinesInString(stringResult, false));
+    assertTrue(stringContainsLine(stringResult, "Member.*JAR.*Un-Deployed From JAR Location"));
+    assertTrue(stringContainsLine(stringResult,
+        "Manager.*DeployCommandsDUnit1.jar.*" + JarDeployer.JAR_PREFIX + "DeployCommandsDUnit1.jar#1"));
+
+    // Make sure the deployed jar was removed from the shared config
+    Host.getHost(0).getVM(3).invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        SharedConfiguration sharedConfig = ((InternalLocator) Locator.getLocator()).getSharedConfiguration();
+        try {
+          assertFalse(sharedConfig.getConfiguration(groupName).getJarNames().contains("DeployCommandsDUnit1.jar"));
+        } catch (Exception e) {
+          fail("Error occurred in cluster configuration service", e);
+        }
+      }
+    });
+
+    // List deployed with nothing deployed
+    cmdResult = executeCommand("list deployed");
+    assertEquals(Result.Status.OK, cmdResult.getStatus());
+    assertTrue(commandResultToString(cmdResult).contains(CliStrings.LIST_DEPLOYED__NO_JARS_FOUND_MESSAGE));
+  }
+
+  final Pattern pattern = Pattern.compile("^" + JarDeployer.JAR_PREFIX + "DeployCommandsDUnit.*#\\d++$");
+
+  void deleteSavedJarFiles() {
+    this.newDeployableJarFile.delete();
+
+    File dirFile = new File(".");
+    // Find all deployed JAR files
+    File[] oldJarFiles = dirFile.listFiles(new FilenameFilter() {
+      @Override
+      public boolean accept(final File file, final String name) {
+        return DeployCommandsDUnitTest.this.pattern.matcher(name).matches();
+      }
+    });
+
+    // Now delete them
+    if (oldJarFiles != null) {
+      for (File oldJarFile : oldJarFiles) {
+        oldJarFile.delete();
+      }
+    }
+  }
+}


[07/50] [abbrv] incubator-geode git commit: new unit tests and code clean-up

Posted by kl...@apache.org.
new unit tests and code clean-up

New unit tests to increase code coverage.  I also tinkered with
trying to get EclEmma output from DUnit ChildVMs, but the coverage
dump appears to run in a shutdown hook that executes in parallel
with the DUnitLauncher shutdown hook.
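
(For context, not part of this commit: shutdown hooks registered with
Runtime.addShutdownHook() are all started concurrently, in no guaranteed
order, when the JVM exits, which is why a coverage-dump hook can overlap
the DUnitLauncher hook. The class and hook names in the sketch below are
illustrative only.)

  public class ShutdownHookRaceDemo {
    public static void main(String[] args) {
      Runtime.getRuntime().addShutdownHook(new Thread("coverage-hook") {
        @Override
        public void run() {
          // stands in for the coverage agent's dump (illustrative placeholder)
          System.out.println("coverage hook running");
        }
      });
      Runtime.getRuntime().addShutdownHook(new Thread("launcher-hook") {
        @Override
        public void run() {
          // stands in for DUnitLauncher's cleanup (illustrative placeholder)
          System.out.println("launcher hook running");
        }
      });
      // When the JVM exits, both hooks are started concurrently and in no
      // guaranteed order, so neither can assume the other has finished.
    }
  }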


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/bd43c341
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/bd43c341
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/bd43c341

Branch: refs/heads/feature/GEODE-291
Commit: bd43c341e8483df7fff1caabee666791e75f0dd4
Parents: 8f9b321
Author: Bruce Schuchardt <bs...@pivotal.io>
Authored: Mon Dec 7 13:30:00 2015 -0800
Committer: Bruce Schuchardt <bs...@pivotal.io>
Committed: Mon Dec 7 13:30:00 2015 -0800

----------------------------------------------------------------------
 .../internal/DistributionMessage.java           |   2 +-
 .../internal/membership/MemberAttributes.java   | 131 +----
 .../membership/gms/membership/GMSJoinLeave.java |  44 +-
 .../gms/messages/HeartbeatMessage.java          |   2 +-
 .../gms/messages/HeartbeatRequestMessage.java   |   2 +-
 .../gms/messages/InstallViewMessage.java        |   2 +-
 .../gms/messages/JoinResponseMessage.java       |  10 +-
 .../membership/gms/messages/ViewAckMessage.java |   2 +-
 .../gms/messenger/AddressManager.java           |  21 +-
 .../membership/gms/messenger/GMSPingPonger.java |  22 +-
 .../membership/gms/messenger/JGAddress.java     |  23 +-
 .../gms/messenger/JGroupsMessenger.java         | 385 +++++++--------
 .../membership/gms/messenger/Transport.java     |   2 +-
 .../internal/tcpserver/TcpServer.java           |   2 +-
 .../internal/i18n/ParentLocalizedStrings.java   |   4 +-
 .../gemfire/cache30/ReconnectDUnitTest.java     |   2 +-
 .../internal/DistributionManagerDUnitTest.java  |   1 +
 .../membership/MembershipJUnitTest.java         | 116 ++++-
 .../membership/gms/MembershipManagerHelper.java |   1 +
 .../messenger/JGroupsMessengerJUnitTest.java    | 481 ++++++++++++++++---
 .../src/test/java/dunit/RemoteDUnitVMIF.java    |   2 +
 .../src/test/java/dunit/standalone/ChildVM.java |  11 +-
 .../java/dunit/standalone/DUnitLauncher.java    |  24 +
 .../java/dunit/standalone/ProcessManager.java   |  14 +-
 .../java/dunit/standalone/RemoteDUnitVM.java    |   7 +-
 25 files changed, 821 insertions(+), 492 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionMessage.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionMessage.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionMessage.java
index 23f9dee..80ae4c0 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionMessage.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionMessage.java
@@ -174,7 +174,7 @@ public abstract class DistributionMessage
     }
   } 
   
-  public final boolean isDirectAck() {
+  public boolean isDirectAck() {
     return acker != null;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/MemberAttributes.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/MemberAttributes.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/MemberAttributes.java
index 7cd89d7..2d4d980 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/MemberAttributes.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/MemberAttributes.java
@@ -16,26 +16,20 @@
  */
 package com.gemstone.gemfire.distributed.internal.membership;
 
-import com.gemstone.gemfire.DataSerializable;
-import com.gemstone.gemfire.DataSerializer;
-import com.gemstone.gemfire.InternalGemFireError;
-import com.gemstone.gemfire.distributed.DurableClientAttributes;
-import com.gemstone.gemfire.internal.HeapDataOutputStream;
-import com.gemstone.gemfire.internal.Version;
-import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.StringTokenizer;
 
-import java.io.*;
-import java.util.*;
+import com.gemstone.gemfire.distributed.DurableClientAttributes;
 
 /**
- * The attributes of a distributed member. These attributes are stored as
- * the AdditionalBytes in JGroups' IpAddress.
+ * The attributes of a distributed member.  This is largely deprecated as
+ * GMSMember holds all of this information.
  *
  * @author Kirk Lund
  * @since 5.0
  */
-public class MemberAttributes implements DataSerializable {
-  private static final long serialVersionUID = -3257772958884802693L;
+public class MemberAttributes {
   
   public static final MemberAttributes INVALID = new MemberAttributes(-1, -1, -1, -1, null, null, null);
   
@@ -68,23 +62,6 @@ public class MemberAttributes implements DataSerializable {
     this.durableClientAttributes = durableClientAttributes;
   }
   
-  /** Constructs new MemberAttributes from DataInput.  */
-  public MemberAttributes(byte[] b) throws IOException, ClassNotFoundException {
-    this.byteInfo = b;
-    DataInputStream in = 
-      new DataInputStream(new ByteArrayInputStream(b));
-    fromData(in);
-  }
-  
-  public MemberAttributes(MemberAttributes other) {
-    this.dcPort = other.dcPort;
-    this.vmPid = other.vmPid;
-    this.vmKind = other.vmKind;
-    this.name = other.name;
-    this.groups = other.groups;
-    this.durableClientAttributes = other.durableClientAttributes;
-  }
-  
   /** Returns direct channel port. */
   public int getPort() {
     return this.dcPort;
@@ -115,22 +92,6 @@ public class MemberAttributes implements DataSerializable {
     return this.durableClientAttributes;
   }
 
-  /** Parses comma-separated-values into array of groups (strings). */
-  public static String[] parseGroups(String csv) {
-    if (csv == null || csv.length() == 0) {
-      return new String[0];
-    }
-    List groups = new ArrayList();
-    StringTokenizer st = new StringTokenizer(csv, ",");
-    while (st.hasMoreTokens()) {
-      String groupName = st.nextToken().trim();
-      // TODO make case insensitive
-      if (!groups.contains(groupName)) { // only add each group once
-        groups.add(groupName);
-      }
-    }
-    return (String[]) groups.toArray(new String[groups.size()]);
-  }
   /** Parses comma-separated-roles/groups into array of groups (strings). */
   public static String[] parseGroups(String csvRoles, String csvGroups) {
     List<String> groups = new ArrayList<String>();
@@ -138,6 +99,8 @@ public class MemberAttributes implements DataSerializable {
     parseCsv(groups, csvGroups);
     return (String[]) groups.toArray(new String[groups.size()]);
   }
+  
+  
   private static void parseCsv(List<String> groups, String csv) {
     if (csv == null || csv.length() == 0) {
       return;
@@ -151,82 +114,6 @@ public class MemberAttributes implements DataSerializable {
     }
   }
   
-  /** Writes the contents of this object to the given output. */
-  public void toData(DataOutput out) throws IOException {
-    out.writeInt(this.dcPort);
-    out.writeInt(this.vmPid);
-    out.writeInt(this.vmKind);
-    DataSerializer.writeString(this.name, out);
-    DataSerializer.writeStringArray(this.groups, out);
-    DataSerializer.writeString(this.durableClientAttributes==null ? "" : this.durableClientAttributes.getId(), out);
-    DataSerializer.writeInteger(Integer.valueOf(this.durableClientAttributes==null ? 300 : this.durableClientAttributes.getTimeout()), out);
-  }
-
-  /** Reads the contents of this object from the given input. */
-  public void fromData(DataInput in)
-  throws IOException, ClassNotFoundException {
-    this.dcPort = in.readInt();
-    this.vmPid = in.readInt();
-    this.vmKind = in.readInt();
-    this.name = DataSerializer.readString(in);
-    this.groups = DataSerializer.readStringArray(in);
-    String durableId = DataSerializer.readString(in);
-    int durableTimeout = DataSerializer.readInteger(in).intValue();
-    this.durableClientAttributes = new DurableClientAttributes(durableId, durableTimeout);
-  }
-  
-  private byte[] byteInfo;
-  
-  /** Returns the contents of this objects serialized as a byte array. */
-  public byte[] toByteArray() {
-    if (byteInfo != null) {
-      return byteInfo;
-    }
-    try {
-      HeapDataOutputStream hdos = new HeapDataOutputStream(Version.CURRENT);
-      toData(hdos);
-      byteInfo = hdos.toByteArray();
-      return byteInfo;
-    }
-    catch (IOException e) {
-      throw new InternalGemFireError(LocalizedStrings.MemberAttributes_IOEXCEPTION_ON_A_BYTE_ARRAY_0.toLocalizedString(e));
-    }
-  }
-  
-  public static MemberAttributes fromByteArray(byte[] bytes) {
-    try {
-      return new MemberAttributes(bytes);
-    }
-    catch (IOException e) {
-      throw new InternalGemFireError(LocalizedStrings.MemberAttributes_IOEXCEPTION_ON_A_BYTE_ARRAY_0.toLocalizedString(e));
-    }
-    catch (ClassNotFoundException e) {
-      throw new InternalGemFireError(LocalizedStrings.MemberAttributes_CLASSNOTFOUNDEXCEPTION_IN_DESERIALIZATION_0.toLocalizedString(e));
-    }
-  }
-
-	/**
-	 * Returns a string representation of the object.
-	 * 
-	 * @return a string representation of the object
-	 */
-  @Override
-	public String toString() {
-		final StringBuffer sb = new StringBuffer("[MemberAttributes: ");
-		sb.append("dcPort=").append(this.dcPort);
-		sb.append(", vmPid=").append(this.vmPid);
-		sb.append(", vmKind=").append(this.vmKind);
-		sb.append(", name=").append(this.name);
-		sb.append(", groups=").append("(");
-    for (int i = 0; i < groups.length; i++) {
-      sb.append(groups[i]);
-    }
-    sb.append(")");
-    sb.append(", durableClientAttributes=").append(this.durableClientAttributes);
-    sb.append("]");
-		return sb.toString();
-	}
-
   /**
    * @return the membership view number in which this member was born
    */

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
index 84a0bd7..3e767ae 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/membership/GMSJoinLeave.java
@@ -230,7 +230,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
 
       SearchState state = searchState;
       
-      long locatorWaitTime = services.getConfig().getLocatorWaitTime() * 1000;
+      long locatorWaitTime = ((long)services.getConfig().getLocatorWaitTime()) * 1000L;
       long timeout = services.getConfig().getJoinTimeout();
       logger.debug("join timeout is set to {}", timeout);
       long retrySleep =  JOIN_RETRY_SLEEP;
@@ -560,7 +560,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
     logger.debug("JoinLeave is recording the request to be processed in the next membership view");
     synchronized (viewRequests) {
       viewRequests.add(request);
-      viewRequests.notify();
+      viewRequests.notifyAll();
     }
   }
 
@@ -862,7 +862,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
     FindCoordinatorRequest request = new FindCoordinatorRequest(this.localAddress, state.alreadyTried, state.viewId);
     Set<InternalDistributedMember> coordinators = new HashSet<InternalDistributedMember>();
     
-    long giveUpTime = System.currentTimeMillis() + services.getConfig().getLocatorWaitTime() * 1000;
+    long giveUpTime = System.currentTimeMillis() + ((long)services.getConfig().getLocatorWaitTime() * 1000L);
     
     int connectTimeout = (int)services.getConfig().getMemberTimeout() * 2;
     boolean anyResponses = false;
@@ -1055,7 +1055,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
   private void processJoinResponse(JoinResponseMessage rsp) {
     synchronized (joinResponse) {
       joinResponse[0] = rsp;
-      joinResponse.notify();
+      joinResponse.notifyAll();
     }
   }
   
@@ -1149,7 +1149,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
 
       isJoined = true;
       synchronized(joinResponse) {
-        joinResponse.notify();
+        joinResponse.notifyAll();
       }
 
       if (!newView.getCreator().equals(this.localAddress)) {
@@ -1253,7 +1253,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
           && newView.getCreator().equals(localAddress)) { // view-creator logs this
         newView.logCrashedMemberWeights(currentView, logger);
       }
-      int failurePoint = (int) (Math.round(51 * oldWeight) / 100.0);
+      int failurePoint = (int) (Math.round(51.0 * oldWeight) / 100.0);
       if (failedWeight > failurePoint && quorumLostView != newView) {
         quorumLostView = newView;
         logger.warn("total weight lost in this view change is {} of {}.  Quorum has been lost!", failedWeight, oldWeight);
@@ -1437,30 +1437,39 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
     logger.debug("processing {}", m);
     switch (m.getDSFID()) {
     case JOIN_REQUEST:
+      assert m instanceof JoinRequestMessage;
       processJoinRequest((JoinRequestMessage) m);
       break;
     case JOIN_RESPONSE:
+      assert m instanceof JoinResponseMessage;
       processJoinResponse((JoinResponseMessage) m);
       break;
     case INSTALL_VIEW_MESSAGE:
+      assert m instanceof InstallViewMessage;
       processViewMessage((InstallViewMessage) m);
       break;
     case VIEW_ACK_MESSAGE:
+      assert m instanceof ViewAckMessage;
       processViewAckMessage((ViewAckMessage) m);
       break;
     case LEAVE_REQUEST_MESSAGE:
+      assert m instanceof LeaveRequestMessage;
       processLeaveRequest((LeaveRequestMessage) m);
       break;
     case REMOVE_MEMBER_REQUEST:
+      assert m instanceof RemoveMemberMessage;
       processRemoveRequest((RemoveMemberMessage) m);
       break;
     case FIND_COORDINATOR_REQ:
+      assert m instanceof FindCoordinatorRequest;
       processFindCoordinatorRequest((FindCoordinatorRequest) m);
       break;
     case FIND_COORDINATOR_RESP:
+      assert m instanceof FindCoordinatorResponse;
       processFindCoordinatorResponse((FindCoordinatorResponse) m);
       break;
     case NETWORK_PARTITION_MESSAGE:
+      assert m instanceof NetworkPartitionMessage;
       processNetworkPartitionMessage((NetworkPartitionMessage) m);
       break;
     default:
@@ -1591,7 +1600,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
       if (notRepliedYet.isEmpty() || (pendingRemovals != null && pendingRemovals.containsAll(notRepliedYet))) {
         logger.debug("All anticipated view responses received - notifying waiting thread");
         waiting = false;
-        notify();
+        notifyAll();
       } else {
         logger.debug("Still waiting for these view replies: {}", notRepliedYet);
       }
@@ -1616,14 +1625,16 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
           }
         }
       } finally {
-        if (!this.waiting) {
-          // if we've set waiting to false due to incoming messages then
-          // we've discounted receiving any other responses from the
-          // remaining members due to leave/crash notification
-          result = pendingRemovals;
-        } else {
-          result.addAll(pendingRemovals);
-          this.waiting = false;
+        synchronized(this) {
+          if (!this.waiting) {
+            // if we've set waiting to false due to incoming messages then
+            // we've discounted receiving any other responses from the
+            // remaining members due to leave/crash notification
+            result = pendingRemovals;
+          } else {
+            result.addAll(pendingRemovals);
+            this.waiting = false;
+          }
         }
       }
       return result;
@@ -1690,7 +1701,7 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
     void shutdown() {
       shutdown = true;
       synchronized (viewRequests) {
-        viewRequests.notify();
+        viewRequests.notifyAll();
         interrupt();
       }
     }
@@ -1906,7 +1917,6 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
         // be reused in an auto-reconnect and get a new vmViewID
         mbrs.addAll(joinReqs);
         newView = new NetView(localAddress, viewNumber, mbrs, leaveReqs, new HashSet<InternalDistributedMember>(removalReqs));
-        int size = joinReqs.size();
         for (InternalDistributedMember mbr: joinReqs) {
           if (mbrs.contains(mbr)) {
             newView.setFailureDetectionPort(mbr, joinPorts.get(mbr));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/HeartbeatMessage.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/HeartbeatMessage.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/HeartbeatMessage.java
index a116913..6662d2c 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/HeartbeatMessage.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/HeartbeatMessage.java
@@ -48,7 +48,7 @@ public class HeartbeatMessage extends HighPriorityDistributionMessage {
   }
 
   @Override
-  protected void process(DistributionManager dm) {
+  public void process(DistributionManager dm) {
     throw new IllegalStateException("this message is not intended to execute in a thread pool");
   }
  

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/HeartbeatRequestMessage.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/HeartbeatRequestMessage.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/HeartbeatRequestMessage.java
index f7e1009..3c08e33 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/HeartbeatRequestMessage.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/HeartbeatRequestMessage.java
@@ -48,7 +48,7 @@ public class HeartbeatRequestMessage extends HighPriorityDistributionMessage{
   }
 
   @Override
-  protected void process(DistributionManager dm) {
+  public void process(DistributionManager dm) {
     throw new IllegalStateException("this message is not intended to execute in a thread pool");
   }   
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/InstallViewMessage.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/InstallViewMessage.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/InstallViewMessage.java
index 8d4cb4e..91f6918 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/InstallViewMessage.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/InstallViewMessage.java
@@ -76,7 +76,7 @@ public class InstallViewMessage extends HighPriorityDistributionMessage {
   }
 
   @Override
-  protected void process(DistributionManager dm) {
+  public void process(DistributionManager dm) {
     throw new IllegalStateException("this message is not intended to execute in a thread pool");
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/JoinResponseMessage.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/JoinResponseMessage.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/JoinResponseMessage.java
index df1b3f6..c01353a 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/JoinResponseMessage.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/JoinResponseMessage.java
@@ -36,7 +36,7 @@ public class JoinResponseMessage extends HighPriorityDistributionMessage {
   private NetView currentView;
   private String rejectionMessage;
   private InternalDistributedMember memberID;
-  private Object messengerData;
+  private byte[] messengerData;
   private boolean becomeCoordinator;
   
   public JoinResponseMessage(InternalDistributedMember memberID, NetView view) {
@@ -76,11 +76,11 @@ public class JoinResponseMessage extends HighPriorityDistributionMessage {
     return rejectionMessage;
   }
   
-  public Object getMessengerData() {
+  public byte[] getMessengerData() {
     return this.messengerData;
   }
   
-  public void setMessengerData(Object data) {
+  public void setMessengerData(byte[] data) {
     this.messengerData = data;
   }
 
@@ -114,7 +114,7 @@ public class JoinResponseMessage extends HighPriorityDistributionMessage {
     DataSerializer.writeObject(memberID, out);
     out.writeBoolean(becomeCoordinator);
     DataSerializer.writeString(rejectionMessage, out);
-    DataSerializer.writeObject(messengerData, out);
+    DataSerializer.writeByteArray(messengerData, out);
   }
 
   @Override
@@ -123,7 +123,7 @@ public class JoinResponseMessage extends HighPriorityDistributionMessage {
     memberID = DataSerializer.readObject(in);
     becomeCoordinator = in.readBoolean();
     rejectionMessage = DataSerializer.readString(in);
-    messengerData = DataSerializer.readObject(in);
+    messengerData = DataSerializer.readByteArray(in);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/ViewAckMessage.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/ViewAckMessage.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/ViewAckMessage.java
index 00f31d6..39ade6e 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/ViewAckMessage.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messages/ViewAckMessage.java
@@ -74,7 +74,7 @@ public class ViewAckMessage extends HighPriorityDistributionMessage {
   }
 
   @Override
-  protected void process(DistributionManager dm) {
+  public void process(DistributionManager dm) {
     throw new IllegalStateException("this message is not intended to execute in a thread pool");
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/AddressManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/AddressManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/AddressManager.java
index 0fd1c6e..1169044 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/AddressManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/AddressManager.java
@@ -23,10 +23,8 @@ import java.util.List;
 import org.apache.logging.log4j.Logger;
 import org.jgroups.Address;
 import org.jgroups.Event;
-import org.jgroups.Message;
 import org.jgroups.protocols.PingData;
 import org.jgroups.protocols.TP;
-import org.jgroups.protocols.UDP;
 import org.jgroups.stack.IpAddress;
 import org.jgroups.stack.Protocol;
 import org.jgroups.util.Responses;
@@ -55,19 +53,16 @@ public class AddressManager extends Protocol {
   @Override
   public Object up(Event evt) {
     
-//    logger.info("AddressManager.up: " + evt);
-    
     switch (evt.getType()) {
 
     case Event.FIND_MBRS:
       List<Address> missing = (List<Address>)evt.getArg();
-//      logger.debug("AddressManager.FIND_MBRS processing {}", missing);
+
       Responses responses = new Responses(false);
       for (Address laddr: missing) {
         try {
           if (laddr instanceof JGAddress) {
             PingData pd = new PingData(laddr, true, laddr.toString(), newIpAddress(laddr));
-//            logger.debug("AddressManager.FIND_MBRS adding response {}", pd);
             responses.addResponse(pd, false);
             updateUDPCache(pd);
           }
@@ -96,17 +91,13 @@ public class AddressManager extends Protocol {
       findPingDataMethod();
     }
     if (setPingData != null) {
-      Exception problem = null;
       try {
         setPingData.invoke(transport, new Object[]{pd});
-      } catch (InvocationTargetException e) {
-        problem = e;
-      } catch (IllegalAccessException e) {
-        problem = e;
-      }
-      if (problem != null && !warningLogged) {
-        log.warn("Unable to update JGroups address cache - this may affect performance", problem);
-        warningLogged = true;
+      } catch (InvocationTargetException | IllegalAccessException e) {
+        if (!warningLogged) {
+          log.warn("Unable to update JGroups address cache - this may affect performance", e);
+          warningLogged = true;
+        }
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/GMSPingPonger.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/GMSPingPonger.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/GMSPingPonger.java
index fb32254..e2951ee 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/GMSPingPonger.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/GMSPingPonger.java
@@ -35,23 +35,31 @@ public class GMSPingPonger {
   }
   
   public void sendPongMessage(JChannel channel, Address src, Address dest) throws Exception {
-    channel.send(createJGMessage(pongInBytes, src, dest, Version.CURRENT_ORDINAL)); 
+    channel.send(createPongMessage(src, dest)); 
   }
   
   public Message createPongMessage(Address src, Address dest) {
 	  return createJGMessage(pongInBytes, src, dest, Version.CURRENT_ORDINAL);
   }
   
+  public Message createPingMessage(Address src, Address dest) {
+    return createJGMessage(pingInBytes, src, dest, Version.CURRENT_ORDINAL);
+  }
+  
   public void sendPingMessage(JChannel channel, Address src, JGAddress dest) throws Exception {
-    channel.send(createJGMessage(pingInBytes, src, dest, Version.CURRENT_ORDINAL));
+    channel.send(createPingMessage(src, dest));
   }
 
   private Message createJGMessage(byte[] msgBytes, Address src, Address dest, short version) {
-	Message msg = new Message();
-	msg.setDest(dest);
-	msg.setSrc(src);
-	msg.setObject(msgBytes);
-	return msg;
+    Message msg = new Message();
+    msg.setDest(dest);
+    msg.setSrc(src);
+    msg.setObject(msgBytes);
+    msg.setFlag(Message.Flag.NO_RELIABILITY);
+    msg.setFlag(Message.Flag.NO_FC);
+    msg.setFlag(Message.Flag.DONT_BUNDLE);
+    msg.setFlag(Message.Flag.OOB);
+    return msg;
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGAddress.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGAddress.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGAddress.java
index 1380eb2..6ddafa0 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGAddress.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGAddress.java
@@ -84,22 +84,6 @@ public class JGAddress extends UUID {
   }
 
 
-  private void setAddressToLocalHost() {
-      try {
-          ip_addr=InetAddress.getLocalHost();  // get first NIC found (on multi-homed systems)
-      }
-      catch(Exception e) {
-          ip_addr=null;
-      }
-      if(ip_addr == null) {
-          try {
-              ip_addr=InetAddress.getByName(null);
-          }
-          catch(UnknownHostException e) {
-          }
-      }
-  }
-
   public final InetAddress  getInetAddress()               {return ip_addr;}
   public final int          getPort()                    {return port;}
   
@@ -112,6 +96,7 @@ public class JGAddress extends UUID {
   }
 
 
+  @Override
   public String toString() {
       StringBuilder sb=new StringBuilder();
 
@@ -137,6 +122,7 @@ public class JGAddress extends UUID {
   }
 
 
+  @Override
   public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
       try {
           readFrom(in);
@@ -146,6 +132,7 @@ public class JGAddress extends UUID {
       }
   }
 
+  @Override
   public void writeExternal(ObjectOutput out) throws IOException {
       try {
           writeTo(out);
@@ -155,6 +142,7 @@ public class JGAddress extends UUID {
       }
   }
 
+  @Override
   public void writeTo(DataOutput out) throws Exception {
       if(ip_addr != null) {
           byte[] address=ip_addr.getAddress();  // 4 bytes (IPv4) or 16 bytes (IPv6)
@@ -180,6 +168,7 @@ public class JGAddress extends UUID {
     return leastSigBits;
   }
 
+  @Override
   public void readFrom(DataInput in) throws Exception {
       int len=in.readByte();
       if(len > 0 && (len != Global.IPV4_SIZE && len != Global.IPV6_SIZE))
@@ -202,6 +191,7 @@ public class JGAddress extends UUID {
       leastSigBits = in.readLong();
   }
 
+  @Override
   public int size() {
       // length (1 bytes) + 4 bytes for port
       int tmp_size=Global.BYTE_SIZE+ Global.SHORT_SIZE +Global.SHORT_SIZE
@@ -213,6 +203,7 @@ public class JGAddress extends UUID {
       return tmp_size;
   }
 
+  @Override
   public JGAddress copy() {
     JGAddress result = new JGAddress();
     result.mostSigBits = mostSigBits;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessenger.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessenger.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessenger.java
index 4e68b63..326491a 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessenger.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/JGroupsMessenger.java
@@ -53,6 +53,7 @@ import org.jgroups.Message;
 import org.jgroups.Message.Flag;
 import org.jgroups.Message.TransientFlag;
 import org.jgroups.Receiver;
+import org.jgroups.ReceiverAdapter;
 import org.jgroups.View;
 import org.jgroups.ViewId;
 import org.jgroups.conf.ClassConfigurator;
@@ -64,6 +65,7 @@ import org.jgroups.util.UUID;
 import com.gemstone.gemfire.DataSerializer;
 import com.gemstone.gemfire.ForcedDisconnectException;
 import com.gemstone.gemfire.GemFireConfigException;
+import com.gemstone.gemfire.GemFireIOException;
 import com.gemstone.gemfire.SystemConnectException;
 import com.gemstone.gemfire.distributed.DistributedSystemDisconnectedException;
 import com.gemstone.gemfire.distributed.DurableClientAttributes;
@@ -100,13 +102,6 @@ public class JGroupsMessenger implements Messenger {
   private static final Logger logger = Services.getLogger();
 
   /**
-   * The system property that specifies the name of a file from which to read
-   * Jgroups configuration information
-   */
-  public static final String JGROUPS_CONFIG = System
-      .getProperty("geode.jgroups_config_file");
-
-  /**
    * The location (in the product) of the locator Jgroups config file.
    */
   private static final String DEFAULT_JGROUPS_TCP_CONFIG = "com/gemstone/gemfire/distributed/internal/membership/gms/messenger/jgroups-config.xml";
@@ -138,7 +133,7 @@ public class JGroupsMessenger implements Messenger {
 
   private GMSPingPonger pingPonger = new GMSPingPonger();
   
-  private volatile long pongsReceived;
+  protected volatile long pongsReceived;
   
   /**
    * A set that contains addresses that we have logged JGroups IOExceptions for in the
@@ -173,23 +168,15 @@ public class JGroupsMessenger implements Messenger {
 
     InputStream is= null;
 
-    if (JGROUPS_CONFIG != null) {
-      File file = new File(JGROUPS_CONFIG);
-      if (!file.exists()) {
-        throw new GemFireConfigException(LocalizedStrings.GroupMembershipService_JGROUPS_CONFIGURATION_FILE_0_DOES_NOT_EXIST.toLocalizedString(JGROUPS_CONFIG));
-      }
+    String r = null;
+    if (transport.isMcastEnabled()) {
+      r = DEFAULT_JGROUPS_MCAST_CONFIG;
+    } else {
+      r = DEFAULT_JGROUPS_TCP_CONFIG;
     }
-    else {
-      String r = null;
-      if (transport.isMcastEnabled()) {
-        r = DEFAULT_JGROUPS_MCAST_CONFIG;
-      } else {
-        r = DEFAULT_JGROUPS_TCP_CONFIG;
-      }
-      is = ClassPathLoader.getLatest().getResourceAsStream(getClass(), r);
-      if (is == null) {
-        throw new GemFireConfigException(LocalizedStrings.GroupMembershipService_CANNOT_FIND_0.toLocalizedString(r));
-      }
+    is = ClassPathLoader.getLatest().getResourceAsStream(getClass(), r);
+    if (is == null) {
+      throw new GemFireConfigException(LocalizedStrings.GroupMembershipService_CANNOT_FIND_0.toLocalizedString(r));
     }
 
     String properties;
@@ -198,11 +185,7 @@ public class JGroupsMessenger implements Messenger {
       //properties = config.getProtocolStackString();
       StringBuffer sb = new StringBuffer(3000);
       BufferedReader br;
-      if (JGROUPS_CONFIG != null) {
-        br = new BufferedReader(new InputStreamReader(is));
-      } else {
-        br = new BufferedReader(new InputStreamReader(is, "US-ASCII"));
-      }
+      br = new BufferedReader(new InputStreamReader(is, "US-ASCII"));
       String input;
       while ((input=br.readLine()) != null) {
         sb.append(input);
@@ -354,7 +337,7 @@ public class JGroupsMessenger implements Messenger {
   public void stop() {
     if (this.myChannel != null) {
       if ((services.isShutdownDueToForcedDisconnect() && services.isAutoReconnectEnabled()) || services.getManager().isReconnectingDS()) {
-        
+        // leave the channel open for reconnect attempts
       }
       else {
         this.myChannel.close();
@@ -396,12 +379,11 @@ public class JGroupsMessenger implements Messenger {
    * recipient.<p>
    * see Transport._send()
    */
-  public void handleJGroupsIOException(IOException e, Message msg, Address dest) {
+  public void handleJGroupsIOException(IOException e, Address dest) {
     if (addressesWithioExceptionsProcessed.contains(dest)) {
       return;
     }
     addressesWithioExceptionsProcessed.add(dest);
-    logger.info("processing JGroups IOException: " + e.getMessage());
     NetView v = this.view;
     JGAddress jgMbr = (JGAddress)dest;
     if (jgMbr != null && v != null) {
@@ -444,18 +426,18 @@ public class JGroupsMessenger implements Messenger {
         logger.info("Unable to find getPhysicallAddress method in UDP - parsing its address instead");
       }
       
-      if (this.jgAddress == null) {
-        String addr = udp.getLocalPhysicalAddress();
-        int cidx = addr.lastIndexOf(':');  // IPv6 literals might have colons
-        String host = addr.substring(0, cidx);
-        int jgport = Integer.parseInt(addr.substring(cidx+1, addr.length()));
-        try {
-          this.jgAddress = new JGAddress(logicalAddress, new IpAddress(InetAddress.getByName(host), jgport));
-        } catch (UnknownHostException e) {
-          myChannel.disconnect();
-          throw new SystemConnectException("unable to initialize jgroups address", e);
-        }
-      }
+//      if (this.jgAddress == null) {
+//        String addr = udp.getLocalPhysicalAddress();
+//        int cidx = addr.lastIndexOf(':');  // IPv6 literals might have colons
+//        String host = addr.substring(0, cidx);
+//        int jgport = Integer.parseInt(addr.substring(cidx+1, addr.length()));
+//        try {
+//          this.jgAddress = new JGAddress(logicalAddress, new IpAddress(InetAddress.getByName(host), jgport));
+//        } catch (UnknownHostException e) {
+//          myChannel.disconnect();
+//          throw new SystemConnectException("unable to initialize jgroups address", e);
+//        }
+//      }
     }
   
     // install the address in the JGroups channel protocols
@@ -563,15 +545,13 @@ public class JGroupsMessenger implements Messenger {
     
     boolean useMcast = false;
     if (services.getConfig().getTransport().isMcastEnabled()) {
-      useMcast = services.getManager().isMulticastAllowed()
-          && (msg.getMulticast() || allDestinations);
+      if (msg.getMulticast() || allDestinations) {
+        useMcast = services.getManager().isMulticastAllowed();
+      }
     }
     
     if (logger.isDebugEnabled() && reliably) {
-      String recips = "multicast";
-      if (!useMcast) {
-        recips = Arrays.toString(msg.getRecipients());
-      }
+      String recips = useMcast? "multicast" : Arrays.toString(msg.getRecipients());
       logger.debug("sending via JGroups: [{}] recipients: {}", msg, recips);
     }
     
@@ -579,22 +559,20 @@ public class JGroupsMessenger implements Messenger {
     
     if (useMcast) {
 
+      long startSer = theStats.startMsgSerialization();
+      Message jmsg = createJGMessage(msg, local, Version.CURRENT_ORDINAL);
+      theStats.endMsgSerialization(startSer);
+
       Exception problem = null;
       try {
-        long startSer = theStats.startMsgSerialization();
-        Message jmsg = createJGMessage(msg, local, Version.CURRENT_ORDINAL);
         jmsg.setTransientFlag(TransientFlag.DONT_LOOPBACK);
         if (!reliably) {
           jmsg.setFlag(Message.Flag.NO_RELIABILITY);
         }
-        theStats.endMsgSerialization(startSer);
         theStats.incSentBytes(jmsg.getLength());
         logger.trace("Sending JGroups message: {}", jmsg);
         myChannel.send(jmsg);
       }
-      catch (IllegalArgumentException e) {
-        problem = e;
-      }
       catch (Exception e) {
         logger.debug("caught unexpected exception", e);
         Throwable cause = e.getCause();
@@ -603,14 +581,12 @@ public class JGroupsMessenger implements Messenger {
         } else {
           problem = e;
         }
-      }
-      if (problem != null) {
         if (services.getShutdownCause() != null) {
-          Throwable cause = services.getShutdownCause();
+          Throwable shutdownCause = services.getShutdownCause();
           // If ForcedDisconnectException occurred then report it as actual
           // problem.
-          if (cause instanceof ForcedDisconnectException) {
-            problem = (Exception) cause;
+          if (shutdownCause instanceof ForcedDisconnectException) {
+            problem = (Exception) shutdownCause;
           } else {
             Throwable ne = problem;
             while (ne.getCause() != null) {
@@ -626,83 +602,83 @@ public class JGroupsMessenger implements Messenger {
     } // useMcast
     else { // ! useMcast
       int len = destinations.length;
-        List<GMSMember> calculatedMembers; // explicit list of members
-        int calculatedLen; // == calculatedMembers.len
-        if (len == 1 && destinations[0] == DistributionMessage.ALL_RECIPIENTS) { // send to all
-          // Grab a copy of the current membership
-          NetView v = services.getJoinLeave().getView();
-          
-          // Construct the list
-          calculatedLen = v.size();
-          calculatedMembers = new LinkedList<GMSMember>();
-          for (int i = 0; i < calculatedLen; i ++) {
-            InternalDistributedMember m = (InternalDistributedMember)v.get(i);
-            calculatedMembers.add((GMSMember)m.getNetMember());
-          }
-        } // send to all
-        else { // send to explicit list
-          calculatedLen = len;
-          calculatedMembers = new LinkedList<GMSMember>();
-          for (int i = 0; i < calculatedLen; i ++) {
-            calculatedMembers.add((GMSMember)destinations[i].getNetMember());
-          }
-        } // send to explicit list
-        Int2ObjectOpenHashMap<Message> messages = new Int2ObjectOpenHashMap<>();
-        long startSer = theStats.startMsgSerialization();
-        boolean firstMessage = true;
-        for (Iterator<GMSMember> it=calculatedMembers.iterator(); it.hasNext(); ) {
-          GMSMember mbr = it.next();
-          short version = mbr.getVersionOrdinal();
-          if ( !messages.containsKey(version) ) {
-            Message jmsg = createJGMessage(msg, local, version);
-            messages.put(version, jmsg);
-            if (firstMessage) {
-              theStats.incSentBytes(jmsg.getLength());
-              firstMessage = false;
-            }
-          }
+      List<GMSMember> calculatedMembers; // explicit list of members
+      int calculatedLen; // == calculatedMembers.len
+      if (len == 1 && destinations[0] == DistributionMessage.ALL_RECIPIENTS) { // send to all
+        // Grab a copy of the current membership
+        NetView v = services.getJoinLeave().getView();
+
+        // Construct the list
+        calculatedLen = v.size();
+        calculatedMembers = new LinkedList<GMSMember>();
+        for (int i = 0; i < calculatedLen; i ++) {
+          InternalDistributedMember m = (InternalDistributedMember)v.get(i);
+          calculatedMembers.add((GMSMember)m.getNetMember());
         }
-        theStats.endMsgSerialization(startSer);
-        Collections.shuffle(calculatedMembers);
-        int i=0;
-        for (GMSMember mbr: calculatedMembers) {
-          JGAddress to = new JGAddress(mbr);
-          short version = mbr.getVersionOrdinal();
-          Message jmsg = (Message)messages.get(version);
-          Exception problem = null;
-          try {
-            Message tmp = (i < (calculatedLen-1)) ? jmsg.copy(true) : jmsg;
-            if (!reliably) {
-              jmsg.setFlag(Message.Flag.NO_RELIABILITY);
-            }
-            tmp.setDest(to);
-            tmp.setSrc(this.jgAddress);
-            logger.trace("Unicasting to {}", to);
-            myChannel.send(tmp);
+      } // send to all
+      else { // send to explicit list
+        calculatedLen = len;
+        calculatedMembers = new LinkedList<GMSMember>();
+        for (int i = 0; i < calculatedLen; i ++) {
+          calculatedMembers.add((GMSMember)destinations[i].getNetMember());
+        }
+      } // send to explicit list
+      Int2ObjectOpenHashMap<Message> messages = new Int2ObjectOpenHashMap<>();
+      long startSer = theStats.startMsgSerialization();
+      boolean firstMessage = true;
+      for (Iterator<GMSMember> it=calculatedMembers.iterator(); it.hasNext(); ) {
+        GMSMember mbr = it.next();
+        short version = mbr.getVersionOrdinal();
+        if ( !messages.containsKey(version) ) {
+          Message jmsg = createJGMessage(msg, local, version);
+          messages.put(version, jmsg);
+          if (firstMessage) {
+            theStats.incSentBytes(jmsg.getLength());
+            firstMessage = false;
           }
-          catch (Exception e) {
-            problem = e;
+        }
+      }
+      theStats.endMsgSerialization(startSer);
+      Collections.shuffle(calculatedMembers);
+      int i=0;
+      for (GMSMember mbr: calculatedMembers) {
+        JGAddress to = new JGAddress(mbr);
+        short version = mbr.getVersionOrdinal();
+        Message jmsg = (Message)messages.get(version);
+        Exception problem = null;
+        try {
+          Message tmp = (i < (calculatedLen-1)) ? jmsg.copy(true) : jmsg;
+          if (!reliably) {
+            jmsg.setFlag(Message.Flag.NO_RELIABILITY);
           }
-          if (problem != null) {
-            if (services.getManager().getShutdownCause() != null) {
-              Throwable cause = services.getManager().getShutdownCause();
-              // If ForcedDisconnectException occurred then report it as actual
-              // problem.
-              if (cause instanceof ForcedDisconnectException) {
-                problem = (Exception) cause;
-              } else {
-                Throwable ne = problem;
-                while (ne.getCause() != null) {
-                  ne = ne.getCause();
-                }
-                ne.initCause(services.getManager().getShutdownCause());
+          tmp.setDest(to);
+          tmp.setSrc(this.jgAddress);
+          logger.trace("Unicasting to {}", to);
+          myChannel.send(tmp);
+        }
+        catch (Exception e) {
+          problem = e;
+        }
+        if (problem != null) {
+          Throwable cause = services.getShutdownCause();
+          if (cause != null) {
+            // If ForcedDisconnectException occurred then report it as actual
+            // problem.
+            if (cause instanceof ForcedDisconnectException) {
+              problem = (Exception) cause;
+            } else {
+              Throwable ne = problem;
+              while (ne.getCause() != null) {
+                ne = ne.getCause();
               }
+              ne.initCause(cause);
             }
+          }
           final String channelClosed = LocalizedStrings.GroupMembershipService_CHANNEL_CLOSED.toLocalizedString();
-//          services.getManager().membershipFailure(channelClosed, problem);
+          //          services.getManager().membershipFailure(channelClosed, problem);
           throw new DistributedSystemDisconnectedException(channelClosed, problem);
-          }
-        } // send individually
+        }
+      } // send individually
     } // !useMcast
 
     // The contract is that every destination enumerated in the
@@ -769,12 +745,16 @@ public class JGroupsMessenger implements Messenger {
       msg.setBuffer(out_stream.toByteArray());
       services.getStatistics().endMsgSerialization(start);
     }
-    catch(IOException ex) {
-        IllegalArgumentException ia = new
-          IllegalArgumentException("Error serializing message");
-        ia.initCause(ex);
-        throw ia;
-        //throw new IllegalArgumentException(ex.toString());
+    catch(IOException | GemFireIOException ex) {
+      logger.warn("Error serializing message", ex);
+      if (ex instanceof GemFireIOException) {
+        throw (GemFireIOException)ex;
+      } else {
+        GemFireIOException ioe = new
+          GemFireIOException("Error serializing message");
+        ioe.initCause(ex);
+        throw ioe;
+      }
     }
     return msg;
   }
@@ -820,19 +800,19 @@ public class JGroupsMessenger implements Messenger {
       GMSMember m = DataSerializer.readObject(dis);
 
       result = DataSerializer.readObject(dis);
-      if (result instanceof DistributionMessage) {
-        DistributionMessage dm = (DistributionMessage)result;
-        // JoinRequestMessages are sent with an ID that may have been
-        // reused from a previous life by way of auto-reconnect,
-        // so we don't want to find a canonical reference for the
-        // request's sender ID
-        if (dm.getDSFID() == JOIN_REQUEST) {
-          sender = ((JoinRequestMessage)dm).getMemberID();
-        } else {
-          sender = getMemberFromView(m, ordinal);
-        }
-        ((DistributionMessage)result).setSender(sender);
+
+      DistributionMessage dm = (DistributionMessage)result;
+      
+      // JoinRequestMessages are sent with an ID that may have been
+      // reused from a previous life by way of auto-reconnect,
+      // so we don't want to find a canonical reference for the
+      // request's sender ID
+      if (dm.getDSFID() == JOIN_REQUEST) {
+        sender = ((JoinRequestMessage)dm).getMemberID();
+      } else {
+        sender = getMemberFromView(m, ordinal);
       }
+      ((DistributionMessage)result).setSender(sender);
       
       services.getStatistics().endMsgDeserialization(start);
     }
@@ -850,17 +830,23 @@ public class JGroupsMessenger implements Messenger {
   
   
   /** look for certain messages that may need to be altered before being sent */
-  private void filterOutgoingMessage(DistributionMessage m) {
+  void filterOutgoingMessage(DistributionMessage m) {
     switch (m.getDSFID()) {
     case JOIN_RESPONSE:
       JoinResponseMessage jrsp = (JoinResponseMessage)m;
       
-      if (jrsp.getRejectionMessage() != null
+      if (jrsp.getRejectionMessage() == null
           &&  services.getConfig().getTransport().isMcastEnabled()) {
         // get the multicast message digest and pass it with the join response
         Digest digest = (Digest)this.myChannel.getProtocolStack()
             .getTopProtocol().down(Event.GET_DIGEST_EVT);
-        jrsp.setMessengerData(digest);
+        HeapDataOutputStream hdos = new HeapDataOutputStream(500, Version.CURRENT);
+        try {
+          digest.writeTo(hdos);
+        } catch (Exception e) {
+          logger.fatal("Unable to serialize JGroups messaging digest", e);
+        }
+        jrsp.setMessengerData(hdos.toByteArray());
       }
       break;
     default:
@@ -868,18 +854,27 @@ public class JGroupsMessenger implements Messenger {
     }
   }
   
-  private void filterIncomingMessage(DistributionMessage m) {
+  void filterIncomingMessage(DistributionMessage m) {
     switch (m.getDSFID()) {
     case JOIN_RESPONSE:
       JoinResponseMessage jrsp = (JoinResponseMessage)m;
       
-      if (jrsp.getRejectionMessage() != null
+      if (jrsp.getRejectionMessage() == null
           &&  services.getConfig().getTransport().isMcastEnabled()) {
-        Digest digest = (Digest)jrsp.getMessengerData();
-        if (digest != null) {
-          logger.trace("installing JGroups message digest {}", digest);
-          this.myChannel.getProtocolStack()
-              .getTopProtocol().down(new Event(Event.SET_DIGEST, digest));
+        byte[] serializedDigest = jrsp.getMessengerData();
+        ByteArrayInputStream bis = new ByteArrayInputStream(serializedDigest);
+        DataInputStream dis = new DataInputStream(bis);
+        try {
+          Digest digest = new Digest();
+          digest.readFrom(dis);
+          if (digest != null) {
+            logger.trace("installing JGroups message digest {}", digest);
+            this.myChannel.getProtocolStack()
+                .getTopProtocol().down(new Event(Event.SET_DIGEST, digest));
+            jrsp.setMessengerData(null);
+          }
+        } catch (Exception e) {
+          logger.fatal("Unable to read JGroups messaging digest", e);
         }
       }
       break;
@@ -894,13 +889,20 @@ public class JGroupsMessenger implements Messenger {
   }
   
   /**
-   * returns the JGroups configuration string
+   * returns the JGroups configuration string, for testing
    */
   public String getJGroupsStackConfig() {
     return this.jgStackConfig;
   }
   
   /**
+   * returns the pinger, for testing
+   */
+  public GMSPingPonger getPingPonger() {
+    return this.pingPonger;
+  }
+  
+  /**
    * for unit testing we need to replace UDP with a fake UDP protocol 
    */
   public void setJGroupsStackConfigForTesting(String config) {
@@ -954,9 +956,10 @@ public class JGroupsMessenger implements Messenger {
     return qc;
   }
   /**
-   * Puller receives incoming JGroups messages and passes them to a handler
+   * JGroupsReceiver receives incoming JGroups messages and passes them to a handler.
+   * It may be accessed through JChannel.getReceiver().
    */
-  class JGroupsReceiver implements Receiver  {
+  class JGroupsReceiver extends ReceiverAdapter  {
   
     @Override
     public void receive(Message jgmsg) {
@@ -970,6 +973,9 @@ public class JGroupsMessenger implements Messenger {
       
       //Respond to ping messages sent from other systems that are in a auto reconnect state
       byte[] contents = jgmsg.getBuffer();
+      if (contents == null) {
+        return;
+      }
       if (pingPonger.isPingMessage(contents)) {
         try {
           pingPonger.sendPongMessage(myChannel, jgAddress, jgmsg.getSrc());
@@ -985,45 +991,27 @@ public class JGroupsMessenger implements Messenger {
       
       Object o = readJGMessage(jgmsg);
       if (o == null) {
-        logger.warn(LocalizedMessage.create(
-            LocalizedStrings.GroupMembershipService_MEMBERSHIP_GEMFIRE_RECEIVED_NULL_MESSAGE_FROM__0, String.valueOf(jgmsg)));
-        logger.warn(LocalizedMessage.create(
-            LocalizedStrings.GroupMembershipService_MEMBERSHIP_MESSAGE_HEADERS__0, jgmsg.printObjectHeaders()));
-        return;
-      } else if ( !(o instanceof DistributionMessage) ) {
-        logger.warn("Received something other than a message from " + jgmsg.getSrc() + ": " + o);
         return;
       }
 
       DistributionMessage msg = (DistributionMessage)o;
+      assert msg.getSender() != null;
       
       // admin-only VMs don't have caches, so we ignore cache operations
       // multicast to them, avoiding deserialization cost and classpath
       // problems
       if ( (services.getConfig().getTransport().getVmKind() == DistributionManager.ADMIN_ONLY_DM_TYPE)
            && (msg instanceof DistributedCacheOperation.CacheOperationMessage)) {
-        if (logger.isTraceEnabled())
-          logger.trace("Membership: admin VM discarding cache operation message {}", jgmsg.getObject());
         return;
       }
 
       msg.resetTimestamp();
       msg.setBytesRead(jgmsg.getLength());
             
-      if (msg.getSender() == null) {
-        Exception e = new Exception(LocalizedStrings.GroupMembershipService_NULL_SENDER.toLocalizedString());
-        logger.warn(LocalizedMessage.create(
-            LocalizedStrings.GroupMembershipService_MEMBERSHIP_GEMFIRE_RECEIVED_A_MESSAGE_WITH_NO_SENDER_ADDRESS), e);
-      }
-      
       try {
-        if (logger.isTraceEnabled()) {
-          logger.trace("JGroupsMessenger dispatching {} from {}", msg, msg.getSender());
-        }
+        logger.trace("JGroupsMessenger dispatching {} from {}", msg, msg.getSender());
         filterIncomingMessage(msg);
-        MessageHandler h = getMessageHandler(msg);
-        logger.trace("Handler for this message is {}", h);
-        h.processMessage(msg);
+        getMessageHandler(msg).processMessage(msg);
       }
       catch (MemberShunnedException e) {
         // message from non-member - ignore
@@ -1053,36 +1041,7 @@ public class JGroupsMessenger implements Messenger {
       }
       return h;
     }
-    
-    
-    @Override
-    public void block() {
-    }
-    
-
-    @Override
-    public void viewAccepted(View new_view) {
-    }
-
-
-    @Override
-    public void getState(OutputStream output) throws Exception {
-    }
-
-    @Override
-    public void setState(InputStream input) throws Exception {
-    }
-
-    @Override
-    public void suspect(Address suspected_mbr) {
-    }
-
-    @Override
-    public void unblock() {
-    }
-    
-        
-  } // Puller class
+  }
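
A side note on the Receiver -> ReceiverAdapter change above: extending
org.jgroups.ReceiverAdapter lets JGroupsReceiver drop the empty block(),
viewAccepted(), getState(), setState(), suspect() and unblock() overrides that
implementing Receiver directly required. Below is a minimal, self-contained
sketch of how such a receiver is wired to a channel, assuming JGroups 3.x; the
cluster name and payload are illustrative only.

import org.jgroups.JChannel;
import org.jgroups.Message;
import org.jgroups.ReceiverAdapter;

public class ReceiverExample {
  public static void main(String[] args) throws Exception {
    JChannel channel = new JChannel();            // default protocol stack
    channel.setReceiver(new ReceiverAdapter() {   // override only what is needed
      @Override
      public void receive(Message msg) {
        byte[] contents = msg.getBuffer();
        if (contents == null) {                   // same guard the patch adds above
          return;
        }
        System.out.println("received " + contents.length
            + " bytes from " + msg.getSrc());
      }
    });
    channel.connect("example-cluster");

    // as the new javadoc notes, the installed receiver can be fetched back
    // through JChannel.getReceiver(), which is handy in tests
    ReceiverAdapter installed = (ReceiverAdapter) channel.getReceiver();

    channel.send(new Message(null, null, "hello".getBytes()));  // null dest = multicast
    Thread.sleep(1000);
    channel.close();
  }
}
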
   
   
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/Transport.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/Transport.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/Transport.java
index 8ba59b6..1687261 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/Transport.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/messenger/Transport.java
@@ -97,7 +97,7 @@ public class Transport extends UDP {
     catch (IOException e) {
       if (messenger != null
           /*&& e.getMessage().contains("Operation not permitted")*/) { // this is the english Oracle JDK exception condition we really want to catch
-        messenger.handleJGroupsIOException(e, msg, dest);
+        messenger.handleJGroupsIOException(e, dest);
       }
     }
     catch(Throwable e) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServer.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServer.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServer.java
index 516fe8d..92793ae 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServer.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServer.java
@@ -362,7 +362,7 @@ public class TcpServer {
             versionOrdinal = input.readShort();
           }
 
-          if (log.isDebugEnabled()) {
+          if (log.isDebugEnabled() && versionOrdinal != Version.CURRENT_ORDINAL) {
             log.debug("Locator reading request from " + sock.getInetAddress() + " with version " + Version.fromOrdinal(versionOrdinal, false));
           }
           input = new VersionedDataInputStream(input, Version.fromOrdinal(

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/main/java/com/gemstone/gemfire/internal/i18n/ParentLocalizedStrings.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/i18n/ParentLocalizedStrings.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/i18n/ParentLocalizedStrings.java
index 150b408..7bb97b9 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/i18n/ParentLocalizedStrings.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/i18n/ParentLocalizedStrings.java
@@ -730,9 +730,9 @@ class ParentLocalizedStrings {
   public static final StringId GroupMembershipService_MEMBERSHIP_FAULT_WHILE_PROCESSING_VIEW_REMOVAL_OF__0 = new StringId(1722, "Membership: Fault while processing view removal of  {0}");
   public static final StringId GroupMembershipService_MEMBERSHIP_FINISHED_VIEW_PROCESSING_VIEWID___0 = new StringId(1723, "Membership: Finished view processing viewID =  {0}");
   public static final StringId GroupMembershipService_MEMBERSHIP_GEMFIRE_RECEIVED_A_MESSAGE_WITH_NO_SENDER_ADDRESS = new StringId(1724, "Membership: GemFire received a message with no sender address");
-  public static final StringId GroupMembershipService_MEMBERSHIP_GEMFIRE_RECEIVED_NULL_MESSAGE_FROM__0 = new StringId(1725, "Membership: GemFire received null message from  {0}");
+  // ok to reuse 1725
   public static final StringId GroupMembershipService_MEMBERSHIP_IGNORING_SURPRISE_CONNECT_FROM_SHUNNED_MEMBER_0 = new StringId(1726, "Membership: Ignoring surprise connect from shunned member <{0}>");
-  public static final StringId GroupMembershipService_MEMBERSHIP_MESSAGE_HEADERS__0 = new StringId(1727, "Membership: message headers:  {0}");
+  // ok to reuse 1727
   // ok to reuse 1728
   public static final StringId GroupMembershipService_MEMBERSHIP_PAUSING_TO_ALLOW_OTHER_CONCURRENT_PROCESSES_TO_JOIN_THE_DISTRIBUTED_SYSTEM = new StringId(1729, "Membership: Pausing to allow other concurrent processes to join the distributed system");
   public static final StringId GroupMembershipService_MEMBERSHIP_PROCESSING_ADDITION__0_ = new StringId(1730, "Membership: Processing addition < {0} >");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
index dd6f1fa..9d0f69f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
@@ -581,7 +581,7 @@ public class ReconnectDUnitTest extends CacheTestCase
     Properties config = getDistributedSystemProperties();
     config.put(DistributionConfig.ROLES_NAME, "");
     config.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
-    config.put("log-file", "roleLossController.log");
+//    config.put("log-file", "roleLossController.log");
     //creating the DS
     getSystem(config);
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
index 82dfdb7..51771cb 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
@@ -347,6 +347,7 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
       public void afterCreate(EntryEvent event) {
         try {
           if (playDead) {
+            MembershipManagerHelper.beSickMember(system);
             MembershipManagerHelper.playDead(system);
           }
           Thread.sleep(15000);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
index 2ce1ca7..bee2367 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/MembershipJUnitTest.java
@@ -16,16 +16,21 @@
  */
 package com.gemstone.gemfire.distributed.internal.membership;
 
+import static org.mockito.Mockito.isA;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.net.InetAddress;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
@@ -42,16 +47,28 @@ import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.GemFireConfigException;
 import com.gemstone.gemfire.distributed.Locator;
+import com.gemstone.gemfire.distributed.internal.DM;
 import com.gemstone.gemfire.distributed.internal.DMStats;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.DistributionConfigImpl;
 import com.gemstone.gemfire.distributed.internal.DistributionManager;
+import com.gemstone.gemfire.distributed.internal.DistributionMessage;
 import com.gemstone.gemfire.distributed.internal.InternalLocator;
+import com.gemstone.gemfire.distributed.internal.SerialAckedMessage;
 import com.gemstone.gemfire.distributed.internal.membership.gms.GMSUtil;
 import com.gemstone.gemfire.distributed.internal.membership.gms.ServiceConfig;
 import com.gemstone.gemfire.distributed.internal.membership.gms.Services;
 import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.JoinLeave;
 import com.gemstone.gemfire.distributed.internal.membership.gms.membership.GMSJoinLeave;
+import com.gemstone.gemfire.distributed.internal.membership.gms.messages.HeartbeatMessage;
+import com.gemstone.gemfire.distributed.internal.membership.gms.messages.HeartbeatRequestMessage;
+import com.gemstone.gemfire.distributed.internal.membership.gms.messages.InstallViewMessage;
+import com.gemstone.gemfire.distributed.internal.membership.gms.messages.JoinRequestMessage;
+import com.gemstone.gemfire.distributed.internal.membership.gms.messages.JoinResponseMessage;
+import com.gemstone.gemfire.distributed.internal.membership.gms.messages.LeaveRequestMessage;
+import com.gemstone.gemfire.distributed.internal.membership.gms.messages.RemoveMemberMessage;
+import com.gemstone.gemfire.distributed.internal.membership.gms.messages.SuspectMembersMessage;
+import com.gemstone.gemfire.distributed.internal.membership.gms.messages.ViewAckMessage;
 import com.gemstone.gemfire.distributed.internal.membership.gms.mgr.GMSMembershipManager;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.SocketCreator;
@@ -159,6 +176,7 @@ public class MembershipJUnitTest {
     
     MembershipManager m1=null, m2=null;
     Locator l = null;
+    int mcastPort = AvailablePortHelper.getRandomAvailableUDPPort();
     
     try {
       
@@ -175,9 +193,11 @@ public class MembershipJUnitTest {
       // create configuration objects
       Properties nonDefault = new Properties();
       nonDefault.put(DistributionConfig.DISABLE_TCP_NAME, "true");
-      nonDefault.put(DistributionConfig.MCAST_PORT_NAME, "0");
+      nonDefault.put(DistributionConfig.MCAST_PORT_NAME, String.valueOf(mcastPort));
       nonDefault.put(DistributionConfig.LOG_FILE_NAME, "");
-//      nonDefault.put(DistributionConfig.LOG_LEVEL_NAME, "finest");
+      nonDefault.put(DistributionConfig.LOG_LEVEL_NAME, "fine");
+      nonDefault.put(DistributionConfig.GROUPS_NAME, "red, blue");
+      nonDefault.put(DistributionConfig.MEMBER_TIMEOUT_NAME, "2000");
       nonDefault.put(DistributionConfig.LOCATORS_NAME, localHost.getHostName()+'['+port+']');
       DistributionConfigImpl config = new DistributionConfigImpl(nonDefault);
       RemoteTransportConfig transport = new RemoteTransportConfig(config,
@@ -222,7 +242,38 @@ public class MembershipJUnitTest {
           }
         }
       }
-        
+      
+      System.out.println("testing multicast availability");
+      assertTrue(m1.testMulticast());
+      
+      System.out.println("multicasting SerialAckedMessage from m1 to m2");
+      SerialAckedMessage msg = new SerialAckedMessage();
+      msg.setRecipient(m2.getLocalMember());
+      msg.setMulticast(true);
+      m1.send(new InternalDistributedMember[] {m2.getLocalMember()}, msg, null);
+      giveUp = System.currentTimeMillis() + 5000;
+      boolean verified = false;
+      Throwable problem = null;
+      while (giveUp > System.currentTimeMillis()) {
+        try {
+          verify(listener2).messageReceived(isA(SerialAckedMessage.class));
+          verified = true;
+          break;
+        } catch (Error e) {
+          problem = e;
+          Thread.sleep(500);
+        }
+      }
+      if (!verified) {
+        if (problem != null) {
+          problem.printStackTrace();
+        }
+        fail("Expected a multicast message to be received");
+      }
+      
+      // let the managers idle for a while and get used to each other
+      Thread.sleep(4000l);
+      
       m2.shutdown();
       assertTrue(!m2.isConnected());
       
@@ -284,5 +335,64 @@ public class MembershipJUnitTest {
     assertEquals(600+4, str.length());
   }
   
+  @Test
+  public void testMessagesThrowExceptionIfProcessed() throws Exception {
+    DistributionManager dm = null;
+    try {
+      new HeartbeatMessage().process(dm);
+      fail("expected an exception to be thrown");
+    } catch (Exception e) {
+      // okay
+    }
+    try {
+      new HeartbeatRequestMessage().process(dm);
+      fail("expected an exception to be thrown");
+    } catch (Exception e) {
+      // okay
+    }
+    try {
+      new InstallViewMessage().process(dm);
+      fail("expected an exception to be thrown");
+    } catch (Exception e) {
+      // okay
+    }
+    try {
+      new JoinRequestMessage().process(dm);
+      fail("expected an exception to be thrown");
+    } catch (Exception e) {
+      // okay
+    }
+    try {
+      new JoinResponseMessage().process(dm);
+      fail("expected an exception to be thrown");
+    } catch (Exception e) {
+      // okay
+    }
+    try {
+      new LeaveRequestMessage().process(dm);
+      fail("expected an exception to be thrown");
+    } catch (Exception e) {
+      // okay
+    }
+    try {
+      new RemoveMemberMessage().process(dm);
+      fail("expected an exception to be thrown");
+    } catch (Exception e) {
+      // okay
+    }
+    try {
+      new SuspectMembersMessage().process(dm);
+      fail("expected an exception to be thrown");
+    } catch (Exception e) {
+      // okay
+    }
+    try {
+      new ViewAckMessage().process(dm);
+      fail("expected an exception to be thrown");
+    } catch (Exception e) {
+      // okay
+    }
+  }
+  
   
 }
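
The multicast check added to MembershipJUnitTest above re-runs a Mockito
verification in a loop until it passes or a deadline expires. That pattern is
reusable; below is a minimal generic sketch of it, not part of the patch, with
illustrative names. It assumes only that Mockito verification failures are
thrown as java.lang.Error subclasses, which is what the inlined loop above
relies on as well.

// Re-runs a verification until it passes or the timeout elapses.
final class VerifyWithTimeout {

  interface Verification {
    void run();
  }

  static void await(long timeoutMillis, Verification verification)
      throws InterruptedException {
    long giveUp = System.currentTimeMillis() + timeoutMillis;
    Error last = null;
    while (System.currentTimeMillis() < giveUp) {
      try {
        verification.run();
        return;                 // verification passed
      } catch (Error e) {
        last = e;               // not satisfied yet - pause and retry
        Thread.sleep(500);
      }
    }
    throw last != null ? last : new AssertionError("verification never completed");
  }
}

// Usage, mirroring the assertion in the test above:
//
//   VerifyWithTimeout.await(5000, new VerifyWithTimeout.Verification() {
//     public void run() {
//       verify(listener2).messageReceived(isA(SerialAckedMessage.class));
//     }
//   });
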

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/bd43c341/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java
index c5141de..f764ef9 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java
@@ -156,6 +156,7 @@ public class MembershipManagerHelper
   public static void crashDistributedSystem(final DistributedSystem msys) {
     msys.getLogWriter().info("crashing distributed system: " + msys);
     MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
+    MembershipManagerHelper.beSickMember(msys);
     MembershipManagerHelper.playDead(msys);
     GMSMembershipManager mgr = ((GMSMembershipManager)getMembershipManager(msys));
     mgr.forceDisconnect("for testing");