You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@geode.apache.org by ji...@apache.org on 2020/10/14 18:22:15 UTC

[geode] branch feature/GEODE-7665 updated (41bd59b -> 30a699d)

This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a change to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git.


 discard 41bd59b  GEODE-7845: Adding a cleaner simpler test. (#5622)
    omit 55550d3  GEODE-7845 blocking PR region clear if one or more server versions are too old (#5577)
    omit faed099  fix rebase compiling error
    omit c9e4a62  GEODE-7672: add dunit test to verify OQL index after PR clear. (#5436)
    omit b21b29b  GEODE-7846: Adding Stats for Partitioned Region Clear (#5391)
    omit 62701f9  GEODE-7680: PR.clear must be successful when interacting with rebalance (#5095)
    omit f81724f  GEODE-7670: PR Clear with Concurrent Ops DUnitTests (#4848)
    omit a0c0a68  GEODE-8361: Use Set instead of List to track cleared buckets (#5379)
    omit eaa1322  GEODE-8334: PR.clear should sync with putAll or removeAll on rvvLock (#5365)
    omit 2710545  GEODE-8173: Add unit test (coverage) for PartitionedRegionClear class. (#5208)
    omit 2aa3402  GEODE-7669 Test coverage for Partitioned Region clear with Overflow enabled (#5189)
    omit b4ae8f4  GEODE-7678 (2nd PR) - Support for cache-listener and client-notification for Partitioned Region Clear operation  (#5124)
    omit 86528ca  GEODE-7667: Fixing test to include PR clear help text.
    omit 8ef7564  GEODE-7894: Moving expiry tasks to AbstractRegion.
    omit a6874ef  GEODE-7676: Conversion of duration to seconds.
    omit af79a1b  GEODE-7667: Add a 'clear' gfsh command for PR and RR clear (#4818)
    omit ebad3ad  GEODE-7676: Add PR clear with expiration tests (#4970)
    omit c277e07  GEODE-7983: Clear region writer callbacks should not be invoked for bucket regions (#4954)
    omit 9a0b0cb  GEODE-7912: cacheWriter should be triggered when PR.clear (#4882)
    omit e030689  PR.clear's event id should be created and used in BR (#4805)
    omit 99ef1ae  GEODE-7682: add PR.clear  API (#4755)
    omit 7ff925a  GEODE-7683: introduce BR.cmnClearRegion
     add ae0d6bc  GEODE-8550: Rename DistributedCloseableReference (#5579)
     add eccd4f0  GEODE-8536: Allow limited retries when creating Lucene IndexWriter (#5553)
     add c91e915  Revert "GEODE-8536: Allow limited retries when creating Lucene IndexWriter (#5553)" (#5588)
     add fb3bcac  GEODE-8566: Redis native tests should not also stand up a Geode server (#5584)
     add b8147bb  GEODE-8577: PubSubNativeRedisAcceptanceTest is flaky (#5593)
     add 9bc288a  GEODE-8421: replace clean with destroy region (#5445)
     add f8dae61  GEODE-8577: Fix flaky PubSubNativeRedisAcceptanceTest (#5597)
     add 0c41271  GEODE-8538: Create test to validate ordering of redis pipeline commands (#5552)
     add e4c077a  GEODE-8576: fix doc on "security-peer-auth-init" (#5592)
     add 8f4566c  GEODE-8525: Ensure that all pubsub related commands run on the correct EventLoopGroup (#5591)
     add dbd1803  GEODE-8572: Make LogExporter not read dirs (#5595)
     add 74a850b  GEODE-8172_2: refactoring of failing testcase (#5599)
     add 6c8bc5e  GEODE-8216: modify test (#5335)
     add 73f6783  GEODE-8574: ClusterManagementService should not throw ClassCastExcept… (#5596)
     add bcdf3ca  GEODE-8582: Redis SCAN returns internal server error (#5603)
     add f4d44d6  GEODE-8584: Message transmission fails with IllegalStateException in socket i/o code (#5605)
     add faef811  GEODE-8579: Stop waiting locator-wait-time if all locators are available
     add c9ba7fa  GEODE-8587: Redis glob pattern does not match carriage return, line feed, and tab (#5608)
     add f3fb150  GEODE-8581: "Define index" and "create defined index" should work if … (#5602)
     add 6476002  GEODE-8590: Cleanup dependencies in the session state modules (#5610)
     add 30782f1  GEODE-8586: Redis SPOP with count on empty set returns NIL instead of empty array (#5609)
     add 0cc4fa0  GEODE-8559: Compute interest routing info after transaction committed. (#5581)
     add e85a1bd  GEODE-8517: GatewaySenderEventImpl's 2 new attributes were introduced… (#5530)
     add 1cab75d  GEODE-8600: Fix for faulty statistics QueueSize (#5616)
     add b167094  GEODE-8556: Remove outdated msg from docs (#5614)
     new 09638b8  GEODE-7683: introduce BR.cmnClearRegion
     new 04df003  GEODE-7682: add PR.clear  API (#4755)
     new 9674275  PR.clear's event id should be created and used in BR (#4805)
     new 647b8e0  GEODE-7912: cacheWriter should be triggered when PR.clear (#4882)
     new c89af00  GEODE-7983: Clear region writer callbacks should not be invoked for bucket regions (#4954)
     new 06d77ba  GEODE-7676: Add PR clear with expiration tests (#4970)
     new a7adc42  GEODE-7667: Add a 'clear' gfsh command for PR and RR clear (#4818)
     new 3522100  GEODE-7676: Conversion of duration to seconds.
     new 9999300  GEODE-7894: Moving expiry tasks to AbstractRegion.
     new d765608  GEODE-7667: Fixing test to include PR clear help text.
     new 0888d88  GEODE-7678 (2nd PR) - Support for cache-listener and client-notification for Partitioned Region Clear operation  (#5124)
     new be03f1c  GEODE-7669 Test coverage for Partitioned Region clear with Overflow enabled (#5189)
     new 65a9016  GEODE-8173: Add unit test (coverage) for PartitionedRegionClear class. (#5208)
     new 5948b18  GEODE-8334: PR.clear should sync with putAll or removeAll on rvvLock (#5365)
     new 2639633  GEODE-8361: Use Set instead of List to track cleared buckets (#5379)
     new 7d6c050  GEODE-7670: PR Clear with Concurrent Ops DUnitTests (#4848)
     new 02336cd  GEODE-7680: PR.clear must be successful when interacting with rebalance (#5095)
     new 91b19cc  GEODE-7846: Adding Stats for Partitioned Region Clear (#5391)
     new 4bf638a  GEODE-7672: add dunit test to verify OQL index after PR clear. (#5436)
     new 0589b0d  fix rebase compiling error
     new 68d7c30  GEODE-7845 blocking PR region clear if one or more server versions are too old (#5577)
     new 30a699d  GEODE-7845: Adding a cleaner simpler test. (#5622)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user force-pushes a change, producing a history
that looks something like this:

 * -- * -- B -- O -- O -- O   (41bd59b)
            \
             N -- N -- N   refs/heads/feature/GEODE-7665 (30a699d)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 22 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../src/test/resources/expected-pom.xml            |  12 +
 .../geode-modules-session-internal/build.gradle    |  22 +-
 extensions/geode-modules-test/build.gradle         |  41 +-
 extensions/geode-modules-tomcat7/build.gradle      |  59 +-
 extensions/geode-modules-tomcat8/build.gradle      |  86 +-
 .../src/test/resources/expected-pom.xml            |  60 ++
 extensions/geode-modules-tomcat9/build.gradle      |  48 +-
 extensions/geode-modules/build.gradle              |  56 +-
 .../src/test/resources/expected-pom.xml            |  75 ++
 geode-assembly/build.gradle                        |   1 +
 .../shell/StatusLocatorExitCodeAcceptanceTest.java |   7 +-
 .../shell/StatusServerExitCodeAcceptanceTest.java  |   5 +-
 .../ReplicateRegionNetsearchDistributedTest.java   |   8 +-
 .../fixed/FixedPartitioningDUnitTest.java          |   4 +-
 .../internal/util/ManagementUtilsDUnitTest.java    |  11 +
 .../RestrictUseOfInetAddressJUnitTest.java         |   1 -
 .../codeAnalysis/sanctionedDataSerializables.txt   |   4 +-
 .../distributed/internal/DistributionManager.java  |   2 +
 .../internal/LonerDistributionManager.java         |   4 +
 .../geode/internal/cache/EntryEventImpl.java       |   5 +
 .../apache/geode/internal/cache/FilterProfile.java |  65 +-
 .../geode/internal/cache/FilterRoutingInfo.java    |  15 +
 .../geode/internal/cache/InternalCacheEvent.java   |   3 +
 .../apache/geode/internal/cache/LocalRegion.java   |  13 +-
 .../geode/internal/cache/PartitionedRegion.java    |   2 +-
 .../geode/internal/cache/TXCommitMessage.java      |  13 +-
 .../apache/geode/internal/cache/TXEntryState.java  |   5 +
 .../org/apache/geode/internal/cache/TXState.java   |  12 +-
 .../internal/cache/TxCallbackEventFactoryImpl.java |   5 +-
 .../ClientRegistrationEventQueueManager.java       |   2 +-
 .../geode/internal/cache/tx/DistTxEntryEvent.java  |   5 +
 .../internal/cache/wan/GatewaySenderEventImpl.java |  12 +-
 .../wan/parallel/ParallelGatewaySenderQueue.java   |  31 +-
 .../org/apache/geode/internal/net/BufferPool.java  |   7 +-
 .../org/apache/geode/internal/net/NioFilter.java   |  22 +-
 .../apache/geode/internal/net/NioSslEngine.java    |  17 +-
 .../org/apache/geode/internal/tcp/Connection.java  | 333 ++++----
 .../org/apache/geode/internal/tcp/MsgReader.java   |  82 +-
 .../api/LocatorClusterManagementService.java       |  40 +-
 .../management/internal/util/ManagementUtils.java  |   5 +
 .../geode/internal/cache/EntryEventImplTest.java   | 198 ++---
 .../geode/internal/cache/FilterProfileTest.java    | 258 ++++++
 .../geode/internal/cache/LocalRegionTest.java      |  55 ++
 .../geode/internal/cache/TXCommitMessageTest.java  |  23 +
 .../apache/geode/internal/cache/TXStateTest.java   |  24 +
 .../tier/sockets/CacheClientNotifierTest.java      |   2 +-
 .../ClientRegistrationEventQueueManagerTest.java   |   2 +-
 .../cache/wan/GatewaySenderEventImplTest.java      |  79 ++
 .../geode/internal/net/NioSslEngineTest.java       |  26 +-
 .../api/LocatorClusterManagementServiceTest.java   |  24 +-
 .../cache/RemoteCQTransactionDUnitTest.java        | 310 ++++----
 .../running/starting_up_shutting_down.html.md.erb  |   8 +-
 .../topics/gemfire_properties.html.md.erb          |   2 +-
 ...ributedCloseableReferenceCacheExampleTest.java} |  18 +-
 ...seableReferenceLocatorLauncherExampleTest.java} |  19 +-
 ...oseableReferenceServerLauncherExampleTest.java} |  21 +-
 ...ibutedCloseableReferenceSystemExampleTest.java} |  19 +-
 ...java => DistributedCloseableReferenceTest.java} | 244 +++---
 .../geode/test/dunit/DistributedTestUtils.java     |  71 +-
 ...nce.java => DistributedCloseableReference.java} | 100 ++-
 .../geode/test/dunit/rules/DistributedRule.java    |  13 +-
 ...xesCommandWithMultipleGfshSessionDUnitTest.java | 118 +++
 .../cli/util/LogExporterFileIntegrationTest.java   |  10 +
 .../cli/util/LogExporterIntegrationTest.java       | 203 ++---
 .../apache/geode/management/cli/GfshCommand.java   |  12 +
 .../cli/commands/ClearDefinedIndexesCommand.java   |  11 +
 .../internal/cli/commands/DefineIndexCommand.java  |  11 +
 .../internal/cli/commands/IndexDefinition.java     |   5 +-
 .../functions/ManageIndexDefinitionFunction.java   |  40 +
 .../management/internal/cli/util/LogExporter.java  |  10 +-
 .../sanctioned-geode-gfsh-serializables.txt        |   1 +
 .../cli/commands/GfshCommandJUnitTest.java         |  28 +
 .../ManageIndexDefinitionFunctionTest.java         |  68 ++
 geode-junit/build.gradle                           |   1 +
 .../apache/geode/internal/AvailablePortHelper.java |   2 +-
 .../geode/test/junit/rules/CloseableReference.java |  17 +-
 .../membership/gms/MembershipIntegrationTest.java  |  63 ++
 .../membership/gms/membership/GMSJoinLeave.java    |   6 +-
 .../org/apache/geode/internal/AvailablePort.java   |  22 +-
 geode-memcached/build.gradle                       |   1 +
 geode-pulse/geode-pulse-test/build.gradle          |   1 +
 ...ommandPipeliningNativeRedisAcceptanceTest.java} |  12 +-
 .../GlobPatternNativeRedisAcceptanceTest.java      |  12 +-
 .../connection/PingNativeRedisAcceptanceTest.java  |  11 +-
 .../hash/HashesNativeRedisAcceptanceTest.java      |  20 +-
 .../executor/key/DelNativeRedisAcceptanceTest.java |  12 +-
 .../key/ExistsNativeRedisAcceptanceTest.java       |  20 +-
 .../key/ExpireAtNativeRedisAcceptanceTest.java     |  17 +-
 .../key/ExpireNativeRedisAcceptanceTest.java       |  17 +-
 .../key/KeysNativeRedisAcceptanceTest.java         |  11 +-
 .../key/PTTLNativeRedisAcceptanceTest.java         |  12 +-
 .../key/PersistNativeRedisAcceptanceTest.java      |  19 +-
 .../key/PexpireNativeRedisAcceptanceTest.java      |  17 +-
 .../key/RenameNativeRedisAcceptanceTest.java       |  22 +-
 .../ScanNativeRedisAcceptanceTest.java}            |  11 +-
 .../executor/key/TTLNativeRedisAcceptanceTest.java |  12 +-
 .../key/TypeNativeRedisAcceptanceTest.java         |  11 +-
 .../LettucePubSubNativeRedisAcceptanceTest.java    |  12 +-
 .../pubsub/PubSubNativeRedisAcceptanceTest.java    |  22 +-
 .../SubscriptionsNativeRedisAcceptanceTest.java    |   8 +-
 .../FlushAllNativeRedisAcceptanceTest.java}        |  11 +-
 .../server/InfoNativeRedisAcceptanceTest.java      |  14 +-
 .../TimeNativeRedisAcceptanceTest.java}            |  11 +-
 .../set/SDiffNativeRedisAcceptanceTest.java        |  18 +-
 .../set/SInterNativeRedisAcceptanceTest.java       |  18 +-
 .../set/SIsMemberNativeRedisAcceptanceTest.java    |  13 +-
 .../set/SMoveNativeRedisAcceptanceTest.java        |  18 +-
 .../set/SPopNativeRedisAcceptanceTest.java         |  17 +-
 .../set/SRemNativeRedisAcceptanceTest.java         |  18 +-
 .../set/SUnionNativeRedisAcceptanceTest.java       |  17 +-
 .../set/SetsNativeRedisAcceptanceTest.java         |  18 +-
 .../string/AppendNativeRedisAcceptanceTest.java    |  14 +-
 .../string/BitCountNativeRedisAcceptanceTest.java  |  11 +-
 .../string/BitOpNativeRedisAcceptanceTest.java     |  12 +-
 .../string/BitPosNativeRedisAcceptanceTest.java    |  12 +-
 .../string/DecrByNativeRedisAcceptanceTest.java    |  15 +-
 .../string/DecrNativeRedisAcceptanceTest.java      |  13 +-
 .../string/GetBitNativeRedisAcceptanceTest.java    |  12 +-
 .../string/GetNativeRedisAcceptanceTest.java       |  12 +-
 .../string/GetRangeNativeRedisAcceptanceTest.java  |  12 +-
 .../string/GetSetNativeRedisAcceptanceTest.java    |  13 +-
 .../IncrByFloatNativeRedisAcceptanceTest.java      |  15 +-
 .../string/IncrByNativeRedisAcceptanceTest.java    |  15 +-
 .../string/IncrNativeRedisAcceptanceTest.java      |  13 +-
 .../string/MGetNativeRedisAcceptanceTest.java      |  13 +-
 .../string/MSetNXNativeRedisAcceptanceTest.java    |  12 +-
 .../string/MSetNativeRedisAcceptanceTest.java      |  13 +-
 .../string/PSetEXNativeRedisAcceptanceTest.java    |  12 +-
 .../string/SetBitNativeRedisAcceptanceTest.java    |  12 +-
 .../string/SetExNativeRedisAcceptanceTest.java     |  12 +-
 .../string/SetNXNativeRedisAcceptanceTest.java     |  12 +-
 .../string/SetNativeRedisAcceptanceTest.java       |  14 +-
 .../string/SetRangeNativeRedisAcceptanceTest.java  |  12 +-
 .../string/StrLenNativeRedisAcceptanceTest.java    |  12 +-
 .../geode/test/dunit/rules/RedisPortSupplier.java} |  10 +-
 ... AbstractCommandPipeliningIntegrationTest.java} |  67 +-
 .../redis/CommandPipeliningIntegrationTest.java    |  76 +-
 ...ava => AbstractGlobPatternIntegrationTest.java} |  27 +-
 .../executor/GlobPatternIntegrationTest.java       | 240 +-----
 .../internal/executor/UnknownIntegrationTest.java  |   2 +-
 ...nTest.java => AbstractPingIntegrationTest.java} |  28 +-
 .../executor/connection/PingIntegrationTest.java   |  56 +-
 .../executor/connection/QuitIntegrationTest.java   |   5 +-
 ...est.java => AbstractHashesIntegrationTest.java} |  35 +-
 .../executor/hash/HashesIntegrationTest.java       | 719 +----------------
 ...onTest.java => AbstractDelIntegrationTest.java} |  31 +-
 ...est.java => AbstractExistsIntegrationTest.java} |  35 +-
 ...t.java => AbstractExpireAtIntegrationTest.java} |  31 +-
 ...est.java => AbstractExpireIntegrationTest.java} |  27 +-
 ...nTest.java => AbstractKeysIntegrationTest.java} |  37 +-
 ...nTest.java => AbstractPTTLIntegrationTest.java} |  27 +-
 ...st.java => AbstractPersistIntegrationTest.java} |  31 +-
 ...st.java => AbstractPexpireIntegrationTest.java} |  28 +-
 ...est.java => AbstractRenameIntegrationTest.java} |  59 +-
 .../executor/key/AbstractScanIntegrationTest.java  | 112 +++
 ...onTest.java => AbstractTTLIntegrationTest.java} |  27 +-
 ...nTest.java => AbstractTypeIntegrationTest.java} |  27 +-
 .../internal/executor/key/DelIntegrationTest.java  | 103 +--
 .../executor/key/ExistsIntegrationTest.java        | 174 +---
 .../executor/key/ExpireAtIntegrationTest.java      | 110 +--
 .../executor/key/ExpireIntegrationTest.java        | 324 +-------
 .../internal/executor/key/KeysIntegrationTest.java |  75 +-
 .../internal/executor/key/PTTLIntegrationTest.java |  59 +-
 .../executor/key/PersistIntegrationTest.java       | 128 +--
 .../executor/key/PexpireIntegrationTest.java       |  75 +-
 .../executor/key/RenameIntegrationTest.java        | 398 +---------
 .../executor/key/ScanIntegrationTest.java}         |  15 +-
 .../internal/executor/key/TTLIntegrationTest.java  |  59 +-
 .../internal/executor/key/TypeIntegrationTest.java |  68 +-
 ...a => AbstractLettucePubSubIntegrationTest.java} |   9 +-
 ...est.java => AbstractPubSubIntegrationTest.java} |  48 +-
 ...a => AbstractSubscriptionsIntegrationTest.java} |  31 +-
 .../pubsub/LettucePubSubIntegrationTest.java       | 354 +--------
 .../executor/pubsub/PubSubIntegrationTest.java     | 877 +--------------------
 .../pubsub/SubscriptionsIntegrationTest.java       |  81 +-
 ...t.java => AbstractFlushAllIntegrationTest.java} |  28 +-
 ...nTest.java => AbstractInfoIntegrationTest.java} |  60 +-
 ...nTest.java => AbstractTimeIntegrationTest.java} |  18 +-
 .../executor/server/FlushAllIntegrationTest.java   |  35 +-
 .../executor/server/InfoIntegrationTest.java       | 157 +---
 .../executor/server/ShutdownIntegrationTest.java   |   4 +-
 .../executor/server/TimeIntegrationTest.java       |  42 +-
 ...Test.java => AbstractSDiffIntegrationTest.java} |  36 +-
 ...est.java => AbstractSInterIntegrationTest.java} |  35 +-
 ....java => AbstractSIsMemberIntegrationTest.java} |  27 +-
 ...Test.java => AbstractSMoveIntegrationTest.java} |  36 +-
 ...nTest.java => AbstractSPopIntegrationTest.java} |  79 +-
 ...nTest.java => AbstractSRemIntegrationTest.java} |  36 +-
 ...est.java => AbstractSUnionIntegrationTest.java} |  36 +-
 ...nTest.java => AbstractSetsIntegrationTest.java} |  37 +-
 .../executor/set/SDiffIntegrationTest.java         | 179 +----
 .../executor/set/SInterIntegrationTest.java        | 179 +----
 .../executor/set/SIsMemberIntegrationTest.java     |  68 +-
 .../executor/set/SMoveIntegrationTest.java         | 117 +--
 .../internal/executor/set/SPopIntegrationTest.java | 163 +---
 .../internal/executor/set/SRemIntegrationTest.java | 161 +---
 .../executor/set/SUnionIntegrationTest.java        | 172 +---
 .../internal/executor/set/SetsIntegrationTest.java | 236 +-----
 ...est.java => AbstractAppendIntegrationTest.java} |  27 +-
 ...t.java => AbstractBitCountIntegrationTest.java} |  25 +-
 ...Test.java => AbstractBitOpIntegrationTest.java} |  25 +-
 ...est.java => AbstractBitPosIntegrationTest.java} |  25 +-
 ...est.java => AbstractDecrByIntegrationTest.java} |  27 +-
 ...nTest.java => AbstractDecrIntegrationTest.java} |  29 +-
 ...est.java => AbstractGetBitIntegrationTest.java} |  25 +-
 ...onTest.java => AbstractGetIntegrationTest.java} |  25 +-
 ...t.java => AbstractGetRangeIntegrationTest.java} |  25 +-
 ...est.java => AbstractGetSetIntegrationTest.java} |  29 +-
 ...ava => AbstractIncrByFloatIntegrationTest.java} |  27 +-
 ...est.java => AbstractIncrByIntegrationTest.java} |  27 +-
 ...nTest.java => AbstractIncrIntegrationTest.java} |  29 +-
 ...nTest.java => AbstractMGetIntegrationTest.java} |  25 +-
 ...nTest.java => AbstractMSetIntegrationTest.java} |  29 +-
 ...est.java => AbstractMSetNXIntegrationTest.java} |  25 +-
 ...est.java => AbstractPSetEXIntegrationTest.java} |  25 +-
 ...est.java => AbstractSetBitIntegrationTest.java} |  25 +-
 ...Test.java => AbstractSetEXIntegrationTest.java} |  25 +-
 ...onTest.java => AbstractSetIntegrationTest.java} |  29 +-
 ...Test.java => AbstractSetNXIntegrationTest.java} |  25 +-
 ...t.java => AbstractSetRangeIntegrationTest.java} |  25 +-
 ...est.java => AbstractStrLenIntegrationTest.java} |  25 +-
 .../executor/string/AppendIntegrationTest.java     | 101 +--
 .../executor/string/BitCountIntegrationTest.java   | 135 +---
 .../executor/string/BitOpIntegrationTest.java      | 186 +----
 .../executor/string/BitPosIntegrationTest.java     |  52 +-
 .../executor/string/DecrByIntegrationTest.java     |  63 +-
 .../executor/string/DecrIntegrationTest.java       |  68 +-
 .../executor/string/GetBitIntegrationTest.java     |  63 +-
 .../executor/string/GetIntegrationTest.java        |  68 +-
 .../executor/string/GetRangeIntegrationTest.java   | 121 +--
 .../executor/string/GetSetIntegrationTest.java     | 140 +---
 .../string/IncrByFloatIntegrationTest.java         |  51 +-
 .../executor/string/IncrByIntegrationTest.java     |  62 +-
 .../executor/string/IncrIntegrationTest.java       |  98 +--
 .../executor/string/MGetIntegrationTest.java       |  43 +-
 .../executor/string/MSetIntegrationTest.java       | 137 +---
 .../executor/string/MSetNXIntegrationTest.java     |  50 +-
 .../executor/string/PSetEXIntegrationTest.java     |  33 +-
 .../executor/string/SetBitIntegrationTest.java     |  72 +-
 .../executor/string/SetEXIntegrationTest.java      |  40 +-
 .../executor/string/SetIntegrationTest.java        | 491 +-----------
 .../executor/string/SetNXIntegrationTest.java      |  47 +-
 .../executor/string/SetRangeIntegrationTest.java   |  84 +-
 .../executor/string/StrLenIntegrationTest.java     |  64 +-
 .../geode/redis/internal/GeodeRedisServer.java     |   2 +-
 .../geode/redis/internal/executor/GlobPattern.java |   2 +-
 .../internal/executor/connection/PingExecutor.java |   2 +
 .../internal/executor/connection/QuitExecutor.java |   3 +
 .../redis/internal/executor/key/ScanExecutor.java  |  37 +-
 .../executor/pubsub/PsubscribeExecutor.java        |   5 +
 .../executor/pubsub/PunsubscribeExecutor.java      |   2 +
 .../executor/pubsub/SubscribeExecutor.java         |   7 +
 .../executor/pubsub/UnsubscribeExecutor.java       |   2 +
 .../redis/internal/executor/set/SPopExecutor.java  |  15 +-
 .../internal/netty/ExecutionHandlerContext.java    |  30 +
 .../geode/internal/cache/wan/WANTestBase.java      |   9 +
 ...ANPersistenceEnabledGatewaySenderDUnitTest.java |  14 +-
 .../wan/parallel/ParallelWANStatsDUnitTest.java    |  43 +
 ...ANPersistenceEnabledGatewaySenderDUnitTest.java |  19 +-
 gradle/publish-java.gradle                         |   5 +-
 260 files changed, 3599 insertions(+), 10226 deletions(-)
 create mode 100644 extensions/geode-modules-tomcat8/src/test/resources/expected-pom.xml
 create mode 100644 extensions/geode-modules/src/test/resources/expected-pom.xml
 create mode 100644 geode-core/src/test/java/org/apache/geode/internal/cache/FilterProfileTest.java
 rename geode-dunit/src/distributedTest/java/org/apache/geode/test/dunit/examples/{DistributedReferenceCacheExampleTest.java => DistributedCloseableReferenceCacheExampleTest.java} (72%)
 rename geode-dunit/src/distributedTest/java/org/apache/geode/test/dunit/examples/{DistributedReferenceLocatorLauncherExampleTest.java => DistributedCloseableReferenceLocatorLauncherExampleTest.java} (83%)
 rename geode-dunit/src/distributedTest/java/org/apache/geode/test/dunit/examples/{DistributedReferenceServerLauncherExampleTest.java => DistributedCloseableReferenceServerLauncherExampleTest.java} (78%)
 rename geode-dunit/src/distributedTest/java/org/apache/geode/test/dunit/examples/{DistributedReferenceSystemExampleTest.java => DistributedCloseableReferenceSystemExampleTest.java} (71%)
 rename geode-dunit/src/distributedTest/java/org/apache/geode/test/dunit/rules/tests/{DistributedReferenceTest.java => DistributedCloseableReferenceTest.java} (55%)
 rename geode-dunit/src/main/java/org/apache/geode/test/dunit/rules/{DistributedReference.java => DistributedCloseableReference.java} (58%)
 create mode 100644 geode-gfsh/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/CreateDefinedIndexesCommandWithMultipleGfshSessionDUnitTest.java
 create mode 100644 geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/functions/ManageIndexDefinitionFunction.java
 create mode 100644 geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/functions/ManageIndexDefinitionFunctionTest.java
 rename {geode-core => geode-membership}/src/main/java/org/apache/geode/internal/AvailablePort.java (96%)
 copy geode-redis/src/acceptanceTest/java/org/apache/geode/redis/{internal/executor/pubsub/SubscriptionsNativeRedisAcceptanceTest.java => CommandPipeliningNativeRedisAcceptanceTest.java} (81%)
 mode change 100755 => 100644
 copy geode-redis/src/acceptanceTest/java/org/apache/geode/redis/internal/executor/{pubsub/SubscriptionsNativeRedisAcceptanceTest.java => key/ScanNativeRedisAcceptanceTest.java} (82%)
 mode change 100755 => 100644
 copy geode-redis/src/acceptanceTest/java/org/apache/geode/redis/internal/executor/{pubsub/SubscriptionsNativeRedisAcceptanceTest.java => server/FlushAllNativeRedisAcceptanceTest.java} (81%)
 mode change 100755 => 100644
 copy geode-redis/src/acceptanceTest/java/org/apache/geode/redis/internal/executor/{pubsub/SubscriptionsNativeRedisAcceptanceTest.java => server/TimeNativeRedisAcceptanceTest.java} (81%)
 mode change 100755 => 100644
 copy geode-redis/src/{acceptanceTest/java/org/apache/geode/redis/internal/executor/pubsub/SubscriptionsNativeRedisAcceptanceTest.java => commonTest/java/org/apache/geode/test/dunit/rules/RedisPortSupplier.java} (72%)
 mode change 100755 => 100644
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/{CommandPipeliningIntegrationTest.java => AbstractCommandPipeliningIntegrationTest.java} (62%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/{GlobPatternIntegrationTest.java => AbstractGlobPatternIntegrationTest.java} (92%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/connection/{PingIntegrationTest.java => AbstractPingIntegrationTest.java} (77%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/hash/{HashesIntegrationTest.java => AbstractHashesIntegrationTest.java} (96%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/{DelIntegrationTest.java => AbstractDelIntegrationTest.java} (83%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/{ExistsIntegrationTest.java => AbstractExistsIntegrationTest.java} (86%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/{ExpireAtIntegrationTest.java => AbstractExpireAtIntegrationTest.java} (85%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/{ExpireIntegrationTest.java => AbstractExpireIntegrationTest.java} (93%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/{KeysIntegrationTest.java => AbstractKeysIntegrationTest.java} (82%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/{PTTLIntegrationTest.java => AbstractPTTLIntegrationTest.java} (79%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/{PersistIntegrationTest.java => AbstractPersistIntegrationTest.java} (86%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/{PexpireIntegrationTest.java => AbstractPexpireIntegrationTest.java} (80%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/{RenameIntegrationTest.java => AbstractRenameIntegrationTest.java} (91%)
 create mode 100644 geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/AbstractScanIntegrationTest.java
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/{TTLIntegrationTest.java => AbstractTTLIntegrationTest.java} (79%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/key/{TypeIntegrationTest.java => AbstractTypeIntegrationTest.java} (81%)
 copy geode-redis/src/{acceptanceTest/java/org/apache/geode/redis/internal/executor/pubsub/SubscriptionsNativeRedisAcceptanceTest.java => integrationTest/java/org/apache/geode/redis/internal/executor/key/ScanIntegrationTest.java} (72%)
 mode change 100755 => 100644
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/pubsub/{LettucePubSubIntegrationTest.java => AbstractLettucePubSubIntegrationTest.java} (98%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/pubsub/{PubSubIntegrationTest.java => AbstractPubSubIntegrationTest.java} (96%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/pubsub/{SubscriptionsIntegrationTest.java => AbstractSubscriptionsIntegrationTest.java} (79%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/server/{FlushAllIntegrationTest.java => AbstractFlushAllIntegrationTest.java} (72%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/server/{InfoIntegrationTest.java => AbstractInfoIntegrationTest.java} (75%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/server/{TimeIntegrationTest.java => AbstractTimeIntegrationTest.java} (76%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/set/{SDiffIntegrationTest.java => AbstractSDiffIntegrationTest.java} (90%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/set/{SInterIntegrationTest.java => AbstractSInterIntegrationTest.java} (91%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/set/{SIsMemberIntegrationTest.java => AbstractSIsMemberIntegrationTest.java} (83%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/set/{SMoveIntegrationTest.java => AbstractSMoveIntegrationTest.java} (85%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/set/{SPopIntegrationTest.java => AbstractSPopIntegrationTest.java} (76%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/set/{SRemIntegrationTest.java => AbstractSRemIntegrationTest.java} (86%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/set/{SUnionIntegrationTest.java => AbstractSUnionIntegrationTest.java} (89%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/set/{SetsIntegrationTest.java => AbstractSetsIntegrationTest.java} (91%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{AppendIntegrationTest.java => AbstractAppendIntegrationTest.java} (85%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{BitCountIntegrationTest.java => AbstractBitCountIntegrationTest.java} (89%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{BitOpIntegrationTest.java => AbstractBitOpIntegrationTest.java} (92%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{BitPosIntegrationTest.java => AbstractBitPosIntegrationTest.java} (82%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{DecrByIntegrationTest.java => AbstractDecrByIntegrationTest.java} (79%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{DecrIntegrationTest.java => AbstractDecrIntegrationTest.java} (81%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{GetBitIntegrationTest.java => AbstractGetBitIntegrationTest.java} (81%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{GetIntegrationTest.java => AbstractGetIntegrationTest.java} (81%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{GetRangeIntegrationTest.java => AbstractGetRangeIntegrationTest.java} (90%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{GetSetIntegrationTest.java => AbstractGetSetIntegrationTest.java} (89%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{IncrByFloatIntegrationTest.java => AbstractIncrByFloatIntegrationTest.java} (77%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{IncrByIntegrationTest.java => AbstractIncrByIntegrationTest.java} (79%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{IncrIntegrationTest.java => AbstractIncrIntegrationTest.java} (85%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{MGetIntegrationTest.java => AbstractMGetIntegrationTest.java} (76%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{MSetIntegrationTest.java => AbstractMSetIntegrationTest.java} (89%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{MSetNXIntegrationTest.java => AbstractMSetNXIntegrationTest.java} (78%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{PSetEXIntegrationTest.java => AbstractPSetEXIntegrationTest.java} (71%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{SetBitIntegrationTest.java => AbstractSetBitIntegrationTest.java} (83%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{SetEXIntegrationTest.java => AbstractSetEXIntegrationTest.java} (75%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{SetIntegrationTest.java => AbstractSetIntegrationTest.java} (96%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{SetNXIntegrationTest.java => AbstractSetNXIntegrationTest.java} (76%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{SetRangeIntegrationTest.java => AbstractSetRangeIntegrationTest.java} (85%)
 copy geode-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/string/{StrLenIntegrationTest.java => AbstractStrLenIntegrationTest.java} (81%)


[geode] 11/22: GEODE-7678 (2nd PR) - Support for cache-listener and client-notification for Partitioned Region Clear operation (#5124)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 0888d88c2e8af7f9d3553c4892baeff7f6d2809d
Author: agingade <ag...@pivotal.io>
AuthorDate: Wed May 20 16:08:07 2020 -0700

    GEODE-7678 (2nd PR) - Support for cache-listener and client-notification for Partitioned Region Clear operation  (#5124)
    
    * GEODE-7678: Add support for cache listener and client notification for PR clear
    
    The changes are made to the PR clear messaging and locking mechanism to preserve
    cache-listener and client-event ordering during concurrent cache operations
    while a clear is in progress.
---
 .../integrationTest/resources/assembly_content.txt |   1 +
 .../cache/PRCacheListenerDistributedTest.java      | 250 +++++++++++-
 .../ReplicateCacheListenerDistributedTest.java     | 111 +++++-
 ...ionedRegionAfterClearNotificationDUnitTest.java | 372 ++++++++++++++++++
 .../cache/PartitionedRegionClearDUnitTest.java     |   1 -
 ...titionedRegionClearWithExpirationDUnitTest.java |  69 ++--
 ...itionedRegionClearWithExpirationDUnitTest.java} |  58 +--
 .../cache/PartitionedRegionIntegrationTest.java    |  45 +++
 .../codeAnalysis/sanctionedDataSerializables.txt   |   8 +
 .../PartitionedRegionPartialClearException.java    |  37 ++
 .../main/java/org/apache/geode/cache/Region.java   |   4 +-
 .../org/apache/geode/internal/DSFIDFactory.java    |   5 +
 .../apache/geode/internal/cache/BucketAdvisor.java |   2 +-
 .../apache/geode/internal/cache/BucketRegion.java  |  17 +-
 .../internal/cache/DistributedClearOperation.java  |  10 +-
 .../geode/internal/cache/DistributedRegion.java    |   9 +-
 .../geode/internal/cache/InternalRegion.java       |   3 +
 .../apache/geode/internal/cache/LocalRegion.java   |   3 +-
 .../geode/internal/cache/PartitionedRegion.java    | 217 ++---------
 .../internal/cache/PartitionedRegionClear.java     | 419 +++++++++++++++++++++
 .../cache/PartitionedRegionClearMessage.java       | 287 ++++++++++++++
 .../internal/cache/PartitionedRegionDataStore.java |   8 +
 .../internal/cache/partitioned/RegionAdvisor.java  |  11 +
 .../sanctioned-geode-core-serializables.txt        |   2 +
 .../internal/cache/BucketRegionJUnitTest.java      |   4 +-
 .../internal/cache/PartitionedRegionTest.java      |  39 --
 .../serialization/DataSerializableFixedID.java     |   2 +
 27 files changed, 1679 insertions(+), 315 deletions(-)

diff --git a/geode-assembly/src/integrationTest/resources/assembly_content.txt b/geode-assembly/src/integrationTest/resources/assembly_content.txt
index 89d9008..549150f 100644
--- a/geode-assembly/src/integrationTest/resources/assembly_content.txt
+++ b/geode-assembly/src/integrationTest/resources/assembly_content.txt
@@ -221,6 +221,7 @@ javadoc/org/apache/geode/cache/PartitionAttributes.html
 javadoc/org/apache/geode/cache/PartitionAttributesFactory.html
 javadoc/org/apache/geode/cache/PartitionResolver.html
 javadoc/org/apache/geode/cache/PartitionedRegionDistributionException.html
+javadoc/org/apache/geode/cache/PartitionedRegionPartialClearException.html
 javadoc/org/apache/geode/cache/PartitionedRegionStorageException.html
 javadoc/org/apache/geode/cache/Region.Entry.html
 javadoc/org/apache/geode/cache/Region.html
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
index 559def7..f4a9ac9 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
@@ -14,14 +14,21 @@
  */
 package org.apache.geode.cache;
 
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.apache.geode.test.dunit.VM.getVMCount;
+import static org.assertj.core.api.Assertions.assertThat;
+
 import java.util.Arrays;
+import java.util.Collection;
 
+import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
 import org.junit.runners.Parameterized.Parameters;
 import org.junit.runners.Parameterized.UseParametersRunnerFactory;
 
+import org.apache.geode.logging.internal.log4j.api.LogService;
 import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
 
 /**
@@ -38,28 +45,60 @@ import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactor
 @SuppressWarnings("serial")
 public class PRCacheListenerDistributedTest extends ReplicateCacheListenerDistributedTest {
 
-  @Parameters(name = "{index}: redundancy={0}")
-  public static Iterable<Integer> data() {
-    return Arrays.asList(0, 3);
+  @Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] {
+        {1, Boolean.FALSE},
+        {3, Boolean.TRUE},
+    });
   }
 
   @Parameter
   public int redundancy;
 
+  @Parameter(1)
+  public Boolean withData;
+
   @Override
   protected Region<String, Integer> createRegion(final String name,
       final CacheListener<String, Integer> listener) {
+    return createPartitionedRegion(name, listener, false);
+  }
+
+  protected Region<String, Integer> createAccessorRegion(final String name,
+      final CacheListener<String, Integer> listener) {
+    return createPartitionedRegion(name, listener, true);
+  }
+
+  private Region<String, Integer> createPartitionedRegion(String name,
+      CacheListener<String, Integer> listener, boolean accessor) {
+    LogService.getLogger()
+        .info("Params [Redundancy: " + redundancy + " withData:" + withData + "]");
     PartitionAttributesFactory<String, Integer> paf = new PartitionAttributesFactory<>();
     paf.setRedundantCopies(redundancy);
 
+    if (accessor) {
+      paf.setLocalMaxMemory(0);
+    }
     RegionFactory<String, Integer> regionFactory = cacheRule.getCache().createRegionFactory();
-    regionFactory.addCacheListener(listener);
+    if (listener != null) {
+      regionFactory.addCacheListener(listener);
+    }
     regionFactory.setDataPolicy(DataPolicy.PARTITION);
     regionFactory.setPartitionAttributes(paf.create());
 
     return regionFactory.create(name);
   }
 
+  private void withData(Region region) {
+    if (withData) {
+      // Fewer buckets.
+      // Covers case where node doesn't have any buckets depending on redundancy.
+      region.put("key1", "value1");
+      region.put("key2", "value2");
+    }
+  }
+
   @Override
   protected int expectedCreates() {
     return 1;
@@ -79,4 +118,207 @@ public class PRCacheListenerDistributedTest extends ReplicateCacheListenerDistri
   protected int expectedDestroys() {
     return 1;
   }
+
+  @Test
+  public void afterRegionDestroyIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, listener));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY)).isEqualTo(expectedRegionDestroys());
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedOnNodeWithListener() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedOnRemoteNodeWithListener() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, null);
+
+    getVM(0).invoke(() -> {
+      createRegion(regionName, listener);
+    });
+
+    for (int i = 1; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedOnAccessorAndDataMembers() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, listener));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY))
+        .isGreaterThanOrEqualTo(expectedRegionDestroys());
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedOnAccessor() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedOnNonAccessor() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, null);
+    getVM(0).invoke(() -> {
+      createRegion(regionName, listener);
+    });
+    for (int i = 1; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionClearIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, listener));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(expectedClears());
+  }
+
+  @Test
+  public void afterClearIsInvokedOnNodeWithListener() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionClearIsInvokedOnRemoteNodeWithListener() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, null);
+    getVM(0).invoke(() -> {
+      createRegion(regionName, listener);
+    });
+    for (int i = 1; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionClearIsInvokedOnAccessorAndDataMembers() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, listener));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(expectedClears());
+  }
+
+  @Test
+  public void afterRegionClearIsInvokedOnAccessor() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionClearIsInvokedOnNonAccessor() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, null);
+
+    getVM(0).invoke(() -> {
+      createRegion(regionName, listener);
+    });
+    for (int i = 1; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(1);
+  }
+
 }
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java
index 3eedcef..6612833 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java
@@ -51,13 +51,15 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
   private static final String UPDATES = "UPDATES";
   private static final String INVALIDATES = "INVALIDATES";
   private static final String DESTROYS = "DESTROYS";
+  protected static final String CLEAR = "CLEAR";
+  protected static final String REGION_DESTROY = "REGION_DESTROY";
 
   private static final int ENTRY_VALUE = 0;
   private static final int UPDATED_ENTRY_VALUE = 1;
 
   private static final String KEY = "key-1";
 
-  private String regionName;
+  protected String regionName;
 
   @Rule
   public DistributedRule distributedRule = new DistributedRule();
@@ -82,6 +84,8 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
     distributedCounters.initialize(DESTROYS);
     distributedCounters.initialize(INVALIDATES);
     distributedCounters.initialize(UPDATES);
+    distributedCounters.initialize(CLEAR);
+    distributedCounters.initialize(REGION_DESTROY);
   }
 
   @Test
@@ -148,6 +152,36 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
     assertThat(distributedCounters.getTotal(DESTROYS)).isEqualTo(expectedDestroys());
   }
 
+  @Test
+  public void afterClearIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.clear();
+
+    assertThat(distributedCounters.getTotal(CLEAR)).isEqualTo(expectedClears());
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(distributedCounters.getTotal(REGION_DESTROY)).isEqualTo(expectedRegionDestroys());
+  }
+
   protected Region<String, Integer> createRegion(final String name,
       final CacheListener<String, Integer> listener) {
     RegionFactory<String, Integer> regionFactory = cacheRule.getCache().createRegionFactory();
@@ -174,6 +208,14 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
     return getVMCount() + 1;
   }
 
+  protected int expectedClears() {
+    return getVMCount() + 1;
+  }
+
+  protected int expectedRegionDestroys() {
+    return getVMCount() + 1;
+  }
+
   /**
    * Overridden within tests to increment shared counters.
    */
@@ -283,7 +325,12 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
 
     @Override
     public void afterCreate(final EntryEvent<String, Integer> event) {
-      // ignore
+      distributedCounters.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      distributedCounters.increment(UPDATES);
     }
 
     @Override
@@ -302,4 +349,64 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
       errorCollector.checkThat(event.getNewValue(), nullValue());
     }
   }
+
+  protected class ClearCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      distributedCounters.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      distributedCounters.increment(UPDATES);
+    }
+
+    @Override
+    public void afterRegionClear(RegionEvent<String, Integer> event) {
+
+      distributedCounters.increment(CLEAR);
+      if (!event.getRegion().getAttributes().getDataPolicy().withPartitioning()) {
+        if (event.isOriginRemote()) {
+          errorCollector.checkThat(event.getDistributedMember(),
+              not(cacheRule.getSystem().getDistributedMember()));
+        } else {
+          errorCollector.checkThat(event.getDistributedMember(),
+              equalTo(cacheRule.getSystem().getDistributedMember()));
+        }
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.REGION_CLEAR));
+      errorCollector.checkThat(event.getRegion().getName(), equalTo(regionName));
+    }
+  }
+
+  protected class RegionDestroyCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      distributedCounters.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      distributedCounters.increment(UPDATES);
+    }
+
+    @Override
+    public void afterRegionDestroy(final RegionEvent<String, Integer> event) {
+      distributedCounters.increment(REGION_DESTROY);
+
+      if (!event.getRegion().getAttributes().getDataPolicy().withPartitioning()) {
+        if (event.isOriginRemote()) {
+          errorCollector.checkThat(event.getDistributedMember(),
+              not(cacheRule.getSystem().getDistributedMember()));
+        } else {
+          errorCollector.checkThat(event.getDistributedMember(),
+              equalTo(cacheRule.getSystem().getDistributedMember()));
+        }
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.REGION_DESTROY));
+      errorCollector.checkThat(event.getRegion().getName(), equalTo(regionName));
+    }
+  }
 }
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionAfterClearNotificationDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionAfterClearNotificationDUnitTest.java
new file mode 100644
index 0000000..237b6a8
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionAfterClearNotificationDUnitTest.java
@@ -0,0 +1,372 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.geode.test.dunit.rules.ClusterStartupRule.getCache;
+import static org.apache.geode.test.dunit.rules.ClusterStartupRule.getClientCache;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.IntStream;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.InterestResultPolicy;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.client.ClientRegionShortcut;
+import org.apache.geode.cache.server.CacheServer;
+import org.apache.geode.cache.util.CacheListenerAdapter;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.DistributionMessageObserver;
+import org.apache.geode.test.awaitility.GeodeAwaitility;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.DUnitBlackboard;
+import org.apache.geode.test.dunit.SerializableCallableIF;
+import org.apache.geode.test.dunit.rules.ClientVM;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+
+public class PartitionedRegionAfterClearNotificationDUnitTest implements Serializable {
+  protected static final String REGION_NAME = "testPR";
+  protected static final int NUM_ENTRIES = 100;
+
+  protected int locatorPort;
+  protected MemberVM locator;
+  protected MemberVM dataStore1;
+  protected MemberVM dataStore2;
+  protected MemberVM dataStore3;
+  protected MemberVM accessor;
+
+  protected ClientVM client1;
+  protected ClientVM client2;
+
+  private static volatile DUnitBlackboard blackboard;
+
+  @Rule
+  public ClusterStartupRule cluster = new ClusterStartupRule(7);
+
+  @Before
+  public void setUp() throws Exception {
+    locator = cluster.startLocatorVM(0);
+    locatorPort = locator.getPort();
+    dataStore1 = cluster.startServerVM(1, getProperties(), locatorPort);
+    dataStore2 = cluster.startServerVM(2, getProperties(), locatorPort);
+    dataStore3 = cluster.startServerVM(3, getProperties(), locatorPort);
+    accessor = cluster.startServerVM(4, getProperties(), locatorPort);
+
+    client1 = cluster.startClientVM(5,
+        c -> c.withPoolSubscription(true).withLocatorConnection((locatorPort)));
+    client2 = cluster.startClientVM(6,
+        c -> c.withPoolSubscription(true).withLocatorConnection((locatorPort)));
+
+    dataStore1.invoke(this::initDataStore);
+    dataStore2.invoke(this::initDataStore);
+    dataStore3.invoke(this::initDataStore);
+    accessor.invoke(this::initAccessor);
+
+    getBlackboard().initBlackboard();
+  }
+
+  protected RegionShortcut getRegionShortCut() {
+    return RegionShortcut.PARTITION_REDUNDANT;
+  }
+
+  protected Properties getProperties() {
+    Properties properties = new Properties();
+    return properties;
+  }
+
+  private Region getRegion(boolean isClient) {
+    if (isClient) {
+      return getClientCache().getRegion(REGION_NAME);
+    } else {
+      return getCache().getRegion(REGION_NAME);
+    }
+  }
+
+  private void verifyRegionSize(boolean isClient, int expectedNum) {
+    GeodeAwaitility.await()
+        .untilAsserted(() -> assertThat(getRegion(isClient).size()).isEqualTo(expectedNum));
+  }
+
+  private void initClientCache() {
+    Region region = getClientCache().createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
+        .create(REGION_NAME);
+    region.registerInterestForAllKeys(InterestResultPolicy.KEYS);
+  }
+
+  private void stopServers() {
+    List<CacheServer> cacheServers = getCache().getCacheServers();
+    for (CacheServer server : cacheServers) {
+      server.stop();
+    }
+  }
+
+  private void initDataStore() {
+    getCache().createRegionFactory(getRegionShortCut())
+        .setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(10).create())
+        .addCacheListener(new CountingCacheListener())
+        .create(REGION_NAME);
+  }
+
+  private void initAccessor() {
+    RegionShortcut shortcut = getRegionShortCut();
+    getCache().createRegionFactory(shortcut)
+        .setPartitionAttributes(
+            new PartitionAttributesFactory().setTotalNumBuckets(10).setLocalMaxMemory(0).create())
+        .addCacheListener(new CountingCacheListener())
+        .create(REGION_NAME);
+  }
+
+  private void feed(boolean isClient) {
+    Region region = getRegion(isClient);
+    IntStream.range(0, NUM_ENTRIES).forEach(i -> region.put(i, "value" + i));
+  }
+
+  private void verifyServerRegionSize(int expectedNum) {
+    accessor.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore1.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore2.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore3.invoke(() -> verifyRegionSize(false, expectedNum));
+  }
+
+  private void verifyClientRegionSize(int expectedNum) {
+    client1.invoke(() -> verifyRegionSize(true, expectedNum));
+    client2.invoke(() -> verifyRegionSize(true, expectedNum));
+  }
+
+  private void verifyCacheListenerTriggerCount(MemberVM serverVM) {
+    SerializableCallableIF<Integer> getListenerTriggerCount = () -> {
+      CountingCacheListener countingCacheListener =
+          (CountingCacheListener) getRegion(false).getAttributes()
+              .getCacheListeners()[0];
+      return countingCacheListener.getClears();
+    };
+
+    int count = accessor.invoke(getListenerTriggerCount)
+        + dataStore1.invoke(getListenerTriggerCount)
+        + dataStore2.invoke(getListenerTriggerCount)
+        + dataStore3.invoke(getListenerTriggerCount);
+    assertThat(count).isEqualTo(4);
+
+    if (serverVM != null) {
+      assertThat(serverVM.invoke(getListenerTriggerCount)).isEqualTo(1);
+    }
+  }
+
+  @Test
+  public void invokeClearOnDataStoreAndVerifyListenerCount() {
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    dataStore1.invoke(() -> getRegion(false).clear());
+
+    verifyServerRegionSize(0);
+    verifyCacheListenerTriggerCount(dataStore1);
+  }
+
+  @Test
+  public void invokeClearOnAccessorAndVerifyListenerCount() {
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    accessor.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+    verifyCacheListenerTriggerCount(accessor);
+  }
+
+  @Test
+  public void invokeClearFromClientAndVerifyListenerCount() {
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    client1.invoke(() -> feed(true));
+    verifyClientRegionSize(NUM_ENTRIES);
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    client1.invoke(() -> getRegion(true).clear());
+
+    verifyServerRegionSize(0);
+    verifyClientRegionSize(0);
+    verifyCacheListenerTriggerCount(null);
+  }
+
+  @Test
+  public void invokeClearFromClientWithAccessorAsServer() {
+    dataStore1.invoke(this::stopServers);
+    dataStore2.invoke(this::stopServers);
+    dataStore3.invoke(this::stopServers);
+
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    client1.invoke(() -> feed(true));
+    verifyClientRegionSize(NUM_ENTRIES);
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    client1.invoke(() -> getRegion(true).clear());
+
+    verifyServerRegionSize(0);
+    verifyClientRegionSize(0);
+    verifyCacheListenerTriggerCount(null);
+  }
+
+  @Test
+  public void invokeClearFromDataStoreWithClientInterest() {
+    // Clear invoked from a data store while clients are connected.
+    // NOTE(review): despite the method name, no explicit registerInterest call is
+    // visible here — presumably initClientCache registers interest; confirm.
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    dataStore1.invoke(() -> getRegion(false).clear());
+
+    verifyServerRegionSize(0);
+    verifyCacheListenerTriggerCount(dataStore1);
+  }
+
+  @Test(expected = AssertionError.class)
+  public void verifyTheLocksAreClearedWhenMemberDepartsAfterTakingClearLockOnRemoteMembers()
+      throws Exception {
+    // The clear coordinator (dataStore1) is killed after the remote members have
+    // taken the clear lock but BEFORE the lock message is processed; the surviving
+    // members must release the lock so subsequent operations are not blocked.
+    // NOTE(review): expected = AssertionError.class makes the test pass on ANY
+    // AssertionError raised anywhere in this multi-step test, not just the intended
+    // one — consider asserting on the specific failing call instead.
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    // Install the observer that will coordinate killing the coordinator mid-clear.
+    dataStore2.invoke(() -> DistributionMessageObserver.setInstance(
+        testHookToKillMemberCallingClearBeforeMessageProcessed()));
+
+    AsyncInvocation ds1ClearAsync = dataStore1.invokeAsync(() -> getRegion(false).clear());
+
+    getBlackboard().waitForGate("CLOSE_CACHE", 30, SECONDS);
+
+    // Simulate coordinator departure while it holds the remote clear locks.
+    dataStore1.invoke(() -> getCache().close());
+    getBlackboard().signalGate("CACHE_CLOSED");
+
+    // This should not be blocked.
+    dataStore2.invoke(() -> feed(false));
+    dataStore3.invoke(() -> feed(false));
+
+    dataStore2.invoke(() -> verifyRegionSize(false, NUM_ENTRIES));
+    dataStore3.invoke(() -> verifyRegionSize(false, NUM_ENTRIES));
+
+    ds1ClearAsync.await();
+  }
+
+  @Test
+  public void verifyTheLocksAreClearedWhenMemberDepartsAfterTakingClearLockOnRemoteMembersAfterMessageProcessed()
+      throws Exception {
+    // Same scenario as the test above, except the coordinator is killed AFTER the
+    // lock message has been processed on the remote member; locks must still be
+    // released when the departure is detected, so puts on the survivors proceed.
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    dataStore2.invoke(() -> DistributionMessageObserver.setInstance(
+        testHookToKillMemberCallingClearAfterMessageProcessed()));
+
+    AsyncInvocation ds1ClearAsync = dataStore1.invokeAsync(() -> getRegion(false).clear());
+
+    getBlackboard().waitForGate("CLOSE_CACHE", 30, SECONDS);
+
+    // Simulate coordinator departure while it holds the remote clear locks.
+    dataStore1.invoke(() -> getCache().close());
+    getBlackboard().signalGate("CACHE_CLOSED");
+
+    // This should not be blocked.
+    dataStore2.invoke(() -> feed(false));
+    dataStore3.invoke(() -> feed(false));
+
+    dataStore2.invoke(() -> verifyRegionSize(false, NUM_ENTRIES));
+    dataStore3.invoke(() -> verifyRegionSize(false, NUM_ENTRIES));
+
+    ds1ClearAsync.await();
+  }
+
+
+
+  /**
+   * Cache listener that counts invocations of {@code afterRegionClear}, used by the
+   * verifyCacheListenerTriggerCount assertions in the tests above.
+   */
+  private static class CountingCacheListener extends CacheListenerAdapter {
+    private final AtomicInteger clears = new AtomicInteger();
+
+    @Override
+    public void afterRegionClear(RegionEvent event) {
+      clears.incrementAndGet();
+    }
+
+    // Returns the number of afterRegionClear callbacks observed so far.
+    int getClears() {
+      return clears.get();
+
+    }
+  }
+
+  /**
+   * Observer installed on a remote member: when the PR-clear lock message arrives
+   * (before it is processed), signal the test to close the coordinator's cache and
+   * wait until the coordinator is no longer a cluster member.
+   */
+  private DistributionMessageObserver testHookToKillMemberCallingClearBeforeMessageProcessed() {
+    return new DistributionMessageObserver() {
+
+      @Override
+      public void beforeProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
+        if (message instanceof PartitionedRegionClearMessage) {
+          if (((PartitionedRegionClearMessage) message)
+              .getOp() == PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR) {
+            // Only trigger once: uninstall the observer before signalling the test.
+            DistributionMessageObserver.setInstance(null);
+            getBlackboard().signalGate("CLOSE_CACHE");
+            try {
+              getBlackboard().waitForGate("CACHE_CLOSED", 30, SECONDS);
+              // Wait until the departed coordinator has left the membership view.
+              GeodeAwaitility.await().untilAsserted(
+                  () -> assertThat(dm.isCurrentMember(message.getSender())).isFalse());
+            } catch (InterruptedException e) {
+              // Restore the interrupt status and preserve the cause for diagnosis.
+              Thread.currentThread().interrupt();
+              throw new RuntimeException("Failed waiting for signal.", e);
+            } catch (TimeoutException e) {
+              throw new RuntimeException("Failed waiting for signal.", e);
+            }
+          }
+        }
+      }
+    };
+  }
+
+  /**
+   * Observer installed on a remote member: when the PR-clear lock message has been
+   * processed, signal the test to close the coordinator's cache and wait for the
+   * close to complete before letting distribution continue.
+   */
+  private DistributionMessageObserver testHookToKillMemberCallingClearAfterMessageProcessed() {
+    return new DistributionMessageObserver() {
+      @Override
+      public void afterProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
+        if (message instanceof PartitionedRegionClearMessage) {
+          if (((PartitionedRegionClearMessage) message)
+              .getOp() == PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR) {
+            // Only trigger once: uninstall the observer before signalling the test.
+            DistributionMessageObserver.setInstance(null);
+            getBlackboard().signalGate("CLOSE_CACHE");
+            try {
+              getBlackboard().waitForGate("CACHE_CLOSED", 30, SECONDS);
+            } catch (InterruptedException e) {
+              // Restore the interrupt status and preserve the cause for diagnosis.
+              Thread.currentThread().interrupt();
+              throw new RuntimeException("Failed waiting for signal.", e);
+            } catch (TimeoutException e) {
+              throw new RuntimeException("Failed waiting for signal.", e);
+            }
+          }
+        }
+      }
+    };
+  }
+
+  /**
+   * Lazily creates the shared blackboard. Synchronized because it is reached both
+   * from the test thread and from message-observer callbacks; without it two
+   * DUnitBlackboard instances could be created and gate signals lost.
+   */
+  private static synchronized DUnitBlackboard getBlackboard() {
+    if (blackboard == null) {
+      blackboard = new DUnitBlackboard();
+    }
+    return blackboard;
+  }
+
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
index e2e04eb..a3b311c 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
@@ -80,7 +80,6 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
 
   protected Properties getProperties() {
     Properties properties = new Properties();
-    properties.setProperty("log-level", "info");
     return properties;
   }
 
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
index 7f3dff9..dfc9470 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
@@ -17,12 +17,8 @@ package org.apache.geode.internal.cache;
 import static org.apache.geode.cache.ExpirationAction.DESTROY;
 import static org.apache.geode.cache.RegionShortcut.PARTITION;
 import static org.apache.geode.cache.RegionShortcut.PARTITION_OVERFLOW;
-import static org.apache.geode.cache.RegionShortcut.PARTITION_PERSISTENT;
-import static org.apache.geode.cache.RegionShortcut.PARTITION_PERSISTENT_OVERFLOW;
 import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT;
 import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_OVERFLOW;
-import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_PERSISTENT;
-import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_PERSISTENT_OVERFLOW;
 import static org.apache.geode.internal.util.ArrayUtils.asList;
 import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
 import static org.apache.geode.test.dunit.VM.getVM;
@@ -53,6 +49,7 @@ import org.apache.geode.cache.CacheWriterException;
 import org.apache.geode.cache.ExpirationAttributes;
 import org.apache.geode.cache.PartitionAttributes;
 import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.PartitionedRegionPartialClearException;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionEvent;
 import org.apache.geode.cache.RegionShortcut;
@@ -75,7 +72,8 @@ import org.apache.geode.test.dunit.rules.DistributedRule;
 @RunWith(JUnitParamsRunner.class)
 public class PartitionedRegionClearWithExpirationDUnitTest implements Serializable {
   private static final Integer BUCKETS = 13;
-  private static final Integer EXPIRATION_TIME = 30;
+  private static final Integer EXPIRATION_TIME = 5 * 60;
+  private static final Integer SMALL_EXPIRATION_TIME = 10;
   private static final String REGION_NAME = "PartitionedRegion";
 
   @Rule
@@ -106,11 +104,6 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
         PARTITION_OVERFLOW,
         PARTITION_REDUNDANT,
         PARTITION_REDUNDANT_OVERFLOW,
-
-        PARTITION_PERSISTENT,
-        PARTITION_PERSISTENT_OVERFLOW,
-        PARTITION_REDUNDANT_PERSISTENT,
-        PARTITION_REDUNDANT_PERSISTENT_OVERFLOW
     };
   }
 
@@ -134,26 +127,8 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
     accessor = getVM(TestVM.ACCESSOR.vmNumber);
   }
 
-  private RegionShortcut getRegionAccessorShortcut(RegionShortcut dataStoreRegionShortcut) {
-    if (dataStoreRegionShortcut.isPersistent()) {
-      switch (dataStoreRegionShortcut) {
-        case PARTITION_PERSISTENT:
-          return PARTITION;
-        case PARTITION_PERSISTENT_OVERFLOW:
-          return PARTITION_OVERFLOW;
-        case PARTITION_REDUNDANT_PERSISTENT:
-          return PARTITION_REDUNDANT;
-        case PARTITION_REDUNDANT_PERSISTENT_OVERFLOW:
-          return PARTITION_REDUNDANT_OVERFLOW;
-      }
-    }
-
-    return dataStoreRegionShortcut;
-  }
-
   private void initAccessor(RegionShortcut regionShortcut,
       ExpirationAttributes expirationAttributes) {
-    RegionShortcut accessorShortcut = getRegionAccessorShortcut(regionShortcut);
     PartitionAttributes<String, String> attributes =
         new PartitionAttributesFactory<String, String>()
             .setTotalNumBuckets(BUCKETS)
@@ -161,7 +136,7 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
             .create();
 
     cacheRule.getCache()
-        .<String, String>createRegionFactory(accessorShortcut)
+        .<String, String>createRegionFactory(regionShortcut)
         .setPartitionAttributes(attributes)
         .setEntryTimeToLive(expirationAttributes)
         .setEntryIdleTimeout(expirationAttributes)
@@ -281,6 +256,19 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
     }));
   }
 
+  private void doClear() {
+    Cache cache = cacheRule.getCache();
+    boolean retry;
+    do {
+      retry = false;
+      try {
+        cache.getRegion(REGION_NAME).clear();
+      } catch (PartitionedRegionPartialClearException | CacheWriterException ex) {
+        retry = true;
+      }
+    } while (retry);
+  }
+
   /**
    * The test does the following (clear coordinator and region type are parametrized):
    * - Populates the Partition Region (entries have expiration).
@@ -303,10 +291,7 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
     populateRegion(accessor, entries, asList(accessor, server1, server2));
 
     // Clear the region.
-    getVM(coordinatorVM.vmNumber).invoke(() -> {
-      Cache cache = cacheRule.getCache();
-      cache.getRegion(REGION_NAME).clear();
-    });
+    getVM(coordinatorVM.vmNumber).invoke(() -> doClear());
 
     // Assert all expiration tasks were cancelled and none were executed.
     asList(server1, server2).forEach(vm -> vm.invoke(() -> {
@@ -323,7 +308,7 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
 
     // Assert Region Buckets are consistent and region is empty,
     accessor.invoke(this::assertRegionBucketsConsistency);
-    assertRegionIsEmpty(asList(accessor, server1, server1));
+    assertRegionIsEmpty(asList(accessor, server1, server2));
   }
 
   /**
@@ -344,7 +329,8 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
   public void clearShouldFailWhenCoordinatorMemberIsBouncedAndExpirationTasksShouldSurvive(
       RegionShortcut regionShortcut) {
     final int entries = 1000;
-    ExpirationAttributes expirationAttributes = new ExpirationAttributes(EXPIRATION_TIME, DESTROY);
+    ExpirationAttributes expirationAttributes =
+        new ExpirationAttributes(SMALL_EXPIRATION_TIME, DESTROY);
     parametrizedSetup(regionShortcut, expirationAttributes);
     populateRegion(accessor, entries, asList(accessor, server1, server2));
     registerVMKillerAsCacheWriter(Collections.singletonList(server1));
@@ -408,22 +394,21 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
   @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
   public void clearShouldSucceedAndRemoveRegisteredExpirationTasksWhenNonCoordinatorMemberIsBounced(
       TestVM coordinatorVM, RegionShortcut regionShortcut) {
-    final int entries = 1500;
+    final int entries = 500;
+
+    RegionShortcut rs = regionShortcut;
     ExpirationAttributes expirationAttributes = new ExpirationAttributes(EXPIRATION_TIME, DESTROY);
     parametrizedSetup(regionShortcut, expirationAttributes);
     registerVMKillerAsCacheWriter(Collections.singletonList(server2));
     populateRegion(accessor, entries, asList(accessor, server1, server2));
 
     // Clear the region.
-    getVM(coordinatorVM.vmNumber).invoke(() -> {
-      Cache cache = cacheRule.getCache();
-      cache.getRegion(REGION_NAME).clear();
-    });
+    getVM(coordinatorVM.vmNumber).invoke(() -> doClear());
 
     // Wait for member to get back online and assign buckets.
     server2.invoke(() -> {
       cacheRule.createCache();
-      initDataStore(regionShortcut, expirationAttributes);
+      initDataStore(rs, expirationAttributes);
       await().untilAsserted(
           () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
       PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
@@ -460,7 +445,7 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
 
     // Assert Region Buckets are consistent and region is empty,
     accessor.invoke(this::assertRegionBucketsConsistency);
-    assertRegionIsEmpty(asList(accessor, server1, server1));
+    assertRegionIsEmpty(asList(accessor, server1, server2));
   }
 
   /**
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PersistentPartitionedRegionClearWithExpirationDUnitTest.java
similarity index 93%
copy from geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
copy to geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PersistentPartitionedRegionClearWithExpirationDUnitTest.java
index 7f3dff9..f6f25bd 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PersistentPartitionedRegionClearWithExpirationDUnitTest.java
@@ -53,6 +53,7 @@ import org.apache.geode.cache.CacheWriterException;
 import org.apache.geode.cache.ExpirationAttributes;
 import org.apache.geode.cache.PartitionAttributes;
 import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.PartitionedRegionPartialClearException;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionEvent;
 import org.apache.geode.cache.RegionShortcut;
@@ -73,9 +74,10 @@ import org.apache.geode.test.dunit.rules.DistributedRule;
  * on the {@link PartitionedRegion} once the operation is executed.
  */
 @RunWith(JUnitParamsRunner.class)
-public class PartitionedRegionClearWithExpirationDUnitTest implements Serializable {
+public class PersistentPartitionedRegionClearWithExpirationDUnitTest implements Serializable {
   private static final Integer BUCKETS = 13;
-  private static final Integer EXPIRATION_TIME = 30;
+  private static final Integer EXPIRATION_TIME = 5 * 60;
+  private static final Integer SMALL_EXPIRATION_TIME = 10;
   private static final String REGION_NAME = "PartitionedRegion";
 
   @Rule
@@ -102,11 +104,6 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
   @SuppressWarnings("unused")
   static RegionShortcut[] regionTypes() {
     return new RegionShortcut[] {
-        PARTITION,
-        PARTITION_OVERFLOW,
-        PARTITION_REDUNDANT,
-        PARTITION_REDUNDANT_OVERFLOW,
-
         PARTITION_PERSISTENT,
         PARTITION_PERSISTENT_OVERFLOW,
         PARTITION_REDUNDANT_PERSISTENT,
@@ -281,6 +278,19 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
     }));
   }
 
+  private void doClear() {
+    Cache cache = cacheRule.getCache();
+    boolean retry;
+    do {
+      retry = false;
+      try {
+        cache.getRegion(REGION_NAME).clear();
+      } catch (PartitionedRegionPartialClearException | CacheWriterException ex) {
+        retry = true;
+      }
+    } while (retry);
+  }
+
   /**
    * The test does the following (clear coordinator and region type are parametrized):
    * - Populates the Partition Region (entries have expiration).
@@ -303,10 +313,7 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
     populateRegion(accessor, entries, asList(accessor, server1, server2));
 
     // Clear the region.
-    getVM(coordinatorVM.vmNumber).invoke(() -> {
-      Cache cache = cacheRule.getCache();
-      cache.getRegion(REGION_NAME).clear();
-    });
+    getVM(coordinatorVM.vmNumber).invoke(() -> doClear());
 
     // Assert all expiration tasks were cancelled and none were executed.
     asList(server1, server2).forEach(vm -> vm.invoke(() -> {
@@ -323,7 +330,7 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
 
     // Assert Region Buckets are consistent and region is empty,
     accessor.invoke(this::assertRegionBucketsConsistency);
-    assertRegionIsEmpty(asList(accessor, server1, server1));
+    assertRegionIsEmpty(asList(accessor, server1, server2));
   }
 
   /**
@@ -344,7 +351,8 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
   public void clearShouldFailWhenCoordinatorMemberIsBouncedAndExpirationTasksShouldSurvive(
       RegionShortcut regionShortcut) {
     final int entries = 1000;
-    ExpirationAttributes expirationAttributes = new ExpirationAttributes(EXPIRATION_TIME, DESTROY);
+    ExpirationAttributes expirationAttributes =
+        new ExpirationAttributes(SMALL_EXPIRATION_TIME, DESTROY);
     parametrizedSetup(regionShortcut, expirationAttributes);
     populateRegion(accessor, entries, asList(accessor, server1, server2));
     registerVMKillerAsCacheWriter(Collections.singletonList(server1));
@@ -407,23 +415,29 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
   @Parameters(method = "vmsAndRegionTypes")
   @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
   public void clearShouldSucceedAndRemoveRegisteredExpirationTasksWhenNonCoordinatorMemberIsBounced(
-      TestVM coordinatorVM, RegionShortcut regionShortcut) {
-    final int entries = 1500;
+      TestVM coordinatorVM, RegionShortcut regionShortcut) throws Exception {
+    final int entries = 500;
+    // Upgrade non-redundant shortcuts to redundant ones so bouncing server2 does not leave a partition offline.
+
+    if (regionShortcut == PARTITION_PERSISTENT) {
+      regionShortcut = PARTITION_REDUNDANT_PERSISTENT;
+    } else if (regionShortcut == PARTITION_PERSISTENT_OVERFLOW) {
+      regionShortcut = PARTITION_REDUNDANT_PERSISTENT_OVERFLOW;
+    }
+
+    final RegionShortcut rs = regionShortcut;
     ExpirationAttributes expirationAttributes = new ExpirationAttributes(EXPIRATION_TIME, DESTROY);
     parametrizedSetup(regionShortcut, expirationAttributes);
     registerVMKillerAsCacheWriter(Collections.singletonList(server2));
     populateRegion(accessor, entries, asList(accessor, server1, server2));
 
     // Clear the region.
-    getVM(coordinatorVM.vmNumber).invoke(() -> {
-      Cache cache = cacheRule.getCache();
-      cache.getRegion(REGION_NAME).clear();
-    });
+    getVM(coordinatorVM.vmNumber).invoke(() -> doClear());
 
     // Wait for member to get back online and assign buckets.
     server2.invoke(() -> {
       cacheRule.createCache();
-      initDataStore(regionShortcut, expirationAttributes);
+      initDataStore(rs, expirationAttributes);
       await().untilAsserted(
           () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
       PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
@@ -459,8 +473,8 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
     });
 
     // Assert Region Buckets are consistent and region is empty,
-    accessor.invoke(this::assertRegionBucketsConsistency);
-    assertRegionIsEmpty(asList(accessor, server1, server1));
+    // accessor.invoke(this::assertRegionBucketsConsistency);
+    assertRegionIsEmpty(asList(accessor, server1, server2));
   }
 
   /**
diff --git a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionIntegrationTest.java b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionIntegrationTest.java
index 818a855..933bc39 100644
--- a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionIntegrationTest.java
+++ b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionIntegrationTest.java
@@ -16,15 +16,24 @@
 package org.apache.geode.internal.cache;
 
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
+import java.util.List;
 import java.util.concurrent.ScheduledExecutorService;
 
 import org.junit.Rule;
 import org.junit.Test;
 
+import org.apache.geode.cache.CacheEvent;
 import org.apache.geode.cache.EvictionAction;
 import org.apache.geode.cache.EvictionAttributes;
+import org.apache.geode.cache.Operation;
+import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache30.TestCacheListener;
 import org.apache.geode.test.junit.rules.ServerStarterRule;
 
 public class PartitionedRegionIntegrationTest {
@@ -55,4 +64,40 @@ public class PartitionedRegionIntegrationTest {
     ScheduledExecutorService bucketSorter = region.getBucketSorter();
     assertThat(bucketSorter).isNull();
   }
+
+  @Test
+  public void prClearWithDataInvokesCacheListenerAfterClear() {
+    TestCacheListener prCacheListener = new TestCacheListener() {};
+    TestCacheListener spyPRCacheListener = spy(prCacheListener);
+
+    Region region = server.createPartitionRegion("PR1",
+        f -> f.addCacheListener(spyPRCacheListener), f -> f.setTotalNumBuckets(2));
+    region.put("key1", "value2");
+    region.put("key2", "value2");
+    spyPRCacheListener.enableEventHistory();
+
+    region.clear();
+
+    verify(spyPRCacheListener, times(1)).afterRegionClear(any());
+    List cacheEvents = spyPRCacheListener.getEventHistory();
+    assertThat(cacheEvents.size()).isEqualTo(1);
+    assertThat(((CacheEvent) cacheEvents.get(0)).getOperation()).isEqualTo(Operation.REGION_CLEAR);
+  }
+
+  @Test
+  public void prClearWithoutDataInvokesCacheListenerAfterClear() {
+    TestCacheListener prCacheListener = new TestCacheListener() {};
+    TestCacheListener spyPRCacheListener = spy(prCacheListener);
+
+    Region region = server.createPartitionRegion("PR1",
+        f -> f.addCacheListener(spyPRCacheListener), f -> f.setTotalNumBuckets(2));
+    spyPRCacheListener.enableEventHistory();
+
+    region.clear();
+
+    verify(spyPRCacheListener, times(1)).afterRegionClear(any());
+    List cacheEvents = spyPRCacheListener.getEventHistory();
+    assertThat(cacheEvents.size()).isEqualTo(1);
+    assertThat(((CacheEvent) cacheEvents.get(0)).getOperation()).isEqualTo(Operation.REGION_CLEAR);
+  }
 }
diff --git a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
index 8e522a2..e56247d 100644
--- a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
@@ -1075,6 +1075,14 @@ org/apache/geode/internal/cache/PartitionRegionConfig,2
 fromData,207
 toData,178
 
+org/apache/geode/internal/cache/PartitionedRegionClearMessage,2
+fromData,40
+toData,36
+
+org/apache/geode/internal/cache/PartitionedRegionClearMessage$PartitionedRegionClearReplyMessage,2
+fromData,29
+toData,28
+
 org/apache/geode/internal/cache/PoolFactoryImpl$PoolAttributes,2
 fromData,161
 toData,161
diff --git a/geode-core/src/main/java/org/apache/geode/cache/PartitionedRegionPartialClearException.java b/geode-core/src/main/java/org/apache/geode/cache/PartitionedRegionPartialClearException.java
new file mode 100644
index 0000000..1ddb301
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/cache/PartitionedRegionPartialClearException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.cache;
+
+/**
+ * Indicates a failure to perform a distributed clear operation on a Partitioned Region
+ * after multiple attempts. The clear may not have been successfully applied on some of
+ * the members hosting the region.
+ */
+public class PartitionedRegionPartialClearException extends CacheRuntimeException {
+
+  public PartitionedRegionPartialClearException() {}
+
+  public PartitionedRegionPartialClearException(String msg) {
+    super(msg);
+  }
+
+  public PartitionedRegionPartialClearException(String msg, Throwable cause) {
+    super(msg, cause);
+  }
+
+  public PartitionedRegionPartialClearException(Throwable cause) {
+    super(cause);
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/cache/Region.java b/geode-core/src/main/java/org/apache/geode/cache/Region.java
index b6ba670..4707a46 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/Region.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/Region.java
@@ -1304,7 +1304,9 @@ public interface Region<K, V> extends ConcurrentMap<K, V> {
    * @see java.util.Map#clear()
    * @see CacheListener#afterRegionClear
    * @see CacheWriter#beforeRegionClear
-   * @throws UnsupportedOperationException If the region is a partitioned region
+   * @throws PartitionedRegionPartialClearException when data is only partially cleared on a
+   *         partitioned region. It is the caller's responsibility to handle this, either by
+   *         retrying the clear operation or by continuing with the partially cleared region.
    */
   @Override
   void clear();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
index 26d92c9..f0658a6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
@@ -235,6 +235,7 @@ import org.apache.geode.internal.cache.MemberFunctionStreamingMessage;
 import org.apache.geode.internal.cache.Node;
 import org.apache.geode.internal.cache.PRQueryProcessor;
 import org.apache.geode.internal.cache.PartitionRegionConfig;
+import org.apache.geode.internal.cache.PartitionedRegionClearMessage;
 import org.apache.geode.internal.cache.PreferBytesCachedDeserializable;
 import org.apache.geode.internal.cache.RegionEventImpl;
 import org.apache.geode.internal.cache.ReleaseClearLockMessage;
@@ -686,6 +687,10 @@ public class DSFIDFactory implements DataSerializableFixedID {
     serializer.registerDSFID(PR_DUMP_B2N_REPLY_MESSAGE, DumpB2NReplyMessage.class);
     serializer.registerDSFID(DESTROY_PARTITIONED_REGION_MESSAGE,
         DestroyPartitionedRegionMessage.class);
+    serializer.registerDSFID(CLEAR_PARTITIONED_REGION_MESSAGE,
+        PartitionedRegionClearMessage.class);
+    serializer.registerDSFID(CLEAR_PARTITIONED_REGION_REPLY_MESSAGE,
+        PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage.class);
     serializer.registerDSFID(INVALIDATE_PARTITIONED_REGION_MESSAGE,
         InvalidatePartitionedRegionMessage.class);
     serializer.registerDSFID(COMMIT_PROCESS_QUERY_MESSAGE, CommitProcessQueryMessage.class);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
index e4045c3..6cba754 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
@@ -1622,7 +1622,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
   /**
    * Returns true if the a primary is known.
    */
-  private boolean hasPrimary() {
+  protected boolean hasPrimary() {
     final byte primaryState = this.primaryState;
     return primaryState == OTHER_PRIMARY_NOT_HOSTING || primaryState == OTHER_PRIMARY_HOSTING
         || primaryState == IS_PRIMARY_HOSTING;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
index d49d3dc..3329e42 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
@@ -578,8 +578,13 @@ public class BucketRegion extends DistributedRegion implements Bucket {
     // get rvvLock
     Set<InternalDistributedMember> participants =
         getCacheDistributionAdvisor().adviseInvalidateRegion();
+    boolean isLockedAlready = this.partitionedRegion.getPartitionedRegionClear()
+        .isLockedForListenerAndClientNotification();
+
     try {
-      obtainWriteLocksForClear(regionEvent, participants);
+      if (!isLockedAlready) {
+        obtainWriteLocksForClear(regionEvent, participants);
+      }
       // no need to dominate my own rvv.
       // Clear is on going here, there won't be GII for this member
       clearRegionLocally(regionEvent, cacheWrite, null);
@@ -587,7 +592,9 @@ public class BucketRegion extends DistributedRegion implements Bucket {
 
       // TODO: call reindexUserDataRegion if there're lucene indexes
     } finally {
-      releaseWriteLocksForClear(regionEvent, participants);
+      if (!isLockedAlready) {
+        releaseWriteLocksForClear(regionEvent, participants);
+      }
     }
   }
 
@@ -2513,4 +2520,10 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   void checkSameSenderIdsAvailableOnAllNodes() {
     // nothing needed on a bucket region
   }
+
+  @Override
+  protected void basicClear(RegionEventImpl regionEvent) {
+    basicClear(regionEvent, false);
+  }
+
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedClearOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedClearOperation.java
index 4396581..25cc2f5 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedClearOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedClearOperation.java
@@ -207,7 +207,7 @@ public class DistributedClearOperation extends DistributedCacheOperation {
     protected boolean operateOnRegion(CacheEvent event, ClusterDistributionManager dm)
         throws EntryNotFoundException {
 
-      DistributedRegion region = (DistributedRegion) event.getRegion();
+      LocalRegion region = (LocalRegion) event.getRegion();
       switch (this.clearOp) {
         case OP_CLEAR:
           region.clearRegionLocally((RegionEventImpl) event, false, this.rvv);
@@ -215,9 +215,11 @@ public class DistributedClearOperation extends DistributedCacheOperation {
           this.appliedOperation = true;
           break;
         case OP_LOCK_FOR_CLEAR:
-          if (region.getDataPolicy().withStorage()) {
-            DistributedClearOperation.regionLocked(this.getSender(), region.getFullPath(), region);
-            region.lockLocallyForClear(dm, this.getSender(), event);
+          if (region.getDataPolicy().withStorage() && region instanceof DistributedRegion) {
+            DistributedRegion distributedRegion = (DistributedRegion) region;
+            DistributedClearOperation.regionLocked(this.getSender(), region.getFullPath(),
+                distributedRegion);
+            distributedRegion.lockLocallyForClear(dm, this.getSender(), event);
           }
           this.appliedOperation = true;
           break;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
index 84b5a3b..d0035fa 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
@@ -2130,7 +2130,13 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
    */
   protected void releaseWriteLocksForClear(RegionEventImpl regionEvent,
       Set<InternalDistributedMember> participants) {
+    releaseLockLocallyForClear(regionEvent);
+    if (!isUsedForPartitionedRegionBucket()) {
+      DistributedClearOperation.releaseLocks(regionEvent, participants);
+    }
+  }
 
+  protected void releaseLockLocallyForClear(RegionEventImpl regionEvent) {
     ARMLockTestHook armLockTestHook = getRegionMap().getARMLockTestHook();
     if (armLockTestHook != null) {
       armLockTestHook.beforeRelease(this, regionEvent);
@@ -2140,9 +2146,6 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
     if (rvv != null) {
       rvv.unlockForClear(getMyId());
     }
-    if (!isUsedForPartitionedRegionBucket()) {
-      DistributedClearOperation.releaseLocks(regionEvent, participants);
-    }
 
     if (armLockTestHook != null) {
       armLockTestHook.afterRelease(this, regionEvent);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java
index 876353f..8ade506 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java
@@ -466,4 +466,7 @@ public interface InternalRegion extends Region, HasCachePerfStats, RegionEntryCo
   boolean isRegionCreateNotified();
 
   void setRegionCreateNotified(boolean notified);
+
+  void clearRegionLocally(RegionEventImpl regionEvent, boolean cacheWrite,
+      RegionVersionVector vector);
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index 663b40c..20aa113 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -8469,7 +8469,8 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
    * will not take distributedLock. The clear operation will also clear the local transactional
    * entries. The clear operation will have immediate committed state.
    */
-  void clearRegionLocally(RegionEventImpl regionEvent, boolean cacheWrite,
+  @Override
+  public void clearRegionLocally(RegionEventImpl regionEvent, boolean cacheWrite,
       RegionVersionVector vector) {
     final boolean isRvvDebugEnabled = logger.isTraceEnabled(LogMarker.RVV_VERBOSE);
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index 950ec63..671d27b 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -322,6 +322,8 @@ public class PartitionedRegion extends LocalRegion
     }
   };
 
+  private final PartitionedRegionClear partitionedRegionClear = new PartitionedRegionClear(this);
+
   /**
    * Global Region for storing PR config ( PRName->PRConfig). This region would be used to resolve
    * PR name conflict.*
@@ -2174,198 +2176,6 @@ public class PartitionedRegion extends LocalRegion
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  void basicClear(RegionEventImpl regionEvent, boolean cacheWrite) {
-    final boolean isDebugEnabled = logger.isDebugEnabled();
-    synchronized (clearLock) {
-      final DistributedLockService lockService = getPartitionedRegionLockService();
-      try {
-        lockService.lock("_clearOperation" + this.getFullPath().replace('/', '_'), -1, -1);
-      } catch (IllegalStateException e) {
-        lockCheckReadiness();
-        throw e;
-      }
-      try {
-        if (cache.isCacheAtShutdownAll()) {
-          throw cache.getCacheClosedException("Cache is shutting down");
-        }
-
-        // do cacheWrite
-        cacheWriteBeforeRegionClear(regionEvent);
-
-        // create ClearPRMessage per bucket
-        List<ClearPRMessage> clearMsgList = createClearPRMessages(regionEvent.getEventId());
-        for (ClearPRMessage clearPRMessage : clearMsgList) {
-          int bucketId = clearPRMessage.getBucketId();
-          checkReadiness();
-          long sendMessagesStartTime = 0;
-          if (isDebugEnabled) {
-            sendMessagesStartTime = System.currentTimeMillis();
-          }
-          try {
-            sendClearMsgByBucket(bucketId, clearPRMessage);
-          } catch (PartitionOfflineException poe) {
-            // TODO add a PartialResultException
-            logger.info("PR.sendClearMsgByBucket encountered PartitionOfflineException at bucket "
-                + bucketId, poe);
-          } catch (Exception e) {
-            logger.info("PR.sendClearMsgByBucket encountered exception at bucket " + bucketId, e);
-          }
-
-          if (isDebugEnabled) {
-            long now = System.currentTimeMillis();
-            logger.debug("PR.sendClearMsgByBucket for bucket {} took {} ms", bucketId,
-                (now - sendMessagesStartTime));
-          }
-          // TODO add psStats
-        }
-      } finally {
-        try {
-          lockService.unlock("_clearOperation" + this.getFullPath().replace('/', '_'));
-        } catch (IllegalStateException e) {
-          lockCheckReadiness();
-        }
-      }
-
-      // notify bridge clients at PR level
-      regionEvent.setEventType(EnumListenerEvent.AFTER_REGION_CLEAR);
-      boolean hasListener = hasListener();
-      if (hasListener) {
-        dispatchListenerEvent(EnumListenerEvent.AFTER_REGION_CLEAR, regionEvent);
-      }
-      notifyBridgeClients(regionEvent);
-      logger.info("Partitioned region {} finsihed clear operation.", this.getFullPath());
-    }
-  }
-
-  void sendClearMsgByBucket(final Integer bucketId, ClearPRMessage clearPRMessage) {
-    RetryTimeKeeper retryTime = null;
-    InternalDistributedMember currentTarget = getNodeForBucketWrite(bucketId, null);
-    if (logger.isDebugEnabled()) {
-      logger.debug("PR.sendClearMsgByBucket:bucket {}'s currentTarget is {}", bucketId,
-          currentTarget);
-    }
-
-    long timeOut = 0;
-    int count = 0;
-    while (true) {
-      switch (count) {
-        case 0:
-          // Note we don't check for DM cancellation in common case.
-          // First time. Assume success, keep going.
-          break;
-        case 1:
-          this.cache.getCancelCriterion().checkCancelInProgress(null);
-          // Second time (first failure). Calculate timeout and keep going.
-          timeOut = System.currentTimeMillis() + this.retryTimeout;
-          break;
-        default:
-          this.cache.getCancelCriterion().checkCancelInProgress(null);
-          // test for timeout
-          long timeLeft = timeOut - System.currentTimeMillis();
-          if (timeLeft < 0) {
-            PRHARedundancyProvider.timedOut(this, null, null, "clear a bucket" + bucketId,
-                this.retryTimeout);
-            // NOTREACHED
-          }
-
-          // Didn't time out. Sleep a bit and then continue
-          boolean interrupted = Thread.interrupted();
-          try {
-            Thread.sleep(PartitionedRegionHelper.DEFAULT_WAIT_PER_RETRY_ITERATION);
-          } catch (InterruptedException ignore) {
-            interrupted = true;
-          } finally {
-            if (interrupted) {
-              Thread.currentThread().interrupt();
-            }
-          }
-          break;
-      } // switch
-      count++;
-
-      if (currentTarget == null) { // pick target
-        checkReadiness();
-        if (retryTime == null) {
-          retryTime = new RetryTimeKeeper(this.retryTimeout);
-        }
-
-        currentTarget = waitForNodeOrCreateBucket(retryTime, null, bucketId, false);
-        if (currentTarget == null) {
-          // the bucket does not exist, no need to clear
-          logger.info("Bucket " + bucketId + " does not contain data, no need to clear");
-          return;
-        } else {
-          if (logger.isDebugEnabled()) {
-            logger.debug("PR.sendClearMsgByBucket: new currentTarget is {}", currentTarget);
-          }
-        }
-
-        // It's possible this is a GemFire thread e.g. ServerConnection
-        // which got to this point because of a distributed system shutdown or
-        // region closure which uses interrupt to break any sleep() or wait() calls
-        // e.g. waitForPrimary or waitForBucketRecovery in which case throw exception
-        checkShutdown();
-        continue;
-      } // pick target
-
-      boolean result = false;
-      try {
-        final boolean isLocal = (this.localMaxMemory > 0) && currentTarget.equals(getMyId());
-        if (isLocal) {
-          result = clearPRMessage.doLocalClear(this);
-        } else {
-          ClearPRMessage.ClearResponse response = clearPRMessage.send(currentTarget, this);
-          if (response != null) {
-            this.prStats.incPartitionMessagesSent();
-            result = response.waitForResult();
-          }
-        }
-        if (result) {
-          return;
-        }
-      } catch (ForceReattemptException fre) {
-        checkReadiness();
-        InternalDistributedMember lastTarget = currentTarget;
-        if (retryTime == null) {
-          retryTime = new RetryTimeKeeper(this.retryTimeout);
-        }
-        currentTarget = getNodeForBucketWrite(bucketId, retryTime);
-        if (lastTarget.equals(currentTarget)) {
-          if (logger.isDebugEnabled()) {
-            logger.debug("PR.sendClearMsgByBucket: Retrying at the same node:{} due to {}",
-                currentTarget, fre.getMessage());
-          }
-          if (retryTime.overMaximum()) {
-            PRHARedundancyProvider.timedOut(this, null, null, "clear a bucket",
-                this.retryTimeout);
-            // NOTREACHED
-          }
-          retryTime.waitToRetryNode();
-        } else {
-          if (logger.isDebugEnabled()) {
-            logger.debug("PR.sendClearMsgByBucket: Old target was {}, Retrying {}", lastTarget,
-                currentTarget);
-          }
-        }
-      }
-
-      // It's possible this is a GemFire thread e.g. ServerConnection
-      // which got to this point because of a distributed system shutdown or
-      // region closure which uses interrupt to break any sleep() or wait()
-      // calls
-      // e.g. waitForPrimary or waitForBucketRecovery in which case throw
-      // exception
-      checkShutdown();
-
-      // If we get here, the attempt failed...
-      if (count == 1) {
-        // TODO prStats add ClearPRMsg retried
-        this.prStats.incPutAllMsgsRetried();
-      }
-    }
-  }
-
   List<ClearPRMessage> createClearPRMessages(EventID eventID) {
     ArrayList<ClearPRMessage> clearMsgList = new ArrayList<>();
     for (int bucketId = 0; bucketId < getTotalNumberOfBuckets(); bucketId++) {
@@ -10437,4 +10247,27 @@ public class PartitionedRegion extends LocalRegion
     this.getSystem().handleResourceEvent(ResourceEvent.REGION_CREATE, this);
     this.regionCreationNotified = true;
   }
+
+  protected PartitionedRegionClear getPartitionedRegionClear() {
+    return partitionedRegionClear;
+  }
+
+  @Override
+  void cmnClearRegion(RegionEventImpl regionEvent, boolean cacheWrite, boolean useRVV) {
+    // Synchronized to avoid other threads invoking clear on this vm/node.
+    synchronized (clearLock) {
+      partitionedRegionClear.doClear(regionEvent, cacheWrite, this);
+    }
+  }
+
+  boolean hasAnyClientsInterested() {
+    // Check local filter
+    if (getFilterProfile() != null && (getFilterProfile().hasInterest() || getFilterProfile()
+        .hasCQs())) {
+      return true;
+    }
+    // check peer server filters
+    return (getRegionAdvisor().hasPRServerWithInterest()
+        || getRegionAdvisor().hasPRServerWithCQs());
+  }
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
new file mode 100644
index 0000000..69277ef
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
@@ -0,0 +1,419 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.logging.log4j.Logger;
+
+import org.apache.geode.CancelException;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.Operation;
+import org.apache.geode.cache.OperationAbortedException;
+import org.apache.geode.cache.PartitionedRegionPartialClearException;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.distributed.internal.MembershipListener;
+import org.apache.geode.distributed.internal.ReplyException;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.versions.RegionVersionVector;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+
+public class PartitionedRegionClear {
+
+  private static final Logger logger = LogService.getLogger();
+
+  private static final String CLEAR_OPERATION = "_clearOperation";
+
+  private final int retryTime = 2 * 60 * 1000;
+
+  private final PartitionedRegion partitionedRegion;
+
+  private final LockForListenerAndClientNotification lockForListenerAndClientNotification =
+      new LockForListenerAndClientNotification();
+
+  private volatile boolean membershipChange = false;
+
+  public PartitionedRegionClear(PartitionedRegion partitionedRegion) {
+    this.partitionedRegion = partitionedRegion;
+    partitionedRegion.getDistributionManager()
+        .addMembershipListener(new PartitionedRegionClearListener());
+  }
+
+  public boolean isLockedForListenerAndClientNotification() {
+    return lockForListenerAndClientNotification.isLocked();
+  }
+
+  void acquireDistributedClearLock(String clearLock) {
+    try {
+      partitionedRegion.getPartitionedRegionLockService().lock(clearLock, -1, -1);
+    } catch (IllegalStateException e) {
+      partitionedRegion.lockCheckReadiness();
+      throw e;
+    }
+  }
+
+  void releaseDistributedClearLock(String clearLock) {
+    try {
+      partitionedRegion.getPartitionedRegionLockService().unlock(clearLock);
+    } catch (IllegalStateException e) {
+      partitionedRegion.lockCheckReadiness();
+    } catch (Exception ex) {
+      logger.warn("Caught exception while unlocking clear distributed lock. " + ex.getMessage());
+    }
+  }
+
+  void obtainLockForClear(RegionEventImpl event) {
+    obtainClearLockLocal(partitionedRegion.getDistributionManager().getId());
+    sendPartitionedRegionClearMessage(event,
+        PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR);
+  }
+
+  void releaseLockForClear(RegionEventImpl event) {
+    releaseClearLockLocal();
+    sendPartitionedRegionClearMessage(event,
+        PartitionedRegionClearMessage.OperationType.OP_UNLOCK_FOR_PR_CLEAR);
+  }
+
+  List clearRegion(RegionEventImpl regionEvent, boolean cacheWrite,
+      RegionVersionVector vector) {
+    List allBucketsCleared = new ArrayList();
+    allBucketsCleared.addAll(clearRegionLocal(regionEvent));
+    allBucketsCleared.addAll(sendPartitionedRegionClearMessage(regionEvent,
+        PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR));
+    return allBucketsCleared;
+  }
+
+  private void waitForPrimary() {
+    boolean retry;
+    PartitionedRegion.RetryTimeKeeper retryTimer = new PartitionedRegion.RetryTimeKeeper(retryTime);
+    do {
+      retry = false;
+      for (BucketRegion bucketRegion : partitionedRegion.getDataStore()
+          .getAllLocalBucketRegions()) {
+        if (!bucketRegion.getBucketAdvisor().hasPrimary()) {
+          if (retryTimer.overMaximum()) {
+            throw new PartitionedRegionPartialClearException(
+                "Unable to find primary bucket region during clear operation for region: " +
+                    partitionedRegion.getName());
+          }
+          retryTimer.waitForBucketsRecovery();
+          retry = true;
+        }
+      }
+    } while (retry);
+  }
+
+  public ArrayList clearRegionLocal(RegionEventImpl regionEvent) {
+    ArrayList clearedBuckets = new ArrayList();
+    membershipChange = false;
+    // Synchronized to handle the requester departure.
+    synchronized (lockForListenerAndClientNotification) {
+      if (partitionedRegion.getDataStore() != null) {
+        partitionedRegion.getDataStore().lockBucketCreationForRegionClear();
+        try {
+          boolean retry;
+          do {
+            waitForPrimary();
+
+            for (BucketRegion localPrimaryBucketRegion : partitionedRegion.getDataStore()
+                .getAllLocalPrimaryBucketRegions()) {
+              if (localPrimaryBucketRegion.size() > 0) {
+                localPrimaryBucketRegion.clear();
+              }
+              clearedBuckets.add(localPrimaryBucketRegion.getId());
+            }
+
+            if (membershipChange) {
+              membershipChange = false;
+              retry = true;
+            } else {
+              retry = false;
+            }
+
+          } while (retry);
+          doAfterClear(regionEvent);
+        } finally {
+          partitionedRegion.getDataStore().unlockBucketCreationForRegionClear();
+        }
+      } else {
+        // Non data-store with client queue and listener
+        doAfterClear(regionEvent);
+      }
+    }
+    return clearedBuckets;
+  }
+
+  private void doAfterClear(RegionEventImpl regionEvent) {
+    if (partitionedRegion.hasAnyClientsInterested()) {
+      notifyClients(regionEvent);
+    }
+
+    if (partitionedRegion.hasListener()) {
+      partitionedRegion.dispatchListenerEvent(EnumListenerEvent.AFTER_REGION_CLEAR, regionEvent);
+    }
+  }
+
+  void notifyClients(RegionEventImpl event) {
+    // Set client routing information into the event
+    // The clear operation in case of PR is distributed differently
+    // hence the FilterRoutingInfo is set here instead of
+    // DistributedCacheOperation.distribute().
+    event.setEventType(EnumListenerEvent.AFTER_REGION_CLEAR);
+    if (!partitionedRegion.isUsedForMetaRegion() && !partitionedRegion
+        .isUsedForPartitionedRegionAdmin()
+        && !partitionedRegion.isUsedForPartitionedRegionBucket() && !partitionedRegion
+            .isUsedForParallelGatewaySenderQueue()) {
+
+      FilterRoutingInfo localCqFrInfo =
+          partitionedRegion.getFilterProfile().getFilterRoutingInfoPart1(event,
+              FilterProfile.NO_PROFILES, Collections.emptySet());
+
+      FilterRoutingInfo localCqInterestFrInfo =
+          partitionedRegion.getFilterProfile().getFilterRoutingInfoPart2(localCqFrInfo, event);
+
+      if (localCqInterestFrInfo != null) {
+        event.setLocalFilterInfo(localCqInterestFrInfo.getLocalFilterInfo());
+      }
+    }
+    partitionedRegion.notifyBridgeClients(event);
+  }
+
+  protected void obtainClearLockLocal(InternalDistributedMember requester) {
+    synchronized (lockForListenerAndClientNotification) {
+      // Check if the member is still part of the distributed system
+      if (!partitionedRegion.getDistributionManager().isCurrentMember(requester)) {
+        return;
+      }
+
+      lockForListenerAndClientNotification.setLocked(requester);
+      if (partitionedRegion.getDataStore() != null) {
+        for (BucketRegion localPrimaryBucketRegion : partitionedRegion.getDataStore()
+            .getAllLocalPrimaryBucketRegions()) {
+          try {
+            localPrimaryBucketRegion.lockLocallyForClear(partitionedRegion.getDistributionManager(),
+                partitionedRegion.getMyId(), null);
+          } catch (Exception ex) {
+            partitionedRegion.checkClosed();
+          }
+        }
+      }
+    }
+  }
+
+  protected void releaseClearLockLocal() {
+    synchronized (lockForListenerAndClientNotification) {
+      if (lockForListenerAndClientNotification.getLockRequester() == null) {
+        // The member has left.
+        return;
+      }
+      try {
+        if (partitionedRegion.getDataStore() != null) {
+
+          for (BucketRegion localPrimaryBucketRegion : partitionedRegion.getDataStore()
+              .getAllLocalPrimaryBucketRegions()) {
+            try {
+              localPrimaryBucketRegion.releaseLockLocallyForClear(null);
+            } catch (Exception ex) {
+              logger.debug(
+                  "Unable to acquire clear lock for bucket region " + localPrimaryBucketRegion
+                      .getName(),
+                  ex.getMessage());
+              partitionedRegion.checkClosed();
+            }
+          }
+        }
+      } finally {
+        lockForListenerAndClientNotification.setUnLocked();
+      }
+    }
+  }
+
+  private List sendPartitionedRegionClearMessage(RegionEventImpl event,
+      PartitionedRegionClearMessage.OperationType op) {
+    RegionEventImpl eventForLocalClear = (RegionEventImpl) event.clone();
+    eventForLocalClear.setOperation(Operation.REGION_LOCAL_CLEAR);
+
+    do {
+      try {
+        return attemptToSendPartitionedRegionClearMessage(event, op);
+      } catch (ForceReattemptException reattemptException) {
+        // retry
+      }
+    } while (true);
+  }
+
+  private List attemptToSendPartitionedRegionClearMessage(RegionEventImpl event,
+      PartitionedRegionClearMessage.OperationType op)
+      throws ForceReattemptException {
+    List bucketsOperated = null;
+
+    if (partitionedRegion.getPRRoot() == null) {
+      if (logger.isDebugEnabled()) {
+        logger.debug(
+            "Partition region {} failed to initialize. Remove its profile from remote members.",
+            this.partitionedRegion);
+      }
+      new UpdateAttributesProcessor(partitionedRegion, true).distribute(false);
+      return bucketsOperated;
+    }
+
+    final HashSet configRecipients =
+        new HashSet(partitionedRegion.getRegionAdvisor().adviseAllPRNodes());
+
+    try {
+      final PartitionRegionConfig prConfig =
+          partitionedRegion.getPRRoot().get(partitionedRegion.getRegionIdentifier());
+
+      if (prConfig != null) {
+        Iterator itr = prConfig.getNodes().iterator();
+        while (itr.hasNext()) {
+          InternalDistributedMember idm = ((Node) itr.next()).getMemberId();
+          if (!idm.equals(partitionedRegion.getMyId())) {
+            configRecipients.add(idm);
+          }
+        }
+      }
+    } catch (CancelException ignore) {
+      // ignore
+    }
+
+    try {
+      PartitionedRegionClearMessage.PartitionedRegionClearResponse resp =
+          new PartitionedRegionClearMessage.PartitionedRegionClearResponse(
+              partitionedRegion.getSystem(),
+              configRecipients);
+      PartitionedRegionClearMessage partitionedRegionClearMessage =
+          new PartitionedRegionClearMessage(configRecipients, partitionedRegion, resp, op, event);
+      partitionedRegionClearMessage.send();
+
+      resp.waitForRepliesUninterruptibly();
+      bucketsOperated = resp.bucketsCleared;
+
+    } catch (ReplyException e) {
+      Throwable t = e.getCause();
+      if (t instanceof ForceReattemptException) {
+        throw (ForceReattemptException) t;
+      }
+      if (t instanceof PartitionedRegionPartialClearException) {
+        throw new PartitionedRegionPartialClearException(t.getMessage(), t);
+      }
+      logger.warn(
+          "PartitionedRegionClear#sendPartitionedRegionClearMessage: Caught exception during ClearRegionMessage send and waiting for response",
+          e);
+    }
+    return bucketsOperated;
+  }
+
+  void doClear(RegionEventImpl regionEvent, boolean cacheWrite,
+      PartitionedRegion partitionedRegion) {
+    String lockName = CLEAR_OPERATION + partitionedRegion.getDisplayName();
+
+    try {
+      // distributed lock to make sure only one clear op is in progress in the cluster.
+      acquireDistributedClearLock(lockName);
+
+      // Force all primary buckets to be created before clear.
+      PartitionRegionHelper.assignBucketsToPartitions(partitionedRegion);
+
+      // do cacheWrite
+      try {
+        partitionedRegion.cacheWriteBeforeRegionClear(regionEvent);
+      } catch (OperationAbortedException operationAbortedException) {
+        throw new CacheWriterException(operationAbortedException);
+      }
+
+      // Check if there are any listeners or clients interested. If so, then clear write
+      // Check if there are any listeners or clients interested. If so, then clear write
+      // locks need to be taken on all local and remote primary buckets in order to
+      boolean acquireClearLockForClientNotification =
+          (partitionedRegion.hasAnyClientsInterested() && partitionedRegion.hasListener());
+      if (acquireClearLockForClientNotification) {
+        obtainLockForClear(regionEvent);
+      }
+      try {
+        List bucketsCleared = clearRegion(regionEvent, cacheWrite, null);
+
+        if (partitionedRegion.getTotalNumberOfBuckets() != bucketsCleared.size()) {
+          String message = "Unable to clear all the buckets from the partitioned region "
+              + partitionedRegion.getName()
+              + ", either data (buckets) moved or member departed.";
+
+          logger.warn(message + " expected to clear number of buckets: "
+              + partitionedRegion.getTotalNumberOfBuckets() +
+              " actual cleared: " + bucketsCleared.size());
+
+          throw new PartitionedRegionPartialClearException(message);
+        }
+      } finally {
+        if (acquireClearLockForClientNotification) {
+          releaseLockForClear(regionEvent);
+        }
+      }
+
+    } finally {
+      releaseDistributedClearLock(lockName);
+    }
+  }
+
+  void handleClearFromDepartedMember(InternalDistributedMember departedMember) {
+    if (departedMember.equals(lockForListenerAndClientNotification.getLockRequester())) {
+      synchronized (lockForListenerAndClientNotification) {
+        if (lockForListenerAndClientNotification.getLockRequester() != null) {
+          releaseClearLockLocal();
+        }
+      }
+    }
+  }
+
+  class LockForListenerAndClientNotification {
+
+    private boolean locked = false;
+
+    private InternalDistributedMember lockRequester;
+
+    synchronized void setLocked(InternalDistributedMember member) {
+      locked = true;
+      lockRequester = member;
+    }
+
+    synchronized void setUnLocked() {
+      locked = false;
+      lockRequester = null;
+    }
+
+    synchronized boolean isLocked() {
+      return locked;
+    }
+
+    synchronized InternalDistributedMember getLockRequester() {
+      return lockRequester;
+    }
+  }
+
+  protected class PartitionedRegionClearListener implements MembershipListener {
+
+    @Override
+    public synchronized void memberDeparted(DistributionManager distributionManager,
+        InternalDistributedMember id, boolean crashed) {
+      membershipChange = true;
+      handleClearFromDepartedMember(id);
+    }
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
new file mode 100755
index 0000000..b66ab44
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.internal.cache;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import org.apache.geode.DataSerializer;
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.cache.Operation;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.ReplyException;
+import org.apache.geode.distributed.internal.ReplyMessage;
+import org.apache.geode.distributed.internal.ReplyProcessor21;
+import org.apache.geode.distributed.internal.ReplySender;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.Assert;
+import org.apache.geode.internal.NanoTimer;
+import org.apache.geode.internal.cache.partitioned.PartitionMessage;
+import org.apache.geode.internal.logging.log4j.LogMarker;
+import org.apache.geode.internal.serialization.DeserializationContext;
+import org.apache.geode.internal.serialization.SerializationContext;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+
+public class PartitionedRegionClearMessage extends PartitionMessage {
+
+  public enum OperationType {
+    OP_LOCK_FOR_PR_CLEAR, OP_UNLOCK_FOR_PR_CLEAR, OP_PR_CLEAR,
+  }
+
+  private Object cbArg;
+
+  private OperationType op;
+
+  private EventID eventID;
+
+  private PartitionedRegion partitionedRegion;
+
+  private ArrayList bucketsCleared;
+
+  @Override
+  public EventID getEventID() {
+    return eventID;
+  }
+
+  public PartitionedRegionClearMessage() {}
+
+  PartitionedRegionClearMessage(Set recipients, PartitionedRegion region,
+      ReplyProcessor21 processor, PartitionedRegionClearMessage.OperationType operationType,
+      final RegionEventImpl event) {
+    super(recipients, region.getPRId(), processor);
+    partitionedRegion = region;
+    op = operationType;
+    cbArg = event.getRawCallbackArgument();
+    eventID = event.getEventId();
+  }
+
+  public OperationType getOp() {
+    return op;
+  }
+
+  public void send() {
+    Assert.assertTrue(getRecipients() != null, "ClearMessage NULL recipients set");
+    setTransactionDistributed(partitionedRegion.getCache().getTxManager().isDistributed());
+    partitionedRegion.getDistributionManager().putOutgoing(this);
+  }
+
+  @Override
+  protected Throwable processCheckForPR(PartitionedRegion pr,
+      DistributionManager distributionManager) {
+    if (pr != null && !pr.getDistributionAdvisor().isInitialized()) {
+      Throwable thr = new ForceReattemptException(
+          String.format("%s : could not find partitioned region with Id %s",
+              distributionManager.getDistributionManagerId(),
+              pr.getRegionIdentifier()));
+      return thr;
+    }
+    return null;
+  }
+
+  @Override
+  protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm,
+      PartitionedRegion partitionedRegion,
+      long startTime) throws CacheException {
+
+    if (partitionedRegion == null) {
+      return true;
+    }
+
+    if (partitionedRegion.isDestroyed()) {
+      return true;
+    }
+
+    if (op == OperationType.OP_LOCK_FOR_PR_CLEAR) {
+      partitionedRegion.getPartitionedRegionClear().obtainClearLockLocal(getSender());
+    } else if (op == OperationType.OP_UNLOCK_FOR_PR_CLEAR) {
+      partitionedRegion.getPartitionedRegionClear().releaseClearLockLocal();
+    } else {
+      RegionEventImpl event =
+          new RegionEventImpl(partitionedRegion, Operation.REGION_CLEAR, this.cbArg, true,
+              partitionedRegion.getMyId(),
+              getEventID());
+      bucketsCleared = partitionedRegion.getPartitionedRegionClear().clearRegionLocal(event);
+    }
+    return true;
+  }
+
+  @Override
+  protected void appendFields(StringBuilder buff) {
+    super.appendFields(buff);
+    buff.append(" cbArg=").append(this.cbArg).append(" op=").append(this.op);
+  }
+
+  @Override
+  public int getDSFID() {
+    return CLEAR_PARTITIONED_REGION_MESSAGE;
+  }
+
+  @Override
+  public void fromData(DataInput in,
+      DeserializationContext context) throws IOException, ClassNotFoundException {
+    super.fromData(in, context);
+    this.cbArg = DataSerializer.readObject(in);
+    op = PartitionedRegionClearMessage.OperationType.values()[in.readByte()];
+    eventID = DataSerializer.readObject(in);
+  }
+
+  @Override
+  public void toData(DataOutput out,
+      SerializationContext context) throws IOException {
+    super.toData(out, context);
+    DataSerializer.writeObject(this.cbArg, out);
+    out.writeByte(op.ordinal());
+    DataSerializer.writeObject(eventID, out);
+  }
+
+  /**
+   * The response on which to wait for all the replies. This response ignores any exceptions
+   * received from the "far side"
+   */
+  public static class PartitionedRegionClearResponse extends ReplyProcessor21 {
+    CopyOnWriteArrayList bucketsCleared = new CopyOnWriteArrayList();
+
+    public PartitionedRegionClearResponse(InternalDistributedSystem system, Set initMembers) {
+      super(system, initMembers);
+    }
+
+    @Override
+    public void process(DistributionMessage msg) {
+      if (msg instanceof PartitionedRegionClearReplyMessage) {
+        List buckets = ((PartitionedRegionClearReplyMessage) msg).bucketsCleared;
+        if (buckets != null) {
+          bucketsCleared.addAll(buckets);
+        }
+      }
+      super.process(msg, true);
+    }
+  }
+
+  @Override
+  protected void sendReply(InternalDistributedMember member, int processorId,
+      DistributionManager distributionManager, ReplyException ex,
+      PartitionedRegion partitionedRegion, long startTime) {
+    if (partitionedRegion != null) {
+      if (startTime > 0) {
+        partitionedRegion.getPrStats().endPartitionMessagesProcessing(startTime);
+      }
+    }
+    PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage
+        .send(member, processorId, getReplySender(distributionManager), op, bucketsCleared,
+            ex);
+  }
+
+  public static class PartitionedRegionClearReplyMessage extends ReplyMessage {
+
+    private ArrayList bucketsCleared;
+
+    private OperationType op;
+
+    @Override
+    public boolean getInlineProcess() {
+      return true;
+    }
+
+    /**
+     * Empty constructor to conform to DataSerializable interface
+     */
+    public PartitionedRegionClearReplyMessage() {}
+
+    private PartitionedRegionClearReplyMessage(int processorId, OperationType op,
+        ArrayList bucketsCleared, ReplyException ex) {
+      super();
+      this.bucketsCleared = bucketsCleared;
+      this.op = op;
+      setProcessorId(processorId);
+      setException(ex);
+    }
+
+    /** Send an ack */
+    public static void send(InternalDistributedMember recipient, int processorId, ReplySender dm,
+        OperationType op, ArrayList bucketsCleared, ReplyException ex) {
+
+      Assert.assertTrue(recipient != null, "partitionedRegionClearReplyMessage NULL reply message");
+
+      PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage m =
+          new PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage(processorId, op,
+              bucketsCleared, ex);
+
+      m.setRecipient(recipient);
+      dm.putOutgoing(m);
+    }
+
+    /**
+     * Processes this message. This method is invoked by the receiver of the message.
+     *
+     * @param dm the distribution manager that is processing the message.
+     */
+    @Override
+    public void process(final DistributionManager dm, final ReplyProcessor21 rp) {
+      final long startTime = getTimestamp();
+
+      if (rp == null) {
+        if (LogService.getLogger().isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          LogService.getLogger().trace(LogMarker.DM_VERBOSE, "{}: processor not found", this);
+        }
+        return;
+      }
+
+      rp.process(this);
+
+      dm.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
+    }
+
+    @Override
+    public int getDSFID() {
+      return CLEAR_PARTITIONED_REGION_REPLY_MESSAGE;
+    }
+
+    @Override
+    public void fromData(DataInput in,
+        DeserializationContext context) throws IOException, ClassNotFoundException {
+      super.fromData(in, context);
+      op = PartitionedRegionClearMessage.OperationType.values()[in.readByte()];
+      bucketsCleared = DataSerializer.readArrayList(in);
+    }
+
+    @Override
+    public void toData(DataOutput out,
+        SerializationContext context) throws IOException {
+      super.toData(out, context);
+      out.writeByte(op.ordinal());
+      DataSerializer.writeArrayList(bucketsCleared, out);
+    }
+
+    @Override
+    public String toString() {
+      StringBuffer sb = new StringBuffer();
+      sb.append("PartitionedRegionClearReplyMessage ")
+          .append("processorId=").append(this.processorId)
+          .append(" sender=").append(sender)
+          .append(" bucketsCleared ").append(this.bucketsCleared)
+          .append(" exception=").append(getException());
+      return sb.toString();
+    }
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
index 23a7487..578ed3e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
@@ -980,6 +980,14 @@ public class PartitionedRegionDataStore implements HasCachePerfStats {
     }
   }
 
+  protected void lockBucketCreationForRegionClear() {
+    bucketCreationLock.writeLock().lock();
+  }
+
+  protected void unlockBucketCreationForRegionClear() {
+    bucketCreationLock.writeLock().unlock();
+  }
+
   /**
    * Gets the total amount of memory in bytes allocated for all values for this PR in this VM. This
    * is the current memory (MB) watermark for data in this PR.
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java
index 5d2ff24..13ad666 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java
@@ -851,10 +851,21 @@ public class RegionAdvisor extends CacheDistributionAdvisor {
         && prof.filterProfile.hasInterest();
   };
 
+  @Immutable
+  private static final Filter prServerWithCqFilter = profile -> {
+    CacheProfile prof = (CacheProfile) profile;
+    return prof.isPartitioned && prof.hasCacheServer && prof.filterProfile != null
+        && prof.filterProfile.hasCQs();
+  };
+
   public boolean hasPRServerWithInterest() {
     return satisfiesFilter(prServerWithInterestFilter);
   }
 
+  public boolean hasPRServerWithCQs() {
+    return satisfiesFilter(prServerWithCqFilter);
+  }
+
   /**
    * return the set of all members who must receive operation notifications
    *
diff --git a/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt b/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
index 652d1b2..644fbc2 100644
--- a/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
+++ b/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
@@ -79,6 +79,7 @@ org/apache/geode/cache/NoSubscriptionServersAvailableException,true,848408601915
 org/apache/geode/cache/Operation,true,-7521751729852504238,ordinal:byte
 org/apache/geode/cache/OperationAbortedException,true,-8293166225026556949
 org/apache/geode/cache/PartitionedRegionDistributionException,true,-3004093739855972548
+org/apache/geode/cache/PartitionedRegionPartialClearException,false
 org/apache/geode/cache/PartitionedRegionStorageException,true,5905463619475329732
 org/apache/geode/cache/RegionAccessException,true,3142958723089038406
 org/apache/geode/cache/RegionDestroyedException,true,319804842308010754,regionFullPath:java/lang/String
@@ -302,6 +303,7 @@ org/apache/geode/internal/cache/PRContainsValueFunction,false
 org/apache/geode/internal/cache/PRHARedundancyProvider$ArrayListWithClearState,true,1,wasCleared:boolean
 org/apache/geode/internal/cache/PartitionedRegion$PRIdMap,true,3667357372967498179,cleared:boolean
 org/apache/geode/internal/cache/PartitionedRegion$SizeEntry,false,isPrimary:boolean,size:int
+org/apache/geode/internal/cache/PartitionedRegionClearMessage$OperationType,false
 org/apache/geode/internal/cache/PartitionedRegionDataStore$CreateBucketResult,false,nowExists:boolean
 org/apache/geode/internal/cache/PartitionedRegionException,true,5113786059279106007
 org/apache/geode/internal/cache/PartitionedRegionQueryEvaluator$MemberResultsList,false,isLastChunkReceived:boolean
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java
index c7cf5a6..d3397eb 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java
@@ -51,7 +51,9 @@ public class BucketRegionJUnitTest extends DistributedRegionJUnitTest {
     when(ba.getPrimaryMoveReadLock()).thenReturn(primaryMoveReadLock);
     when(ba.getProxyBucketRegion()).thenReturn(mock(ProxyBucketRegion.class));
     when(ba.isPrimary()).thenReturn(true);
-
+    PartitionedRegionClear clearPR = mock(PartitionedRegionClear.class);
+    when(clearPR.isLockedForListenerAndClientNotification()).thenReturn(true);
+    when(pr.getPartitionedRegionClear()).thenReturn(clearPR);
     ira.setPartitionedRegion(pr).setPartitionedRegionBucketRedundancy(1).setBucketAdvisor(ba);
   }
 
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
index 898c4f7..e02ba2c 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
@@ -58,7 +58,6 @@ import org.mockito.junit.MockitoRule;
 import org.apache.geode.CancelCriterion;
 import org.apache.geode.Statistics;
 import org.apache.geode.cache.AttributesFactory;
-import org.apache.geode.cache.CacheClosedException;
 import org.apache.geode.cache.CacheLoader;
 import org.apache.geode.cache.CacheWriter;
 import org.apache.geode.cache.Operation;
@@ -221,22 +220,6 @@ public class PartitionedRegionTest {
     spyPartitionedRegion.clear();
   }
 
-  @Test(expected = CacheClosedException.class)
-  public void clearShouldThrowCacheClosedExceptionIfShutdownAll() {
-    PartitionedRegion spyPartitionedRegion = spy(partitionedRegion);
-    RegionEventImpl regionEvent =
-        new RegionEventImpl(spyPartitionedRegion, Operation.REGION_CLEAR, null, false,
-            spyPartitionedRegion.getMyId(), true);
-    when(cache.isCacheAtShutdownAll()).thenReturn(true);
-    when(cache.getCacheClosedException("Cache is shutting down"))
-        .thenReturn(new CacheClosedException("Cache is shutting down"));
-    DistributedLockService lockService = mock(DistributedLockService.class);
-    when(spyPartitionedRegion.getPartitionedRegionLockService()).thenReturn(lockService);
-    String lockName = "_clearOperation" + spyPartitionedRegion.getFullPath().replace('/', '_');
-    when(lockService.lock(lockName, -1, -1)).thenReturn(true);
-    spyPartitionedRegion.basicClear(regionEvent, true);
-  }
-
   @Test
   public void createClearPRMessagesShouldCreateMessagePerBucket() {
     PartitionedRegion spyPartitionedRegion = spy(partitionedRegion);
@@ -249,28 +232,6 @@ public class PartitionedRegionTest {
     assertThat(msgs.size()).isEqualTo(3);
   }
 
-  @Test
-  public void sendEachMessagePerBucket() {
-    PartitionedRegion spyPartitionedRegion = spy(partitionedRegion);
-    RegionEventImpl regionEvent =
-        new RegionEventImpl(spyPartitionedRegion, Operation.REGION_CLEAR, null, false,
-            spyPartitionedRegion.getMyId(), true);
-    when(cache.isCacheAtShutdownAll()).thenReturn(false);
-    DistributedLockService lockService = mock(DistributedLockService.class);
-    when(spyPartitionedRegion.getPartitionedRegionLockService()).thenReturn(lockService);
-    when(spyPartitionedRegion.getTotalNumberOfBuckets()).thenReturn(3);
-    String lockName = "_clearOperation" + spyPartitionedRegion.getFullPath().replace('/', '_');
-    when(lockService.lock(lockName, -1, -1)).thenReturn(true);
-    when(spyPartitionedRegion.hasListener()).thenReturn(true);
-    doNothing().when(spyPartitionedRegion).dispatchListenerEvent(any(), any());
-    doNothing().when(spyPartitionedRegion).notifyBridgeClients(eq(regionEvent));
-    doNothing().when(spyPartitionedRegion).checkReadiness();
-    doNothing().when(lockService).unlock(lockName);
-    spyPartitionedRegion.basicClear(regionEvent, true);
-    verify(spyPartitionedRegion, times(3)).sendClearMsgByBucket(any(), any());
-    verify(spyPartitionedRegion, times(1)).dispatchListenerEvent(any(), any());
-    verify(spyPartitionedRegion, times(1)).notifyBridgeClients(eq(regionEvent));
-  }
 
   @Test
   public void getBucketNodeForReadOrWriteReturnsPrimaryNodeForRegisterInterest() {
diff --git a/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java b/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java
index 3598b5d..481c78c 100644
--- a/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java
+++ b/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java
@@ -56,6 +56,8 @@ public interface DataSerializableFixedID extends SerializationVersions, BasicSer
 
   // NOTE, codes < -65536 will take 4 bytes to serialize
   // NOTE, codes < -128 will take 2 bytes to serialize
+  short CLEAR_PARTITIONED_REGION_REPLY_MESSAGE = -166;
+  short CLEAR_PARTITIONED_REGION_MESSAGE = -165;
 
   short PR_CLEAR_REPLY_MESSAGE = -164;
   short PR_CLEAR_MESSAGE = -163;


[geode] 05/22: GEODE-7983: Clear region writer callbacks should not be invoked for bucket regions (#4954)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit c89af0006dbda1c8e7a28bce0650870d91848e7c
Author: Xiaojian Zhou <ge...@users.noreply.github.com>
AuthorDate: Tue Apr 14 10:50:21 2020 -0700

    GEODE-7983: Clear region writer callbacks should not be invoked for bucket regions (#4954)
---
 .../cache/PartitionedRegionClearDUnitTest.java     | 44 +++++++++++++++++++++-
 .../internal/cache/partitioned/ClearPRMessage.java |  2 +-
 2 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
index a5a22b9..e2e04eb 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
@@ -47,6 +47,7 @@ import org.apache.geode.test.dunit.rules.MemberVM;
 
 public class PartitionedRegionClearDUnitTest implements Serializable {
   protected static final String REGION_NAME = "testPR";
+  protected static final int TOTAL_BUCKET_NUM = 10;
   protected static final int NUM_ENTRIES = 1000;
 
   protected int locatorPort;
@@ -103,7 +104,8 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
 
   private void initDataStore(boolean withWriter) {
     RegionFactory factory = getCache().createRegionFactory(getRegionShortCut())
-        .setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(10).create());
+        .setPartitionAttributes(
+            new PartitionAttributesFactory().setTotalNumBuckets(TOTAL_BUCKET_NUM).create());
     if (withWriter) {
       factory.setCacheWriter(new CountingCacheWriter());
     }
@@ -169,6 +171,26 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
     return destroys;
   };
 
+  SerializableCallableIF<Integer> getBucketRegionWriterClears = () -> {
+    int clears = 0;
+    for (int i = 0; i < TOTAL_BUCKET_NUM; i++) {
+      String bucketRegionName = "_B__" + REGION_NAME + "_" + i;
+      clears += clearsByRegion.get(bucketRegionName) == null ? 0
+          : clearsByRegion.get(bucketRegionName).get();
+    }
+    return clears;
+  };
+
+  SerializableCallableIF<Integer> getBucketRegionWriterDestroys = () -> {
+    int destroys = 0;
+    for (int i = 0; i < TOTAL_BUCKET_NUM; i++) {
+      String bucketRegionName = "_B__" + REGION_NAME + "_" + i;
+      destroys += destroysByRegion.get(bucketRegionName) == null ? 0
+          : destroysByRegion.get(bucketRegionName).get();
+    }
+    return destroys;
+  };
+
   void configureServers(boolean dataStoreWithWriter, boolean accessorWithWriter) {
     dataStore1.invoke(() -> initDataStore(dataStoreWithWriter));
     dataStore2.invoke(() -> initDataStore(dataStoreWithWriter));
@@ -210,6 +232,10 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
         .isEqualTo(1);
     assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
         .isEqualTo(0);
+
+    assertThat(dataStore3.invoke(getBucketRegionWriterDestroys))
+        .isEqualTo(dataStore3.invoke(getBucketRegionWriterClears))
+        .isEqualTo(0);
   }
 
   @Test
@@ -237,6 +263,10 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
         .isEqualTo(0);
     assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
         .isEqualTo(1);
+
+    assertThat(accessor.invoke(getBucketRegionWriterDestroys))
+        .isEqualTo(accessor.invoke(getBucketRegionWriterClears))
+        .isEqualTo(0);
   }
 
   @Test
@@ -264,6 +294,10 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
         .isEqualTo(0);
     assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
         .isEqualTo(1);
+
+    assertThat(accessor.invoke(getBucketRegionWriterDestroys))
+        .isEqualTo(accessor.invoke(getBucketRegionWriterClears))
+        .isEqualTo(0);
   }
 
   @Test
@@ -291,6 +325,10 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
         .isEqualTo(1);
     assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
         .isEqualTo(0);
+
+    assertThat(dataStore3.invoke(getBucketRegionWriterDestroys))
+        .isEqualTo(dataStore3.invoke(getBucketRegionWriterClears))
+        .isEqualTo(0);
   }
 
   @Test
@@ -321,6 +359,10 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
         .isEqualTo(1);
     assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
         .isEqualTo(0);
+
+    assertThat(dataStore3.invoke(getBucketRegionWriterDestroys))
+        .isEqualTo(dataStore3.invoke(getBucketRegionWriterClears))
+        .isEqualTo(0);
   }
 
   public static HashMap<String, AtomicInteger> clearsByRegion = new HashMap<>();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
index cc01920..2603b78 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
@@ -175,7 +175,7 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
     try {
       RegionEventImpl regionEvent = new RegionEventImpl(bucketRegion, Operation.REGION_CLEAR, null,
           false, region.getMyId(), eventID);
-      bucketRegion.cmnClearRegion(regionEvent, true, true);
+      bucketRegion.cmnClearRegion(regionEvent, false, true);
     } catch (PartitionOfflineException poe) {
       logger.info(
           "All members holding data for bucket {} are offline, no more retries will be attempted",


[geode] 20/22: fix rebase compiling error

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 0589b0d6e9ea5e53c524eb2529f5e6e36cbf2591
Author: Jinmei Liao <ji...@pivotal.io>
AuthorDate: Fri Oct 2 16:31:28 2020 -0700

    fix rebase compiling error
---
 .../org/apache/geode/cache/PRCacheListenerDistributedTest.java    | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
index 7d95473..ac02b65 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
@@ -39,9 +39,9 @@ import org.junit.runners.Parameterized.UseParametersRunnerFactory;
 import org.apache.geode.cache.util.CacheListenerAdapter;
 import org.apache.geode.logging.internal.log4j.api.LogService;
 import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedCounters;
+import org.apache.geode.test.dunit.rules.DistributedErrorCollector;
 import org.apache.geode.test.dunit.rules.DistributedRule;
-import org.apache.geode.test.dunit.rules.SharedCountersRule;
-import org.apache.geode.test.dunit.rules.SharedErrorCollector;
 import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
 import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
 
@@ -75,9 +75,9 @@ public class PRCacheListenerDistributedTest implements Serializable {
   @Rule
   public SerializableTestName testName = new SerializableTestName();
   @Rule
-  public SharedCountersRule sharedCountersRule = new SharedCountersRule();
+  public DistributedCounters sharedCountersRule = new DistributedCounters();
   @Rule
-  public SharedErrorCollector errorCollector = new SharedErrorCollector();
+  public DistributedErrorCollector errorCollector = new DistributedErrorCollector();
   protected String regionName;
 
   @Parameters


[geode] 10/22: GEODE-7667: Fixing test to include PR clear help text.

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit d765608c2b15284895d478e0759ad87df7bc61af
Author: Nabarun Nag <na...@cs.wisc.edu>
AuthorDate: Mon May 11 16:52:50 2020 -0700

    GEODE-7667: Fixing test to include PR clear help text.
---
 .../internal/cli/GfshParserAutoCompletionIntegrationTest.java           | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/geode-gfsh/src/integrationTest/java/org/apache/geode/management/internal/cli/GfshParserAutoCompletionIntegrationTest.java b/geode-gfsh/src/integrationTest/java/org/apache/geode/management/internal/cli/GfshParserAutoCompletionIntegrationTest.java
index 4c29427..3bc6a03 100644
--- a/geode-gfsh/src/integrationTest/java/org/apache/geode/management/internal/cli/GfshParserAutoCompletionIntegrationTest.java
+++ b/geode-gfsh/src/integrationTest/java/org/apache/geode/management/internal/cli/GfshParserAutoCompletionIntegrationTest.java
@@ -381,7 +381,7 @@ public class GfshParserAutoCompletionIntegrationTest {
     String hintArgument = "data";
     String hintsProvided = gfshParserRule.getCommandManager().obtainHint(hintArgument);
     String[] hintsProvidedArray = hintsProvided.split(lineSeparator());
-    assertThat(hintsProvidedArray.length).isEqualTo(17);
+    assertThat(hintsProvidedArray.length).isEqualTo(18);
     assertThat(hintsProvidedArray[0])
         .isEqualTo("User data as stored in regions of the Geode distributed system.");
   }


[geode] 12/22: GEODE-7669 Test coverage for Partitioned Region clear with Overflow enabled (#5189)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit be03f1cd4ba71e8a58cd5297ff9193e3fdb12874
Author: Jianxia Chen <11...@users.noreply.github.com>
AuthorDate: Thu Jun 4 11:39:04 2020 -0700

    GEODE-7669 Test coverage for Partitioned Region clear with Overflow enabled (#5189)
    
    Authored-by: Jianxia Chen <jc...@apache.org>
---
 .../PartitionedRegionOverflowClearDUnitTest.java   | 380 +++++++++++++++++++++
 1 file changed, 380 insertions(+)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionOverflowClearDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionOverflowClearDUnitTest.java
new file mode 100644
index 0000000..c10d1db
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionOverflowClearDUnitTest.java
@@ -0,0 +1,380 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.distributed.ConfigurationProperties.ENABLE_CLUSTER_CONFIGURATION;
+import static org.apache.geode.distributed.ConfigurationProperties.HTTP_SERVICE_PORT;
+import static org.apache.geode.distributed.ConfigurationProperties.JMX_MANAGER;
+import static org.apache.geode.distributed.ConfigurationProperties.JMX_MANAGER_PORT;
+import static org.apache.geode.distributed.ConfigurationProperties.JMX_MANAGER_START;
+import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
+import static org.apache.geode.distributed.ConfigurationProperties.LOG_FILE;
+import static org.apache.geode.distributed.ConfigurationProperties.MAX_WAIT_TIME_RECONNECT;
+import static org.apache.geode.distributed.ConfigurationProperties.MEMBER_TIMEOUT;
+import static org.apache.geode.distributed.ConfigurationProperties.USE_CLUSTER_CONFIGURATION;
+import static org.apache.geode.internal.AvailablePortHelper.getRandomAvailableTCPPorts;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.apache.geode.test.dunit.VM.getVMId;
+import static org.apache.geode.test.dunit.VM.toArray;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.File;
+import java.io.Serializable;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.IntStream;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.DiskStoreFactory;
+import org.apache.geode.cache.EvictionAction;
+import org.apache.geode.cache.EvictionAttributes;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.client.ClientCache;
+import org.apache.geode.cache.client.ClientCacheFactory;
+import org.apache.geode.cache.client.ClientRegionShortcut;
+import org.apache.geode.distributed.LocatorLauncher;
+import org.apache.geode.distributed.ServerLauncher;
+import org.apache.geode.distributed.internal.InternalLocator;
+import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+import org.apache.geode.test.junit.rules.GfshCommandRule;
+import org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
+
+public class PartitionedRegionOverflowClearDUnitTest implements Serializable {
+
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule(5);
+
+  @Rule
+  public SerializableTemporaryFolder temporaryFolder = new SerializableTemporaryFolder();
+
+  @Rule
+  public transient GfshCommandRule gfsh = new GfshCommandRule();
+
+  private VM locator;
+  private VM server1;
+  private VM server2;
+  private VM accessor;
+  private VM client;
+
+  private static final String LOCATOR_NAME = "locator";
+  private static final String SERVER1_NAME = "server1";
+  private static final String SERVER2_NAME = "server2";
+  private static final String SERVER3_NAME = "server3";
+
+  private File locatorDir;
+  private File server1Dir;
+  private File server2Dir;
+  private File server3Dir;
+
+  private String locatorString;
+
+  private int locatorPort;
+  private int locatorJmxPort;
+  private int locatorHttpPort;
+  private int serverPort1;
+  private int serverPort2;
+  private int serverPort3;
+
+  private static final AtomicReference<LocatorLauncher> LOCATOR_LAUNCHER = new AtomicReference<>();
+
+  private static final AtomicReference<ServerLauncher> SERVER_LAUNCHER = new AtomicReference<>();
+
+  private static final AtomicReference<ClientCache> CLIENT_CACHE = new AtomicReference<>();
+
+  private static final String OVERFLOW_REGION_NAME = "testOverflowRegion";
+
+  public static final int NUM_ENTRIES = 1000;
+
+  @Before
+  public void setup() throws Exception {
+    locator = getVM(0);
+    server1 = getVM(1);
+    server2 = getVM(2);
+    accessor = getVM(3);
+    client = getVM(4);
+
+    locatorDir = temporaryFolder.newFolder(LOCATOR_NAME);
+    server1Dir = temporaryFolder.newFolder(SERVER1_NAME);
+    server2Dir = temporaryFolder.newFolder(SERVER2_NAME);
+    server3Dir = temporaryFolder.newFolder(SERVER3_NAME);
+
+    int[] ports = getRandomAvailableTCPPorts(6);
+    locatorPort = ports[0];
+    locatorJmxPort = ports[1];
+    locatorHttpPort = ports[2];
+    serverPort1 = ports[3];
+    serverPort2 = ports[4];
+    serverPort3 = ports[5];
+
+    locator.invoke(
+        () -> startLocator(locatorDir, locatorPort, locatorJmxPort, locatorHttpPort));
+    gfsh.connectAndVerify(locatorJmxPort, GfshCommandRule.PortType.jmxManager);
+
+    locatorString = "localhost[" + locatorPort + "]";
+    server1.invoke(() -> startServer(SERVER1_NAME, server1Dir, locatorString, serverPort1));
+    server2.invoke(() -> startServer(SERVER2_NAME, server2Dir, locatorString, serverPort2));
+  }
+
+  @After
+  public void tearDown() {
+    destroyRegion();
+    destroyDiskStore(DiskStoreFactory.DEFAULT_DISK_STORE_NAME);
+
+    for (VM vm : new VM[] {client, accessor, server1, server2, locator}) {
+      vm.invoke(() -> {
+        if (CLIENT_CACHE.get() != null) {
+          CLIENT_CACHE.get().close();
+        }
+        if (LOCATOR_LAUNCHER.get() != null) {
+          LOCATOR_LAUNCHER.get().stop();
+        }
+        if (SERVER_LAUNCHER.get() != null) {
+          SERVER_LAUNCHER.get().stop();
+        }
+
+        CLIENT_CACHE.set(null);
+        LOCATOR_LAUNCHER.set(null);
+        SERVER_LAUNCHER.set(null);
+      });
+    }
+  }
+
+  @Test
+  public void testGfshClearRegionWithOverflow() throws InterruptedException {
+    createPartitionRedundantPersistentOverflowRegion();
+
+    populateRegion();
+    assertRegionSize(NUM_ENTRIES);
+
+    gfsh.executeAndAssertThat("clear region --name=" + OVERFLOW_REGION_NAME).statusIsSuccess();
+    assertRegionSize(0);
+
+    restartServers();
+
+    assertRegionSize(0);
+  }
+
+  @Test
+  public void testClientRegionClearWithOverflow() throws InterruptedException {
+    createPartitionRedundantPersistentOverflowRegion();
+
+    populateRegion();
+    assertRegionSize(NUM_ENTRIES);
+
+    client.invoke(() -> {
+      if (CLIENT_CACHE.get() == null) {
+        ClientCache clientCache =
+            new ClientCacheFactory().addPoolLocator("localhost", locatorPort).create();
+        CLIENT_CACHE.set(clientCache);
+      }
+
+      CLIENT_CACHE.get().getRegion(OVERFLOW_REGION_NAME).clear();
+    });
+    assertRegionSize(0);
+
+    restartServers();
+
+    assertRegionSize(0);
+  }
+
+  @Test
+  public void testAccessorRegionClearWithOverflow() throws InterruptedException {
+
+    for (VM vm : toArray(server1, server2)) {
+      vm.invoke(this::createRegionWithDefaultDiskStore);
+    }
+
+    accessor.invoke(() -> {
+      startServer(SERVER3_NAME, server3Dir, locatorString, serverPort3);
+      SERVER_LAUNCHER.get().getCache()
+          .createRegionFactory(RegionShortcut.PARTITION_REDUNDANT_OVERFLOW)
+          .setPartitionAttributes(
+              new PartitionAttributesFactory().setLocalMaxMemory(0).create())
+          .create(OVERFLOW_REGION_NAME);
+    });
+
+    populateRegion();
+    assertRegionSize(NUM_ENTRIES);
+
+    accessor.invoke(() -> {
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).size())
+          .isEqualTo(NUM_ENTRIES);
+      SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).clear();
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).size())
+          .isEqualTo(0);
+    });
+    assertRegionSize(0);
+
+    for (VM vm : toArray(server1, server2)) {
+      vm.invoke(PartitionedRegionOverflowClearDUnitTest::stopServer);
+    }
+
+    gfsh.executeAndAssertThat("list members").statusIsSuccess();
+    assertThat(gfsh.getGfshOutput()).contains("locator");
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      startServer(SERVER1_NAME, server1Dir, locatorString, serverPort1);
+      createRegionWithDefaultDiskStore();
+    });
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      startServer(SERVER2_NAME, server2Dir, locatorString, serverPort2);
+      createRegionWithDefaultDiskStore();
+    });
+    asyncInvocation1.get();
+    asyncInvocation2.get();
+    assertRegionSize(0);
+  }
+
+  private void restartServers() throws InterruptedException {
+    for (VM vm : toArray(server1, server2)) {
+      vm.invoke(PartitionedRegionOverflowClearDUnitTest::stopServer);
+    }
+
+    gfsh.executeAndAssertThat("list members").statusIsSuccess();
+    assertThat(gfsh.getGfshOutput()).contains("locator");
+    AsyncInvocation asyncInvocation1 =
+        server1
+            .invokeAsync(() -> startServer(SERVER1_NAME, server1Dir, locatorString, serverPort1));
+    AsyncInvocation asyncInvocation2 =
+        server2
+            .invokeAsync(() -> startServer(SERVER2_NAME, server2Dir, locatorString, serverPort2));
+    asyncInvocation1.get();
+    asyncInvocation2.get();
+  }
+
+  private void createPartitionRedundantPersistentOverflowRegion() {
+    String command = new CommandStringBuilder("create region")
+        .addOption("name", OVERFLOW_REGION_NAME)
+        .addOption("type", "PARTITION_REDUNDANT_PERSISTENT_OVERFLOW")
+        .addOption("redundant-copies", "1")
+        .addOption("eviction-entry-count", "1")
+        .addOption("eviction-action", "overflow-to-disk")
+        .getCommandString();
+    gfsh.executeAndAssertThat(command).statusIsSuccess();
+  }
+
+  private void destroyRegion() {
+    server1.invoke(() -> {
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME)).isNotNull();
+      SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).destroyRegion();
+
+    });
+  }
+
+  private void destroyDiskStore(String diskStoreName) {
+    String command = new CommandStringBuilder("destroy disk-store")
+        .addOption("name", diskStoreName)
+        .getCommandString();
+    gfsh.executeAndAssertThat(command).statusIsSuccess();
+  }
+
+  private void createRegionWithDefaultDiskStore() {
+    SERVER_LAUNCHER.get().getCache().createDiskStoreFactory()
+        .create(DiskStoreFactory.DEFAULT_DISK_STORE_NAME);
+    SERVER_LAUNCHER.get().getCache()
+        .createRegionFactory(RegionShortcut.PARTITION_REDUNDANT_PERSISTENT_OVERFLOW)
+        .setPartitionAttributes(
+            new PartitionAttributesFactory().setRedundantCopies(1).create())
+        .setDiskStoreName(DiskStoreFactory.DEFAULT_DISK_STORE_NAME)
+        .setEvictionAttributes(
+            EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
+        .create(OVERFLOW_REGION_NAME);
+  }
+
+  private void populateRegion() {
+    client.invoke(() -> {
+      if (CLIENT_CACHE.get() == null) {
+        ClientCache clientCache =
+            new ClientCacheFactory().addPoolLocator("localhost", locatorPort).create();
+        CLIENT_CACHE.set(clientCache);
+      }
+
+      Region<Object, Object> clientRegion = CLIENT_CACHE.get()
+          .createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
+          .create(OVERFLOW_REGION_NAME);
+
+      IntStream.range(0, NUM_ENTRIES).forEach(i -> clientRegion.put("key-" + i, "value-" + i));
+    });
+  }
+
+  private void assertRegionSize(int size) {
+    server1.invoke(() -> {
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME)).isNotNull();
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).size())
+          .isEqualTo(size);
+    });
+    server2.invoke(() -> {
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME)).isNotNull();
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).size())
+          .isEqualTo(size);
+    });
+  }
+
+  private void startLocator(File directory, int port, int jmxPort, int httpPort) {
+    LOCATOR_LAUNCHER.set(new LocatorLauncher.Builder()
+        .setMemberName(LOCATOR_NAME)
+        .setPort(port)
+        .setWorkingDirectory(directory.getAbsolutePath())
+        .set(HTTP_SERVICE_PORT, httpPort + "")
+        .set(JMX_MANAGER, "true")
+        .set(JMX_MANAGER_PORT, String.valueOf(jmxPort))
+        .set(JMX_MANAGER_START, "true")
+        .set(LOG_FILE, new File(directory, LOCATOR_NAME + ".log").getAbsolutePath())
+        .set(MAX_WAIT_TIME_RECONNECT, "1000")
+        .set(MEMBER_TIMEOUT, "2000")
+        .set(ENABLE_CLUSTER_CONFIGURATION, "true")
+        .set(USE_CLUSTER_CONFIGURATION, "true")
+        .build());
+
+    LOCATOR_LAUNCHER.get().start();
+
+    await().untilAsserted(() -> {
+      InternalLocator locator = (InternalLocator) LOCATOR_LAUNCHER.get().getLocator();
+      assertThat(locator.isSharedConfigurationRunning())
+          .as("Locator shared configuration is running on locator" + getVMId())
+          .isTrue();
+    });
+  }
+
+  private void startServer(String name, File workingDirectory, String locator, int serverPort) {
+    SERVER_LAUNCHER.set(new ServerLauncher.Builder()
+        .setDeletePidFileOnStop(true)
+        .setMemberName(name)
+        .setWorkingDirectory(workingDirectory.getAbsolutePath())
+        .setServerPort(serverPort)
+        .set(HTTP_SERVICE_PORT, "0")
+        .set(LOCATORS, locator)
+        .set(LOG_FILE, new File(workingDirectory, name + ".log").getAbsolutePath())
+        .set(MAX_WAIT_TIME_RECONNECT, "1000")
+        .set(MEMBER_TIMEOUT, "2000")
+        .set(ENABLE_CLUSTER_CONFIGURATION, "true")
+        .set(USE_CLUSTER_CONFIGURATION, "true")
+        .build());
+
+    SERVER_LAUNCHER.get().start();
+  }
+
+  private static void stopServer() {
+    SERVER_LAUNCHER.get().stop();
+  }
+}


[geode] 21/22: GEODE-7845 blocking PR region clear if one or more server versions are too old (#5577)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 68d7c308d9ae070351a0e3045b04bcaf9636537d
Author: mhansonp <ha...@vmware.com>
AuthorDate: Tue Oct 13 10:10:25 2020 -0700

    GEODE-7845 blocking PR region clear if one or more server versions are too old (#5577)
    
    - if a server is running an old version when a PR clear is invoked
    by the client, the client will receive a ServerOperationException
    with a cause of ServerVersionMismatchException.
---
 .../integrationTest/resources/assembly_content.txt |   1 +
 .../main/java/org/apache/geode/cache/Region.java   |   2 +
 .../cache/ServerVersionMismatchException.java      |  34 ++
 .../geode/internal/cache/PartitionedRegion.java    |   1 +
 .../internal/cache/PartitionedRegionClear.java     |  34 +-
 .../sanctioned-geode-core-serializables.txt        |   1 +
 .../internal/cache/PartitionedRegionClearTest.java | 109 ++++--
 .../RollingUpgrade2DUnitTestBase.java              |   4 +-
 ...ionRegionClearMixedServerPartitionedRegion.java | 412 +++++++++++++++++++++
 9 files changed, 571 insertions(+), 27 deletions(-)

diff --git a/geode-assembly/src/integrationTest/resources/assembly_content.txt b/geode-assembly/src/integrationTest/resources/assembly_content.txt
index 549150f..553785a 100644
--- a/geode-assembly/src/integrationTest/resources/assembly_content.txt
+++ b/geode-assembly/src/integrationTest/resources/assembly_content.txt
@@ -245,6 +245,7 @@ javadoc/org/apache/geode/cache/RoleEvent.html
 javadoc/org/apache/geode/cache/RoleException.html
 javadoc/org/apache/geode/cache/Scope.html
 javadoc/org/apache/geode/cache/SerializedCacheValue.html
+javadoc/org/apache/geode/cache/ServerVersionMismatchException.html
 javadoc/org/apache/geode/cache/StatisticsDisabledException.html
 javadoc/org/apache/geode/cache/SubscriptionAttributes.html
 javadoc/org/apache/geode/cache/SynchronizationCommitConflictException.html
diff --git a/geode-core/src/main/java/org/apache/geode/cache/Region.java b/geode-core/src/main/java/org/apache/geode/cache/Region.java
index 4707a46..5162bd5 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/Region.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/Region.java
@@ -1307,6 +1307,8 @@ public interface Region<K, V> extends ConcurrentMap<K, V> {
    * @throws PartitionedRegionPartialClearException when data is partially cleared on partitioned
    *         region. It is caller responsibility to handle the partial data clear either by retrying
    *         the clear operation or continue working with the partially cleared partitioned region.
+   * @throws ServerVersionMismatchException when data was not cleared because one or more
+   *         of the member servers' version was too old to understand the clear message.
    */
   @Override
   void clear();
diff --git a/geode-core/src/main/java/org/apache/geode/cache/ServerVersionMismatchException.java b/geode-core/src/main/java/org/apache/geode/cache/ServerVersionMismatchException.java
new file mode 100644
index 0000000..1d4231a
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/cache/ServerVersionMismatchException.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.cache;
+
+import java.util.List;
+
+/**
+ * Indicates a failure to perform an operation on a Partitioned Region due to
+ * server versions not meeting requirements.
+ *
+ * @since Geode 1.14.0
+ */
+public class ServerVersionMismatchException extends CacheRuntimeException {
+  private static final long serialVersionUID = -3004093739855972548L;
+
+  public ServerVersionMismatchException(List<String> members, String featureName,
+      String version) {
+    super(
+        "A server's " + members + " version was too old (< " + version + ") for : " + featureName);
+
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index 6bfd0cf..8bbeb1c 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -5328,6 +5328,7 @@ public class PartitionedRegion extends LocalRegion
     return this.totalNumberOfBuckets;
   }
 
+
   @Override
   public void basicDestroy(final EntryEventImpl event, final boolean cacheWrite,
       final Object expectedOldValue)
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
index e8b01d8..0e5acfc 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
@@ -14,8 +14,10 @@
  */
 package org.apache.geode.internal.cache;
 
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 
 import org.apache.logging.log4j.Logger;
@@ -25,11 +27,13 @@ import org.apache.geode.cache.CacheWriterException;
 import org.apache.geode.cache.Operation;
 import org.apache.geode.cache.OperationAbortedException;
 import org.apache.geode.cache.PartitionedRegionPartialClearException;
+import org.apache.geode.cache.ServerVersionMismatchException;
 import org.apache.geode.cache.partition.PartitionRegionHelper;
 import org.apache.geode.distributed.internal.DistributionManager;
 import org.apache.geode.distributed.internal.MembershipListener;
 import org.apache.geode.distributed.internal.ReplyException;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.serialization.KnownVersion;
 import org.apache.geode.logging.internal.log4j.api.LogService;
 
 public class PartitionedRegionClear {
@@ -289,7 +293,8 @@ public class PartitionedRegionClear {
     }
 
     final Set<InternalDistributedMember> configRecipients =
-        new HashSet<>(partitionedRegion.getRegionAdvisor().adviseAllPRNodes());
+        new HashSet<>(partitionedRegion.getRegionAdvisor()
+            .adviseAllPRNodes());
 
     try {
       final PartitionRegionConfig prConfig =
@@ -310,8 +315,7 @@ public class PartitionedRegionClear {
     try {
       PartitionedRegionClearMessage.PartitionedRegionClearResponse resp =
           new PartitionedRegionClearMessage.PartitionedRegionClearResponse(
-              partitionedRegion.getSystem(),
-              configRecipients);
+              partitionedRegion.getSystem(), configRecipients);
       PartitionedRegionClearMessage partitionedRegionClearMessage =
           new PartitionedRegionClearMessage(configRecipients, partitionedRegion, resp, op, event);
       partitionedRegionClearMessage.send();
@@ -334,10 +338,34 @@ public class PartitionedRegionClear {
     return bucketsOperated;
   }
 
+  /**
+   * Throws ServerVersionMismatchException if any bucket primary is too old to support PR clear.
+   */
+  public void allServerVersionsSupportPartitionRegionClear() {
+    List<String> memberNames = new ArrayList<>();
+    for (int i = 0; i < partitionedRegion.getTotalNumberOfBuckets(); i++) {
+      InternalDistributedMember internalDistributedMember = partitionedRegion.getBucketPrimary(i);
+      if ((internalDistributedMember != null)
+          && (internalDistributedMember.getVersion().isOlderThan(KnownVersion.GEODE_1_14_0))) {
+        if (!memberNames.contains(internalDistributedMember.getName())) {
+          memberNames.add(internalDistributedMember.getName());
+          logger.info("MLH adding " + internalDistributedMember.getName());
+        }
+      }
+    }
+    if (!memberNames.isEmpty()) {
+      throw new ServerVersionMismatchException(memberNames, "Partitioned Region Clear",
+          KnownVersion.GEODE_1_14_0.toString());
+    }
+  }
+
+
   void doClear(RegionEventImpl regionEvent, boolean cacheWrite) {
     String lockName = CLEAR_OPERATION + partitionedRegion.getName();
     long clearStartTime = 0;
 
+    allServerVersionsSupportPartitionRegionClear();
+
     try {
       // distributed lock to make sure only one clear op is in progress in the cluster.
       acquireDistributedClearLock(lockName);
diff --git a/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt b/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
index 644fbc2..86e2372 100644
--- a/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
+++ b/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
@@ -92,6 +92,7 @@ org/apache/geode/cache/ResourceException,true,-5559328592343363268
 org/apache/geode/cache/ResumptionAction,true,6632254151314915610,ordinal:byte
 org/apache/geode/cache/RoleException,true,-7521056108445887394
 org/apache/geode/cache/Scope,true,5534399159504301602,ordinal:int
+org/apache/geode/cache/ServerVersionMismatchException,true,-3004093739855972548
 org/apache/geode/cache/StatisticsDisabledException,true,-2987721454129719551
 org/apache/geode/cache/SynchronizationCommitConflictException,true,2619806460255259492
 org/apache/geode/cache/TimeoutException,true,-6260761691185737442
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
index bd37d9e..bd78fd0 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
@@ -37,6 +37,7 @@ import org.mockito.ArgumentCaptor;
 import org.apache.geode.CancelCriterion;
 import org.apache.geode.cache.PartitionedRegionPartialClearException;
 import org.apache.geode.cache.Region;
+import org.apache.geode.cache.ServerVersionMismatchException;
 import org.apache.geode.distributed.DistributedLockService;
 import org.apache.geode.distributed.internal.DMStats;
 import org.apache.geode.distributed.internal.DistributionManager;
@@ -44,6 +45,7 @@ import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.MembershipListener;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
+import org.apache.geode.internal.serialization.KnownVersion;
 
 public class PartitionedRegionClearTest {
 
@@ -51,6 +53,8 @@ public class PartitionedRegionClearTest {
   private PartitionedRegionClear partitionedRegionClear;
   private DistributionManager distributionManager;
   private PartitionedRegion partitionedRegion;
+  private RegionAdvisor regionAdvisor;
+  private InternalDistributedMember internalDistributedMember;
 
   @Before
   public void setUp() {
@@ -62,6 +66,14 @@ public class PartitionedRegionClearTest {
     when(partitionedRegion.getName()).thenReturn("prRegion");
 
     partitionedRegionClear = new PartitionedRegionClear(partitionedRegion);
+    internalDistributedMember = mock(InternalDistributedMember.class);
+    when(internalDistributedMember.getVersion()).thenReturn(KnownVersion.CURRENT);
+    regionAdvisor = mock(RegionAdvisor.class);
+    when(partitionedRegion.getRegionAdvisor()).thenReturn(regionAdvisor);
+    when(regionAdvisor.getDistributionManager()).thenReturn(distributionManager);
+    when(distributionManager.getDistributionManagerId()).thenReturn(internalDistributedMember);
+    when(distributionManager.getId()).thenReturn(internalDistributedMember);
+
   }
 
   private Set<BucketRegion> setupBucketRegions(
@@ -85,7 +97,6 @@ public class PartitionedRegionClearTest {
 
   @Test
   public void isLockedForListenerAndClientNotificationReturnsTrueWhenLocked() {
-    InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
     when(distributionManager.isCurrentMember(internalDistributedMember)).thenReturn(true);
     partitionedRegionClear.obtainClearLockLocal(internalDistributedMember);
 
@@ -94,7 +105,6 @@ public class PartitionedRegionClearTest {
 
   @Test
   public void isLockedForListenerAndClientNotificationReturnsFalseWhenMemberNotInTheSystemRequestsLock() {
-    InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
     when(distributionManager.isCurrentMember(internalDistributedMember)).thenReturn(false);
 
     assertThat(partitionedRegionClear.isLockedForListenerAndClientNotification()).isFalse();
@@ -132,8 +142,6 @@ public class PartitionedRegionClearTest {
     doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear)
         .attemptToSendPartitionedRegionClearMessage(regionEvent,
             PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR);
-    InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
-    when(distributionManager.getId()).thenReturn(internalDistributedMember);
 
     spyPartitionedRegionClear.obtainLockForClear(regionEvent);
 
@@ -152,8 +160,6 @@ public class PartitionedRegionClearTest {
     doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear)
         .attemptToSendPartitionedRegionClearMessage(regionEvent,
             PartitionedRegionClearMessage.OperationType.OP_UNLOCK_FOR_PR_CLEAR);
-    InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
-    when(distributionManager.getId()).thenReturn(internalDistributedMember);
 
     spyPartitionedRegionClear.releaseLockForClear(regionEvent);
 
@@ -172,8 +178,6 @@ public class PartitionedRegionClearTest {
     doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear)
         .attemptToSendPartitionedRegionClearMessage(regionEvent,
             PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR);
-    InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
-    when(distributionManager.getId()).thenReturn(internalDistributedMember);
 
     spyPartitionedRegionClear.clearRegion(regionEvent);
 
@@ -330,13 +334,12 @@ public class PartitionedRegionClearTest {
     PartitionedRegionDataStore partitionedRegionDataStore = mock(PartitionedRegionDataStore.class);
     Set<BucketRegion> buckets = setupBucketRegions(partitionedRegionDataStore, bucketAdvisor);
     when(partitionedRegion.getDataStore()).thenReturn(partitionedRegionDataStore);
-    InternalDistributedMember member = mock(InternalDistributedMember.class);
-    when(distributionManager.isCurrentMember(member)).thenReturn(true);
+    when(distributionManager.isCurrentMember(internalDistributedMember)).thenReturn(true);
 
-    partitionedRegionClear.obtainClearLockLocal(member);
+    partitionedRegionClear.obtainClearLockLocal(internalDistributedMember);
 
     assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
-        .isSameAs(member);
+        .isSameAs(internalDistributedMember);
     for (BucketRegion bucketRegion : buckets) {
       verify(bucketRegion, times(1)).lockLocallyForClear(partitionedRegion.getDistributionManager(),
           partitionedRegion.getMyId(), null);
@@ -350,10 +353,9 @@ public class PartitionedRegionClearTest {
     PartitionedRegionDataStore partitionedRegionDataStore = mock(PartitionedRegionDataStore.class);
     Set<BucketRegion> buckets = setupBucketRegions(partitionedRegionDataStore, bucketAdvisor);
     when(partitionedRegion.getDataStore()).thenReturn(partitionedRegionDataStore);
-    InternalDistributedMember member = mock(InternalDistributedMember.class);
-    when(distributionManager.isCurrentMember(member)).thenReturn(false);
+    when(distributionManager.isCurrentMember(internalDistributedMember)).thenReturn(false);
 
-    partitionedRegionClear.obtainClearLockLocal(member);
+    partitionedRegionClear.obtainClearLockLocal(internalDistributedMember);
 
     assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
         .isNull();
@@ -370,9 +372,9 @@ public class PartitionedRegionClearTest {
     PartitionedRegionDataStore partitionedRegionDataStore = mock(PartitionedRegionDataStore.class);
     Set<BucketRegion> buckets = setupBucketRegions(partitionedRegionDataStore, bucketAdvisor);
     when(partitionedRegion.getDataStore()).thenReturn(partitionedRegionDataStore);
-    InternalDistributedMember member = mock(InternalDistributedMember.class);
-    when(distributionManager.isCurrentMember(member)).thenReturn(true);
-    partitionedRegionClear.lockForListenerAndClientNotification.setLocked(member);
+    when(distributionManager.isCurrentMember(internalDistributedMember)).thenReturn(true);
+    partitionedRegionClear.lockForListenerAndClientNotification
+        .setLocked(internalDistributedMember);
 
     partitionedRegionClear.releaseClearLockLocal();
 
@@ -405,13 +407,11 @@ public class PartitionedRegionClearTest {
     Region<String, PartitionRegionConfig> prRoot = mock(Region.class);
     when(partitionedRegion.getPRRoot()).thenReturn(prRoot);
     InternalDistributedMember member = mock(InternalDistributedMember.class);
-    RegionAdvisor regionAdvisor = mock(RegionAdvisor.class);
     Set<InternalDistributedMember> prNodes = Collections.singleton(member);
     Node node = mock(Node.class);
     when(node.getMemberId()).thenReturn(member);
     Set<Node> configNodes = Collections.singleton(node);
     when(regionAdvisor.adviseAllPRNodes()).thenReturn(prNodes);
-    when(partitionedRegion.getRegionAdvisor()).thenReturn(regionAdvisor);
     PartitionRegionConfig partitionRegionConfig = mock(PartitionRegionConfig.class);
     when(partitionRegionConfig.getNodes()).thenReturn(configNodes);
     when(prRoot.get(any())).thenReturn(partitionRegionConfig);
@@ -423,7 +423,7 @@ public class PartitionedRegionClearTest {
     when(txManager.isDistributed()).thenReturn(false);
     when(internalCache.getTxManager()).thenReturn(txManager);
     when(partitionedRegion.getCache()).thenReturn(internalCache);
-
+    when(member.getVersion()).thenReturn(KnownVersion.getCurrentVersion());
     when(distributionManager.getCancelCriterion()).thenReturn(mock(CancelCriterion.class));
     when(distributionManager.getStats()).thenReturn(mock(DMStats.class));
 
@@ -433,6 +433,8 @@ public class PartitionedRegionClearTest {
     verify(distributionManager, times(1)).putOutgoing(any());
   }
 
+
+
   @Test
   public void doClearAcquiresAndReleasesDistributedClearLockAndCreatesAllPrimaryBuckets() {
     RegionEventImpl regionEvent = mock(RegionEventImpl.class);
@@ -458,7 +460,6 @@ public class PartitionedRegionClearTest {
     doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear).clearRegion(regionEvent);
 
     spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
-
     verify(spyPartitionedRegionClear, times(1)).invokeCacheWriter(regionEvent);
   }
 
@@ -558,6 +559,70 @@ public class PartitionedRegionClearTest {
   }
 
   @Test
+  public void doClearThrowsServerVersionMismatchException() {
+    boolean cacheWrite = false;
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(partitionedRegion.hasListener()).thenReturn(false);
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(false);
+    when(partitionedRegion.getTotalNumberOfBuckets()).thenReturn(2);
+    when(partitionedRegion.getName()).thenReturn("prRegion");
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+    doReturn(Collections.singleton("2")).when(spyPartitionedRegionClear).clearRegion(regionEvent);
+
+    when(regionEvent.clone()).thenReturn(mock(RegionEventImpl.class));
+    Region<String, PartitionRegionConfig> prRoot = mock(Region.class);
+    when(partitionedRegion.getPRRoot()).thenReturn(prRoot);
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
+    InternalDistributedMember oldMember = mock(InternalDistributedMember.class);
+    Set<InternalDistributedMember> prNodes = new HashSet<>();
+    prNodes.add(member);
+    prNodes.add(oldMember);
+    Node node = mock(Node.class);
+    Node oldNode = mock(Node.class);
+    when(member.getName()).thenReturn("member");
+    when(oldMember.getName()).thenReturn("oldMember");
+    when(node.getMemberId()).thenReturn(member);
+    when(oldNode.getMemberId()).thenReturn(oldMember);
+    Set<Node> configNodes = new HashSet<>();
+    configNodes.add(node);
+    configNodes.add(oldNode);
+    when(partitionedRegion.getBucketPrimary(0)).thenReturn(member);
+    when(partitionedRegion.getBucketPrimary(1)).thenReturn(oldMember);
+
+    when(regionAdvisor.adviseAllPRNodes()).thenReturn(prNodes);
+    PartitionRegionConfig partitionRegionConfig = mock(PartitionRegionConfig.class);
+    when(partitionRegionConfig.getNodes()).thenReturn(configNodes);
+    when(prRoot.get(any())).thenReturn(partitionRegionConfig);
+    InternalDistributedSystem internalDistributedSystem = mock(InternalDistributedSystem.class);
+    when(internalDistributedSystem.getDistributionManager()).thenReturn(distributionManager);
+    when(partitionedRegion.getSystem()).thenReturn(internalDistributedSystem);
+    InternalCache internalCache = mock(InternalCache.class);
+    TXManagerImpl txManager = mock(TXManagerImpl.class);
+    when(txManager.isDistributed()).thenReturn(false);
+    when(internalCache.getTxManager()).thenReturn(txManager);
+    when(partitionedRegion.getCache()).thenReturn(internalCache);
+    when(oldMember.getVersion()).thenReturn(KnownVersion.GEODE_1_11_0);
+    when(member.getVersion()).thenReturn(KnownVersion.getCurrentVersion());
+    when(distributionManager.getCancelCriterion()).thenReturn(mock(CancelCriterion.class));
+    when(distributionManager.getStats()).thenReturn(mock(DMStats.class));
+
+
+    Throwable thrown =
+        catchThrowable(() -> spyPartitionedRegionClear.doClear(regionEvent, cacheWrite));
+
+    assertThat(thrown)
+        .isInstanceOf(ServerVersionMismatchException.class)
+        .hasMessage(
+            "A server's [oldMember] version was too old (< GEODE 1.14.0) for : Partitioned Region Clear");
+  }
+
+
+
+  @Test
   public void handleClearFromDepartedMemberReleasesTheLockForRequesterDeparture() {
     InternalDistributedMember member = mock(InternalDistributedMember.class);
     partitionedRegionClear.lockForListenerAndClientNotification.setLocked(member);
diff --git a/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgrade2DUnitTestBase.java b/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgrade2DUnitTestBase.java
index 293bc69..6181e56 100755
--- a/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgrade2DUnitTestBase.java
+++ b/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgrade2DUnitTestBase.java
@@ -987,7 +987,7 @@ public abstract class RollingUpgrade2DUnitTestBase extends JUnit4DistributedTest
     return clientCache;
   }
 
-  private static boolean assertRegionExists(GemFireCache cache, String regionName) {
+  protected static boolean assertRegionExists(GemFireCache cache, String regionName) {
     Region region = cache.getRegion(regionName);
     if (region == null) {
       throw new Error("Region: " + regionName + " does not exist");
@@ -995,7 +995,7 @@ public abstract class RollingUpgrade2DUnitTestBase extends JUnit4DistributedTest
     return true;
   }
 
-  private static Region getRegion(GemFireCache cache, String regionName) {
+  protected static Region getRegion(GemFireCache cache, String regionName) {
     return cache.getRegion(regionName);
   }
 
diff --git a/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgradePartitionRegionClearMixedServerPartitionedRegion.java b/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgradePartitionRegionClearMixedServerPartitionedRegion.java
new file mode 100644
index 0000000..bfcd651
--- /dev/null
+++ b/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgradePartitionRegionClearMixedServerPartitionedRegion.java
@@ -0,0 +1,412 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.rollingupgrade;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.catchThrowable;
+
+import java.io.File;
+import java.lang.reflect.Constructor;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.logging.log4j.Logger;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+import org.junit.runners.Parameterized.UseParametersRunnerFactory;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.GemFireCache;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.ServerVersionMismatchException;
+import org.apache.geode.cache.client.ClientCache;
+import org.apache.geode.cache.client.ClientCacheFactory;
+import org.apache.geode.cache.client.ClientRegionShortcut;
+import org.apache.geode.cache.client.ServerOperationException;
+import org.apache.geode.cache.server.CacheServer;
+import org.apache.geode.distributed.DistributedSystem;
+import org.apache.geode.distributed.internal.DistributionConfig;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.AvailablePortHelper;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+import org.apache.geode.test.dunit.DistributedTestUtils;
+import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.IgnoredException;
+import org.apache.geode.test.dunit.Invoke;
+import org.apache.geode.test.dunit.NetworkUtils;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.internal.DUnitLauncher;
+import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
+import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
+import org.apache.geode.test.version.VersionManager;
+
+@RunWith(Parameterized.class)
+@UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
+public class RollingUpgradePartitionRegionClearMixedServerPartitionedRegion
+    extends JUnit4DistributedTestCase {
+
+  protected static final Logger logger = LogService.getLogger();
+  protected static GemFireCache cache;
+  protected static ClientCache clientcache;
+
+  @Parameter
+  public String oldVersion;
+
+  @Parameters(name = "from_v{0}")
+  public static Collection<String> data() {
+    List<String> result = VersionManager.getInstance().getVersionsWithoutCurrent();
+    if (result.size() < 1) {
+      throw new RuntimeException("No older versions of Geode were found to test against");
+    } else {
+      System.out.println("running against these versions: " + result);
+    }
+    return result;
+  }
+
+  @Test
+  public void testPutAndGetMixedServerPartitionedRegion() throws Exception {
+    doTestPutAndGetMixedServers(oldVersion);
+  }
+
+  /**
+   * This test starts up multiple servers from the current code base and multiple servers from the
+   * old version and executes puts and gets on a new server and old server and verifies that the
+   * results are present. Note that the puts have overlapping region keys just to test new puts and
+   * replaces
+   */
+  void doTestPutAndGetMixedServers(String oldVersion)
+      throws Exception {
+    VM currentServer1 = VM.getVM(VersionManager.CURRENT_VERSION, 0);
+    VM oldServerAndLocator = VM.getVM(oldVersion, 1);
+    VM currentServer2 = VM.getVM(VersionManager.CURRENT_VERSION, 2);
+    VM oldServer2 = VM.getVM(oldVersion, 3);
+
+    String regionName = "aRegion";
+
+    final String serverHostName = NetworkUtils.getServerHostName();
+    final int port = AvailablePortHelper.getRandomAvailableTCPPort();
+    oldServerAndLocator.invoke(() -> DistributedTestUtils.deleteLocatorStateFile(port));
+    try {
+      final Properties props = getSystemProperties();
+      props.remove(DistributionConfig.LOCATORS_NAME);
+
+      // Fire up the locator and server
+      oldServerAndLocator.invoke(() -> {
+        props.put(DistributionConfig.START_LOCATOR_NAME,
+            "" + serverHostName + "[" + port + "]");
+        props.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
+        cache = createCache(props);
+        Thread.sleep(5000); // bug in 1.0 - cluster config service not immediately available
+      });
+
+      props.put(DistributionConfig.LOCATORS_NAME, serverHostName + "[" + port + "]");
+
+      // create the cache in all the server VMs.
+      for (VM vm : Arrays.asList(oldServer2, currentServer1, currentServer2)) {
+        vm.invoke(() -> {
+          cache = createCache(props);
+        });
+      }
+      // spin up current version servers
+      for (VM vm : Arrays.asList(currentServer1, currentServer2)) {
+        vm.invoke(
+            () -> assertVersion(cache, VersionManager.getInstance().getCurrentVersionOrdinal()));
+      }
+
+      // create region
+      for (VM vm : Arrays.asList(currentServer1, currentServer2, oldServerAndLocator, oldServer2)) {
+        vm.invoke(() -> createRegion(cache, regionName));
+      }
+
+      // put some data in the region to make sure there is something to clear.
+      putDataSerializableAndVerify(currentServer1, regionName, currentServer2, oldServerAndLocator,
+          oldServer2);
+
+      // invoke Partition Region Clear and verify we didn't touch the old servers.
+
+      currentServer1.invoke(() -> {
+        assertRegionExists(cache, regionName);
+        PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
+
+        Throwable thrown = catchThrowable(region::clear);
+        assertThat(thrown).isInstanceOf(ServerVersionMismatchException.class);
+
+      });
+    } finally {
+      for (VM vm : Arrays.asList(currentServer1, currentServer2, oldServerAndLocator, oldServer2)) {
+        vm.invoke(
+            () -> closeCache(RollingUpgradePartitionRegionClearMixedServerPartitionedRegion.cache));
+      }
+    }
+  }
+
+  @Test
+  public void TestClientServerGetsUnsupportedExceptionWhenPRClearInvoked() throws Exception {
+    doTestClientServerGetsUnsupportedExceptionWhenPRClearInvoked(oldVersion);
+  }
+
+  void doTestClientServerGetsUnsupportedExceptionWhenPRClearInvoked(String oldVersion)
+      throws Exception {
+
+    VM client = VM.getVM(VersionManager.CURRENT_VERSION, 0);
+    VM locator = VM.getVM(VersionManager.CURRENT_VERSION, 1);
+    VM currentServer = VM.getVM(VersionManager.CURRENT_VERSION, 2);
+    VM oldServer2 = VM.getVM(oldVersion, 3);
+
+    for (VM vm : Arrays.asList(locator, currentServer, client)) {
+      vm.invoke(() -> System.setProperty("gemfire.allow_old_members_to_join_for_testing", "true"));
+    }
+
+    String regionName = "aRegion";
+
+    final String serverHostName = NetworkUtils.getServerHostName();
+    final int port = AvailablePortHelper.getRandomAvailableTCPPort();
+    locator.invoke(() -> DistributedTestUtils.deleteLocatorStateFile(port));
+    try {
+      final Properties props = getSystemProperties();
+      props.remove(DistributionConfig.LOCATORS_NAME);
+
+      // Fire up the locator and server
+      locator.invoke(() -> {
+        props.put(DistributionConfig.START_LOCATOR_NAME,
+            "" + serverHostName + "[" + port + "]");
+        props.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
+        cache = createCache(props);
+      });
+
+      props.put(DistributionConfig.LOCATORS_NAME, serverHostName + "[" + port + "]");
+
+      // create the cache in all the server VMs.
+      for (VM vm : Arrays.asList(oldServer2, currentServer)) {
+        vm.invoke(() -> {
+          props.setProperty(DistributionConfig.NAME_NAME, "vm" + VM.getVMId());
+          cache = createCache(props);
+        });
+      }
+      int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+
+      oldServer2.invoke(() -> startCacheServer(cache, ports[0]));
+      currentServer.invoke(() -> startCacheServer(cache, ports[1]));
+
+      // create region
+      for (VM vm : Arrays.asList(currentServer, locator, oldServer2)) {
+        vm.invoke(() -> createRegion(cache, regionName));
+      }
+
+      // put some data in the region to make sure there is something to clear.
+      putDataSerializableAndVerify(currentServer, regionName, locator, oldServer2);
+
+      // invoke Partition Region Clear from the client and verify the exception.
+      client.invoke(() -> {
+        clientcache = new ClientCacheFactory().addPoolServer(serverHostName, ports[1]).create();
+        Region<Object, Object> clientRegion = clientcache.createClientRegionFactory(
+            ClientRegionShortcut.PROXY).create(regionName);
+
+        clientRegion.put("key", "value");
+
+        Throwable thrown = catchThrowable(clientRegion::clear);
+        assertThat(thrown).isInstanceOf(ServerOperationException.class);
+        assertThat(thrown).hasCauseInstanceOf(ServerVersionMismatchException.class);
+        ServerVersionMismatchException serverVersionMismatchException =
+            (ServerVersionMismatchException) thrown.getCause();
+        assertThat(serverVersionMismatchException.getMessage()).contains("vm3");
+      });
+
+    } finally {
+
+      for (VM vm : Arrays.asList(currentServer, locator, oldServer2)) {
+        vm.invoke(() -> closeCache(cache));
+      }
+
+      client.invoke(() -> {
+        if (cache != null && !clientcache.isClosed()) {
+          clientcache.close(false);
+        }
+      });
+    }
+  }
+
+  private String getLocatorString(int locatorPort) {
+    return getDUnitLocatorAddress() + "[" + locatorPort + "]";
+  }
+
+  public String getLocatorString(int[] locatorPorts) {
+    StringBuilder locatorString = new StringBuilder();
+    int numLocators = locatorPorts.length;
+    for (int i = 0; i < numLocators; i++) {
+      locatorString.append(getLocatorString(locatorPorts[i]));
+      if (i + 1 < numLocators) {
+        locatorString.append(",");
+      }
+    }
+    return locatorString.toString();
+  }
+
+  private Cache createCache(Properties systemProperties) {
+    systemProperties.setProperty(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "false");
+    if (VersionManager.getInstance().getCurrentVersionOrdinal() < 75) {
+      systemProperties.remove("validate-serializable-objects");
+      systemProperties.remove("serializable-object-filter");
+    }
+    CacheFactory cf = new CacheFactory(systemProperties);
+    return cf.create();
+  }
+
+  private void startCacheServer(GemFireCache cache, int port) throws Exception {
+    CacheServer cacheServer = ((GemFireCacheImpl) cache).addCacheServer();
+    cacheServer.setPort(port);
+    cacheServer.start();
+  }
+
+  protected void assertRegionExists(GemFireCache cache, String regionName) {
+    Region<Object, Object> region = cache.getRegion(regionName);
+    if (region == null) {
+      throw new Error("Region: " + regionName + " does not exist");
+    }
+  }
+
+  private void assertEntryExists(GemFireCache cache, String regionName) {
+    assertRegionExists(cache, regionName);
+    Region<Object, Object> region = cache.getRegion(regionName);
+    for (int i = 0; i < 10; i++) {
+      String key = "" + i;
+      Object regionValue = region.get(key);
+      assertThat(regionValue).describedAs("Entry for key:" + key + " does not exist").isNotNull();
+    }
+  }
+
+  public void put(GemFireCache cache, String regionName, Object key, Object value) {
+    Region<Object, Object> region = cache.getRegion(regionName);
+    System.out.println(regionName + ".put(" + key + "," + value + ")");
+    Object result = region.put(key, value);
+    System.out.println("returned " + result);
+  }
+
+  private void createRegion(GemFireCache cache, String regionName) {
+    RegionFactory<Object, Object> rf = ((GemFireCacheImpl) cache).createRegionFactory(
+        RegionShortcut.PARTITION);
+    System.out.println("created region " + rf.create(regionName));
+  }
+
+  void assertVersion(GemFireCache cache, short ordinal) {
+    DistributedSystem system = cache.getDistributedSystem();
+    int thisOrdinal =
+        ((InternalDistributedMember) system.getDistributedMember()).getVersion()
+            .ordinal();
+    if (ordinal != thisOrdinal) {
+      throw new Error(
+          "Version ordinal:" + thisOrdinal + " was not the expected ordinal of:" + ordinal);
+    }
+  }
+
+  private void closeCache(GemFireCache cache) {
+    if (cache == null) {
+      return;
+    }
+    boolean cacheClosed = cache.isClosed();
+    if (!cacheClosed) {
+      List<CacheServer> servers = ((Cache) cache).getCacheServers();
+      for (CacheServer server : servers) {
+        server.stop();
+      }
+      cache.close();
+    }
+  }
+
+  /**
+   * Get the port that the standard dunit locator is listening on.
+   *
+   */
+  private String getDUnitLocatorAddress() {
+    return Host.getHost(0).getHostName();
+  }
+
+  private void deleteVMFiles() {
+    System.out.println("deleting files in vm" + VM.getVMId());
+    File pwd = new File(".");
+    for (File entry : pwd.listFiles()) {
+      try {
+        if (entry.isDirectory()) {
+          FileUtils.deleteDirectory(entry);
+        } else {
+          if (!entry.delete()) {
+            System.out.println("Could not delete " + entry);
+          }
+        }
+      } catch (Exception e) {
+        System.out.println("Could not delete " + entry + ": " + e.getMessage());
+      }
+    }
+  }
+
+  @Override
+  public void postSetUp() {
+    Invoke.invokeInEveryVM("delete files", this::deleteVMFiles);
+    IgnoredException.addIgnoredException(
+        "cluster configuration service not available|ConflictingPersistentDataException");
+  }
+
+
+  void putDataSerializableAndVerify(VM putter, String regionName,
+      VM... vms) throws Exception {
+    for (int i = 0; i < 10; i++) {
+      Class aClass = Thread.currentThread().getContextClassLoader()
+          .loadClass("org.apache.geode.cache.ExpirationAttributes");
+      Constructor constructor = aClass.getConstructor(int.class);
+      Object testDataSerializable = constructor.newInstance(i);
+      int finalI = i;
+      putter.invoke(() -> put(cache, regionName, "" + finalI, testDataSerializable));
+    }
+
+    // verify present in others
+    for (VM vm : vms) {
+      vm.invoke(() -> assertEntryExists(cache, regionName));
+    }
+  }
+
+  public Properties getSystemProperties() {
+    Properties props = DistributedTestUtils.getAllDistributedSystemProperties(new Properties());
+    props.remove("disable-auto-reconnect");
+    props.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+    props.put(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "false");
+    props.remove(DistributionConfig.LOAD_CLUSTER_CONFIG_FROM_DIR_NAME);
+    props.remove(DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME);
+    props.remove(DistributionConfig.LOCK_MEMORY_NAME);
+    return props;
+  }
+
+  public Properties getSystemProperties(int[] locatorPorts) {
+    Properties props = new Properties();
+    String locatorString = getLocatorString(locatorPorts);
+    props.setProperty("locators", locatorString);
+    props.setProperty("mcast-port", "0");
+    props.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+    props.put(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "false");
+    props.remove(DistributionConfig.LOAD_CLUSTER_CONFIG_FROM_DIR_NAME);
+    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, DUnitLauncher.logLevel);
+    return props;
+  }
+}


[geode] 09/22: GEODE-7894: Moving expiry tasks to AbstractRegion.

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 9999300983b050575d55f89ee6878b71fa3d34da
Author: Nabarun Nag <na...@cs.wisc.edu>
AuthorDate: Mon May 11 13:44:56 2020 -0700

    GEODE-7894: Moving expiry tasks to AbstractRegion.
---
 .../src/main/java/org/apache/geode/internal/cache/AbstractRegion.java  | 1 +
 .../src/main/java/org/apache/geode/internal/cache/LocalRegion.java     | 3 ---
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java
index 8e919fc..b320ba9 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java
@@ -113,6 +113,7 @@ public abstract class AbstractRegion implements InternalRegion, AttributesMutato
   private static final Logger logger = LogService.getLogger();
   private final ReentrantReadWriteLock readWriteLockForCacheLoader = new ReentrantReadWriteLock();
   private final ReentrantReadWriteLock readWriteLockForCacheWriter = new ReentrantReadWriteLock();
+  @VisibleForTesting
   protected final ConcurrentHashMap<RegionEntry, EntryExpiryTask> entryExpiryTasks =
       new ConcurrentHashMap<>();
   /**
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index 954e3a4..663b40c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -325,9 +325,6 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
    */
   private int txRefCount;
 
-  @VisibleForTesting
-  final ConcurrentHashMap<RegionEntry, EntryExpiryTask> entryExpiryTasks =
-      new ConcurrentHashMap<>();
 
   private volatile boolean regionInvalid;
 


[geode] 19/22: GEODE-7672: add dunit test to verify OQL index after PR clear. (#5436)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 4bf638afb85840636f9fc35107afbed8122ce6b8
Author: Jinmei Liao <ji...@pivotal.io>
AuthorDate: Tue Sep 8 12:19:52 2020 -0700

    GEODE-7672: add dunit test to verify OQL index after PR clear. (#5436)
    
    * require rvv lock when create index
---
 .../partitioned/PRClearQueryIndexDUnitTest.java    | 376 +++++++++++++++++++++
 .../cache/query/internal/DefaultQueryService.java  |   4 +-
 .../cache/query/internal/index/IndexManager.java   |   4 +-
 .../internal/cache/PartitionedRegionClear.java     |   5 +-
 .../geode/test/dunit/rules/ClusterStartupRule.java |  23 +-
 .../org/apache/geode/cache/query/data/City.java    |   5 +-
 6 files changed, 410 insertions(+), 7 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearQueryIndexDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearQueryIndexDUnitTest.java
new file mode 100644
index 0000000..feed3fc
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearQueryIndexDUnitTest.java
@@ -0,0 +1,376 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.cache.query.partitioned;
+
+import static org.apache.geode.distributed.ConfigurationProperties.SERIALIZABLE_OBJECT_FILTER;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.junit.rules.VMProvider.invokeInEveryMember;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.IntStream;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.client.ClientCache;
+import org.apache.geode.cache.client.ServerOperationException;
+import org.apache.geode.cache.query.Index;
+import org.apache.geode.cache.query.IndexStatistics;
+import org.apache.geode.cache.query.Query;
+import org.apache.geode.cache.query.QueryService;
+import org.apache.geode.cache.query.SelectResults;
+import org.apache.geode.cache.query.data.City;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.DUnitBlackboard;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.rules.ClientCacheRule;
+import org.apache.geode.test.junit.rules.ExecutorServiceRule;
+
+public class PRClearQueryIndexDUnitTest {
+  public static final String MUMBAI_QUERY = "select * from /cities c where c.name = 'MUMBAI'";
+  public static final String ID_10_QUERY = "select * from /cities c where c.id = 10";
+  @ClassRule
+  public static ClusterStartupRule cluster = new ClusterStartupRule(4, true);
+
+  private static MemberVM server1;
+  private static MemberVM server2;
+
+  private static DUnitBlackboard blackboard;
+
+  @Rule
+  public ClientCacheRule clientCacheRule = new ClientCacheRule();
+
+  @Rule
+  public ExecutorServiceRule executor = ExecutorServiceRule.builder().build();
+
+  private ClientCache clientCache;
+  private Region cities;
+
+  // class test setup. set up the servers, regions and indexes on the servers
+  @BeforeClass
+  public static void beforeClass() {
+    int locatorPort = ClusterStartupRule.getDUnitLocatorPort();
+    server1 = cluster.startServerVM(1, s -> s.withConnectionToLocator(locatorPort)
+        .withProperty(SERIALIZABLE_OBJECT_FILTER, "org.apache.geode.cache.query.data.*")
+        .withRegion(RegionShortcut.PARTITION, "cities"));
+    server2 = cluster.startServerVM(2, s -> s.withConnectionToLocator(locatorPort)
+        .withProperty(SERIALIZABLE_OBJECT_FILTER, "org.apache.geode.cache.query.data.*")
+        .withRegion(RegionShortcut.PARTITION, "cities"));
+
+    server1.invoke(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      Region region = cache.getRegion("cities");
+      // create indexes
+      QueryService queryService = cache.getQueryService();
+      queryService.createKeyIndex("cityId", "c.id", "/cities c");
+      queryService.createIndex("cityName", "c.name", "/cities c");
+      assertThat(cache.getQueryService().getIndexes(region))
+          .extracting(Index::getName).containsExactlyInAnyOrder("cityId", "cityName");
+    });
+
+    server2.invoke(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      Region region = cache.getRegion("cities");
+      assertThat(cache.getQueryService().getIndexes(region))
+          .extracting(Index::getName).containsExactlyInAnyOrder("cityId", "cityName");
+    });
+  }
+
+  // before every test method, create the client cache and region
+  @Before
+  public void before() throws Exception {
+    int locatorPort = ClusterStartupRule.getDUnitLocatorPort();
+    clientCache = clientCacheRule.withLocatorConnection(locatorPort).createCache();
+    cities = clientCacheRule.createProxyRegion("cities");
+  }
+
+  @Test
+  public void clearOnEmptyRegion() throws Exception {
+    cities.clear();
+    invokeInEveryMember(() -> {
+      verifyIndexesAfterClear("cities", "cityId", "cityName");
+    }, server1, server2);
+
+    IntStream.range(0, 10).forEach(i -> cities.put(i, new City(i)));
+    cities.clear();
+    invokeInEveryMember(() -> {
+      verifyIndexesAfterClear("cities", "cityId", "cityName");
+    }, server1, server2);
+  }
+
+  @Test
+  public void createIndexWhileClear() throws Exception {
+    IntStream.range(0, 1000).forEach(i -> cities.put(i, new City(i)));
+
+    // create index while clear
+    AsyncInvocation createIndex = server1.invokeAsync("create index", () -> {
+      Cache cache = ClusterStartupRule.getCache();
+      QueryService queryService = cache.getQueryService();
+      Index cityZip = queryService.createIndex("cityZip", "c.zip", "/cities c");
+      assertThat(cityZip).isNotNull();
+    });
+
+    // do clear 3 times at the same time to increase the concurrency of clear and createIndex
+    for (int i = 0; i < 3; i++) {
+      cities.clear();
+    }
+    createIndex.await();
+
+    invokeInEveryMember(() -> {
+      verifyIndexesAfterClear("cities", "cityId", "cityName");
+    }, server1, server2);
+
+    QueryService queryService = clientCache.getQueryService();
+    Query query =
+        queryService.newQuery("select * from /cities c where c.zip < " + (City.ZIP_START + 10));
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(0);
+
+    IntStream.range(0, 10).forEach(i -> cities.put(i, new City(i)));
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(10);
+  }
+
+  @Test
+  public void createIndexWhileClearOnReplicateRegion() throws Exception {
+    invokeInEveryMember(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      cache.createRegionFactory(RegionShortcut.PARTITION)
+          .create("replicateCities");
+    }, server1, server2);
+
+    Region replicateCities = clientCacheRule.createProxyRegion("replicateCities");
+    IntStream.range(0, 1000).forEach(i -> replicateCities.put(i, new City(i)));
+
+    // create index while clear
+    AsyncInvocation createIndex = server1.invokeAsync("create index on replicate regions", () -> {
+      Cache cache = ClusterStartupRule.getCache();
+      QueryService queryService = cache.getQueryService();
+      Index cityZip = queryService.createIndex("cityZip_replicate", "c.zip", "/replicateCities c");
+      assertThat(cityZip).isNotNull();
+    });
+
+    // do clear at the same time, 3 times
+    for (int i = 0; i < 3; i++) {
+      replicateCities.clear();
+    }
+    createIndex.await();
+
+    invokeInEveryMember(() -> {
+      verifyIndexesAfterClear("replicateCities", "cityZip_replicate");
+    }, server1, server2);
+
+    QueryService queryService = clientCache.getQueryService();
+    Query query =
+        queryService
+            .newQuery("select * from /replicateCities c where c.zip < " + (City.ZIP_START + 10));
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(0);
+
+    IntStream.range(0, 10).forEach(i -> replicateCities.put(i, new City(i)));
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(10);
+  }
+
+  @Test
+  public void removeIndexWhileClear() throws Exception {
+    // create cityZip index
+    server1.invoke("create index", () -> {
+      Cache cache = ClusterStartupRule.getCache();
+      QueryService queryService = cache.getQueryService();
+      Index cityZip = queryService.createIndex("cityZip", "c.zip", "/cities c");
+      assertThat(cityZip).isNotNull();
+    });
+
+    // remove index while clear
+    // removeIndex has to be invoked on each server. It's not distributed
+    AsyncInvocation removeIndex1 = server1.invokeAsync("remove index",
+        PRClearQueryIndexDUnitTest::removeCityZipIndex);
+    AsyncInvocation removeIndex2 = server2.invokeAsync("remove index",
+        PRClearQueryIndexDUnitTest::removeCityZipIndex);
+
+    cities.clear();
+    removeIndex1.await();
+    removeIndex2.await();
+
+    // make sure removeIndex and clear operations are successful
+    invokeInEveryMember(() -> {
+      InternalCache internalCache = ClusterStartupRule.getCache();
+      QueryService qs = internalCache.getQueryService();
+      Region region = internalCache.getRegion("cities");
+      assertThat(region.size()).isEqualTo(0);
+      // verify only 2 indexes created in the beginning of the tests exist
+      assertThat(qs.getIndexes(region)).extracting(Index::getName)
+          .containsExactlyInAnyOrder("cityId", "cityName");
+    }, server1, server2);
+  }
+
+  private static void removeCityZipIndex() {
+    Cache cache = ClusterStartupRule.getCache();
+    QueryService qs = cache.getQueryService();
+    Region<Object, Object> region = cache.getRegion("cities");
+    Index cityZip = qs.getIndex(region, "cityZip");
+    if (cityZip != null) {
+      qs.removeIndex(cityZip);
+    }
+  }
+
+  @Test
+  public void verifyQuerySucceedsAfterClear() throws Exception {
+    // put in some data
+    IntStream.range(0, 100).forEach(i -> cities.put(i, new City(i)));
+
+    QueryService queryService = clientCache.getQueryService();
+    Query query = queryService.newQuery(MUMBAI_QUERY);
+    Query query2 = queryService.newQuery(ID_10_QUERY);
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(50);
+    assertThat(((SelectResults) query2.execute()).size()).isEqualTo(1);
+
+    cities.clear();
+    invokeInEveryMember(() -> {
+      verifyIndexesAfterClear("cities", "cityId", "cityName");
+    }, server1, server2);
+
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(0);
+    assertThat(((SelectResults) query2.execute()).size()).isEqualTo(0);
+  }
+
+  private static void verifyIndexesAfterClear(String regionName, String... indexes) {
+    InternalCache internalCache = ClusterStartupRule.getCache();
+    QueryService qs = internalCache.getQueryService();
+    Region region = internalCache.getRegion(regionName);
+    assertThat(region.size()).isEqualTo(0);
+    for (String indexName : indexes) {
+      Index index = qs.getIndex(region, indexName);
+      IndexStatistics statistics = index.getStatistics();
+      assertThat(statistics.getNumberOfKeys()).isEqualTo(0);
+      assertThat(statistics.getNumberOfValues()).isEqualTo(0);
+    }
+  }
+
+  @Test
+  public void concurrentClearAndQuery() {
+    QueryService queryService = clientCache.getQueryService();
+    Query query = queryService.newQuery(MUMBAI_QUERY);
+    Query query2 = queryService.newQuery(ID_10_QUERY);
+
+    IntStream.range(0, 100).forEach(i -> cities.put(i, new City(i)));
+
+    server1.invokeAsync(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      Region region = cache.getRegion("cities");
+      region.clear();
+    });
+
+    await().untilAsserted(() -> {
+      assertThat(((SelectResults) query.execute()).size()).isEqualTo(0);
+      assertThat(((SelectResults) query2.execute()).size()).isEqualTo(0);
+    });
+  }
+
+  @Test
+  public void concurrentClearAndPut() throws Exception {
+    AsyncInvocation puts = server1.invokeAsync(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      Region region = cache.getRegion("cities");
+      for (int i = 0; i < 1000; i++) {
+        // wait for gate to open
+        getBlackboard().waitForGate("proceedToPut", 60, TimeUnit.SECONDS);
+        region.put(i, new City(i));
+      }
+    });
+
+    AsyncInvocation clears = server2.invokeAsync(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      Region region = cache.getRegion("cities");
+      // do clear 10 times
+      for (int i = 0; i < 10; i++) {
+        try {
+          // don't allow put to proceed. It's like "close the gate"
+          getBlackboard().clearGate("proceedToPut");
+          region.clear();
+          verifyIndexesAfterClear("cities", "cityId", "cityName");
+        } finally {
+          // allow put to proceed. It's like "open the gate"
+          getBlackboard().signalGate("proceedToPut");
+        }
+      }
+    });
+
+    puts.await();
+    clears.await();
+  }
+
+  @Test
+  public void serverLeavingAndJoiningWhilePutAndClear() throws Exception {
+    int locatorPort = ClusterStartupRule.getDUnitLocatorPort();
+    Future<Void> startStopServer = executor.submit(() -> {
+      for (int i = 0; i < 3; i++) {
+        MemberVM server3 = cluster.startServerVM(3, s -> s.withConnectionToLocator(locatorPort)
+            .withProperty(SERIALIZABLE_OBJECT_FILTER, "org.apache.geode.cache.query.data.*")
+            .withRegion(RegionShortcut.PARTITION, "cities"));
+        server3.stop(false);
+      }
+    });
+
+    Future<Void> putAndClear = executor.submit(() -> {
+      for (int i = 0; i < 30; i++) {
+        IntStream.range(0, 100).forEach(j -> cities.put(j, new City(j)));
+        try {
+          cities.clear();
+
+          // only verify if clear is successful
+          QueryService queryService = clientCache.getQueryService();
+          Query query = queryService.newQuery(MUMBAI_QUERY);
+          Query query2 = queryService.newQuery(ID_10_QUERY);
+          assertThat(((SelectResults) query.execute()).size()).isEqualTo(0);
+          assertThat(((SelectResults) query2.execute()).size()).isEqualTo(0);
+        } catch (ServerOperationException e) {
+          assertThat(e.getCause().getMessage())
+              .contains("Unable to clear all the buckets from the partitioned region cities")
+              .contains("either data (buckets) moved or member departed");
+        }
+      }
+    });
+    startStopServer.get(60, TimeUnit.SECONDS);
+    putAndClear.get(60, TimeUnit.SECONDS);
+  }
+
+  private static DUnitBlackboard getBlackboard() {
+    if (blackboard == null) {
+      blackboard = new DUnitBlackboard();
+    }
+    return blackboard;
+  }
+
+  @After
+  public void tearDown() {
+    invokeInEveryMember(() -> {
+      if (blackboard != null) {
+        blackboard.clearGate("proceedToPut");
+      }
+      // remove the cityZip index
+      removeCityZipIndex();
+    }, server1, server2);
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java
index 2930a3a..2895aaf 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java
@@ -213,7 +213,7 @@ public class DefaultQueryService implements InternalQueryService {
       throw new UnsupportedOperationException(
           "Index creation on the server is not supported from the client.");
     }
-    PartitionedIndex parIndex = null;
+
     if (region == null) {
       region = getRegionFromPath(imports, fromClause);
     }
@@ -241,6 +241,7 @@ public class DefaultQueryService implements InternalQueryService {
       }
     }
     if (region instanceof PartitionedRegion) {
+      PartitionedIndex parIndex = null;
       try {
         parIndex = (PartitionedIndex) ((PartitionedRegion) region).createIndex(false, indexType,
             indexName, indexedExpression, fromClause, imports, loadEntries);
@@ -256,7 +257,6 @@ public class DefaultQueryService implements InternalQueryService {
       return parIndex;
 
     } else {
-
       IndexManager indexManager = IndexUtils.getIndexManager(this.cache, region, true);
       Index index = indexManager.createIndex(indexName, indexType, indexedExpression, fromClause,
           imports, null, null, loadEntries);
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java
index 5b2867b..0501603 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java
@@ -275,6 +275,8 @@ public class IndexManager {
     }
 
     try {
+      ((LocalRegion) this.region).lockRVVForBulkOp();
+
       String projectionAttributes = "*"; // for now this is the only option
 
       if (getIndex(indexName) != null) {
@@ -425,7 +427,7 @@ public class IndexManager {
     } finally {
       this.cache.setPdxReadSerializedOverride(oldReadSerialized);
       ((TXManagerImpl) this.cache.getCacheTransactionManager()).unpauseTransaction(tx);
-
+      ((LocalRegion) this.region).unlockRVVForBulkOp();
     }
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
index 4796a17..e8b01d8 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
@@ -270,10 +270,13 @@ public class PartitionedRegionClear {
     } while (true);
   }
 
+  /**
+   * @return the buckets that were cleared; an empty set if any exception happened
+   */
   protected Set<Integer> attemptToSendPartitionedRegionClearMessage(RegionEventImpl event,
       PartitionedRegionClearMessage.OperationType op)
       throws ForceReattemptException {
-    Set<Integer> bucketsOperated = null;
+    Set<Integer> bucketsOperated = new HashSet<>();
 
     if (partitionedRegion.getPRRoot() == null) {
       if (logger.isDebugEnabled()) {
diff --git a/geode-dunit/src/main/java/org/apache/geode/test/dunit/rules/ClusterStartupRule.java b/geode-dunit/src/main/java/org/apache/geode/test/dunit/rules/ClusterStartupRule.java
index c817f8c..8de2513 100644
--- a/geode-dunit/src/main/java/org/apache/geode/test/dunit/rules/ClusterStartupRule.java
+++ b/geode-dunit/src/main/java/org/apache/geode/test/dunit/rules/ClusterStartupRule.java
@@ -39,6 +39,7 @@ import org.apache.geode.cache.client.ClientCacheFactory;
 import org.apache.geode.cache.server.CacheServer;
 import org.apache.geode.distributed.internal.InternalLocator;
 import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.test.dunit.DUnitEnv;
 import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.IgnoredException;
 import org.apache.geode.test.dunit.SerializableConsumerIF;
@@ -97,6 +98,7 @@ public class ClusterStartupRule implements SerializableTestRule {
   }
 
   private final int vmCount;
+  private final boolean launchDunitLocator;
 
   private final DistributedRestoreSystemProperties restoreSystemProperties =
       new DistributedRestoreSystemProperties();
@@ -106,11 +108,20 @@ public class ClusterStartupRule implements SerializableTestRule {
   private boolean logFile = false;
 
   public ClusterStartupRule() {
-    this(NUM_VMS);
+    this(NUM_VMS, false);
   }
 
   public ClusterStartupRule(final int vmCount) {
+    this(vmCount, false);
+  }
+
+  public ClusterStartupRule(final boolean launchDunitLocator) {
+    this(NUM_VMS, launchDunitLocator);
+  }
+
+  public ClusterStartupRule(final int vmCount, boolean launchDunitLocator) {
     this.vmCount = vmCount;
+    this.launchDunitLocator = launchDunitLocator;
   }
 
   public static ClientCache getClientCache() {
@@ -148,7 +159,7 @@ public class ClusterStartupRule implements SerializableTestRule {
       // GEODE-6247: JDK 11 has an issue where native code is reporting committed is 2MB > max.
       IgnoredException.addIgnoredException("committed = 538968064 should be < max = 536870912");
     }
-    DUnitLauncher.launchIfNeeded(false);
+    DUnitLauncher.launchIfNeeded(launchDunitLocator);
     for (int i = 0; i < vmCount; i++) {
       Host.getHost(0).getVM(i);
     }
@@ -156,6 +167,14 @@ public class ClusterStartupRule implements SerializableTestRule {
     occupiedVMs = new HashMap<>();
   }
 
+  /**
+   * Returns the port that the standard dunit locator is listening on.
+   */
+  public static int getDUnitLocatorPort() {
+    return DUnitEnv.get().getLocatorPort();
+  }
+
+
   private void after(Description description) throws Throwable {
 
     if (!skipLocalDistributedSystemCleanup) {
diff --git a/geode-junit/src/main/java/org/apache/geode/cache/query/data/City.java b/geode-junit/src/main/java/org/apache/geode/cache/query/data/City.java
index e7e7b39..622bf5d 100644
--- a/geode-junit/src/main/java/org/apache/geode/cache/query/data/City.java
+++ b/geode-junit/src/main/java/org/apache/geode/cache/query/data/City.java
@@ -24,6 +24,8 @@ package org.apache.geode.cache.query.data;
 import java.io.Serializable;
 
 public class City implements Serializable {
+  public static int ZIP_START = 300000;
+  public int id;
   public String name;
   public int zip;
 
@@ -37,7 +39,8 @@ public class City implements Serializable {
     String arr1[] = {"MUMBAI", "PUNE", "GANDHINAGAR", "CHANDIGARH"};
     /* this is for the test to have 50% of the objects belonging to one city */
     this.name = arr1[i % 2];
-    this.zip = 425125 + i;
+    this.zip = ZIP_START + i;
+    this.id = i;
   }// end of constructor 2
 
   ////////////////////////////


[geode] 03/22: PR.clear's event id should be created and used in BR (#4805)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 967427520348c771dfe67505995f1ea64bb0ec61
Author: Xiaojian Zhou <ge...@users.noreply.github.com>
AuthorDate: Mon Mar 16 17:35:35 2020 -0700

    PR.clear's event id should be created and used in BR (#4805)
    
    * GEODE-7857: PR.clear's event id should be created and used in BR
---
 .../PartitionedRegionPersistentClearDUnitTest.java |  2 +-
 .../codeAnalysis/sanctionedDataSerializables.txt   |  4 +-
 .../geode/internal/cache/PartitionedRegion.java    |  8 +--
 .../internal/cache/partitioned/ClearPRMessage.java | 12 ++--
 .../internal/cache/PartitionedRegionTest.java      | 65 ++++++++++++++++++++++
 5 files changed, 80 insertions(+), 11 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionPersistentClearDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionPersistentClearDUnitTest.java
index 847699b..c758446 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionPersistentClearDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionPersistentClearDUnitTest.java
@@ -21,6 +21,6 @@ import org.apache.geode.cache.RegionShortcut;
 public class PartitionedRegionPersistentClearDUnitTest extends PartitionedRegionClearDUnitTest {
 
   protected RegionShortcut getRegionShortCut() {
-    return RegionShortcut.PARTITION_REDUNDANT_PERSISTENT_OVERFLOW;
+    return RegionShortcut.PARTITION_REDUNDANT_PERSISTENT;
   }
 }
diff --git a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
index fb83c84..8e522a2 100644
--- a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
@@ -1377,8 +1377,8 @@ fromData,27
 toData,27
 
 org/apache/geode/internal/cache/partitioned/ClearPRMessage,2
-fromData,19
-toData,36
+fromData,30
+toData,44
 
 org/apache/geode/internal/cache/partitioned/ClearPRMessage$ClearReplyMessage,2
 fromData,17
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index 1aa427a..ffb01af 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -2191,7 +2191,7 @@ public class PartitionedRegion extends LocalRegion
         }
 
         // create ClearPRMessage per bucket
-        List<ClearPRMessage> clearMsgList = createClearPRMessages();
+        List<ClearPRMessage> clearMsgList = createClearPRMessages(regionEvent.getEventId());
         for (ClearPRMessage clearPRMessage : clearMsgList) {
           int bucketId = clearPRMessage.getBucketId();
           checkReadiness();
@@ -2363,10 +2363,10 @@ public class PartitionedRegion extends LocalRegion
     }
   }
 
-  List<ClearPRMessage> createClearPRMessages() {
+  List<ClearPRMessage> createClearPRMessages(EventID eventID) {
     ArrayList<ClearPRMessage> clearMsgList = new ArrayList<>();
-    for (int bucketId = 0; bucketId < this.totalNumberOfBuckets; bucketId++) {
-      ClearPRMessage clearPRMessage = new ClearPRMessage(bucketId);
+    for (int bucketId = 0; bucketId < getTotalNumberOfBuckets(); bucketId++) {
+      ClearPRMessage clearPRMessage = new ClearPRMessage(bucketId, eventID);
       clearMsgList.add(clearPRMessage);
     }
     return clearMsgList;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
index 9fa8057..cc01920 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
@@ -56,6 +56,8 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
 
   private Integer bucketId;
 
+  private EventID eventID;
+
   public static final String BUCKET_NON_PRIMARY_MESSAGE =
       "The bucket region on target member is no longer primary";
   public static final String EXCEPTION_THROWN_DURING_CLEAR_OPERATION =
@@ -71,8 +73,9 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
    */
   public ClearPRMessage() {}
 
-  public ClearPRMessage(int bucketId) {
+  public ClearPRMessage(int bucketId, EventID eventID) {
     this.bucketId = bucketId;
+    this.eventID = eventID;
   }
 
   public void initMessage(PartitionedRegion region, Set<InternalDistributedMember> recipients,
@@ -119,6 +122,7 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
     } else {
       InternalDataSerializer.writeSignedVL(bucketId, out);
     }
+    DataSerializer.writeObject(this.eventID, out);
   }
 
   @Override
@@ -126,6 +130,7 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
       throws IOException, ClassNotFoundException {
     super.fromData(in, context);
     this.bucketId = (int) InternalDataSerializer.readSignedVL(in);
+    this.eventID = (EventID) DataSerializer.readObject(in);
   }
 
   @Override
@@ -168,9 +173,8 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
       throw new ForceReattemptException(BUCKET_NON_PRIMARY_MESSAGE);
     }
     try {
-      RegionEventImpl regionEvent = new RegionEventImpl();
-      regionEvent.setOperation(Operation.REGION_CLEAR);
-      regionEvent.setRegion(bucketRegion);
+      RegionEventImpl regionEvent = new RegionEventImpl(bucketRegion, Operation.REGION_CLEAR, null,
+          false, region.getMyId(), eventID);
       bucketRegion.cmnClearRegion(regionEvent, true, true);
     } catch (PartitionOfflineException poe) {
       logger.info(
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
index 742db8a..898c4f7 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
@@ -21,10 +21,12 @@ import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatCode;
 import static org.assertj.core.api.Assertions.catchThrowable;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.ArgumentMatchers.isNull;
+import static org.mockito.Mockito.doCallRealMethod;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
@@ -39,6 +41,7 @@ import static org.mockito.quality.Strictness.STRICT_STUBS;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -55,6 +58,7 @@ import org.mockito.junit.MockitoRule;
 import org.apache.geode.CancelCriterion;
 import org.apache.geode.Statistics;
 import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.CacheClosedException;
 import org.apache.geode.cache.CacheLoader;
 import org.apache.geode.cache.CacheWriter;
 import org.apache.geode.cache.Operation;
@@ -71,6 +75,7 @@ import org.apache.geode.distributed.internal.DistributionManager;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.cache.control.InternalResourceManager;
+import org.apache.geode.internal.cache.partitioned.ClearPRMessage;
 import org.apache.geode.internal.cache.partitioned.FetchKeysMessage;
 import org.apache.geode.internal.cache.partitioned.colocation.ColocationLoggerFactory;
 import org.apache.geode.internal.cache.tier.sockets.ServerConnection;
@@ -208,6 +213,66 @@ public class PartitionedRegionTest {
   }
 
   @Test
+  public void clearShouldNotThrowUnsupportedOperationException() {
+    PartitionedRegion spyPartitionedRegion = spy(partitionedRegion);
+    doNothing().when(spyPartitionedRegion).checkReadiness();
+    doCallRealMethod().when(spyPartitionedRegion).basicClear(any());
+    doNothing().when(spyPartitionedRegion).basicClear(any(), anyBoolean());
+    spyPartitionedRegion.clear();
+  }
+
+  @Test(expected = CacheClosedException.class)
+  public void clearShouldThrowCacheClosedExceptionIfShutdownAll() {
+    PartitionedRegion spyPartitionedRegion = spy(partitionedRegion);
+    RegionEventImpl regionEvent =
+        new RegionEventImpl(spyPartitionedRegion, Operation.REGION_CLEAR, null, false,
+            spyPartitionedRegion.getMyId(), true);
+    when(cache.isCacheAtShutdownAll()).thenReturn(true);
+    when(cache.getCacheClosedException("Cache is shutting down"))
+        .thenReturn(new CacheClosedException("Cache is shutting down"));
+    DistributedLockService lockService = mock(DistributedLockService.class);
+    when(spyPartitionedRegion.getPartitionedRegionLockService()).thenReturn(lockService);
+    String lockName = "_clearOperation" + spyPartitionedRegion.getFullPath().replace('/', '_');
+    when(lockService.lock(lockName, -1, -1)).thenReturn(true);
+    spyPartitionedRegion.basicClear(regionEvent, true);
+  }
+
+  @Test
+  public void createClearPRMessagesShouldCreateMessagePerBucket() {
+    PartitionedRegion spyPartitionedRegion = spy(partitionedRegion);
+    RegionEventImpl regionEvent =
+        new RegionEventImpl(spyPartitionedRegion, Operation.REGION_CLEAR, null, false,
+            spyPartitionedRegion.getMyId(), true);
+    when(spyPartitionedRegion.getTotalNumberOfBuckets()).thenReturn(3);
+    EventID eventID = new EventID(spyPartitionedRegion.getCache().getDistributedSystem());
+    List<ClearPRMessage> msgs = spyPartitionedRegion.createClearPRMessages(eventID);
+    assertThat(msgs.size()).isEqualTo(3);
+  }
+
+  @Test
+  public void sendEachMessagePerBucket() {
+    PartitionedRegion spyPartitionedRegion = spy(partitionedRegion);
+    RegionEventImpl regionEvent =
+        new RegionEventImpl(spyPartitionedRegion, Operation.REGION_CLEAR, null, false,
+            spyPartitionedRegion.getMyId(), true);
+    when(cache.isCacheAtShutdownAll()).thenReturn(false);
+    DistributedLockService lockService = mock(DistributedLockService.class);
+    when(spyPartitionedRegion.getPartitionedRegionLockService()).thenReturn(lockService);
+    when(spyPartitionedRegion.getTotalNumberOfBuckets()).thenReturn(3);
+    String lockName = "_clearOperation" + spyPartitionedRegion.getFullPath().replace('/', '_');
+    when(lockService.lock(lockName, -1, -1)).thenReturn(true);
+    when(spyPartitionedRegion.hasListener()).thenReturn(true);
+    doNothing().when(spyPartitionedRegion).dispatchListenerEvent(any(), any());
+    doNothing().when(spyPartitionedRegion).notifyBridgeClients(eq(regionEvent));
+    doNothing().when(spyPartitionedRegion).checkReadiness();
+    doNothing().when(lockService).unlock(lockName);
+    spyPartitionedRegion.basicClear(regionEvent, true);
+    verify(spyPartitionedRegion, times(3)).sendClearMsgByBucket(any(), any());
+    verify(spyPartitionedRegion, times(1)).dispatchListenerEvent(any(), any());
+    verify(spyPartitionedRegion, times(1)).notifyBridgeClients(eq(regionEvent));
+  }
+
+  @Test
   public void getBucketNodeForReadOrWriteReturnsPrimaryNodeForRegisterInterest() {
     // ARRANGE
     EntryEventImpl clientEvent = mock(EntryEventImpl.class);


[geode] 08/22: GEODE-7676: Conversion of duration to seconds.

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 3522100a000d19ca1655aa4a05cc489cce7c33b3
Author: Nabarun Nag <na...@cs.wisc.edu>
AuthorDate: Mon May 11 12:24:16 2020 -0700

    GEODE-7676: Conversion of duration to seconds.
---
 .../internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
index 33301f4..7f3dff9 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
@@ -298,7 +298,7 @@ public class PartitionedRegionClearWithExpirationDUnitTest implements Serializab
   public void clearShouldRemoveRegisteredExpirationTasks(TestVM coordinatorVM,
       RegionShortcut regionShortcut) {
     final int entries = 500;
-    int expirationTime = (int) GeodeAwaitility.getTimeout().getValueInMS() / 1000;
+    int expirationTime = (int) GeodeAwaitility.getTimeout().getSeconds();
     parametrizedSetup(regionShortcut, new ExpirationAttributes(expirationTime, DESTROY));
     populateRegion(accessor, entries, asList(accessor, server1, server2));
 


[geode] 01/22: GEODE-7683: introduce BR.cmnClearRegion

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 09638b896fa813dda35b75096b309d548f2a38ef
Author: zhouxh <gz...@pivotal.io>
AuthorDate: Mon Jan 27 17:02:48 2020 -0800

    GEODE-7683: introduce BR.cmnClearRegion
    
    Co-authored-by: Xiaojian Zhou <gz...@pivotal.io>
    
    GEODE-7684: Create messaging class for PR Clear (#4689)
    
    * Added new message class and test
    
    Co-authored-by: Benjamin Ross <br...@pivotal.io>
    Co-authored-by: Donal Evans <do...@pivotal.io>
---
 .../codeAnalysis/sanctionedDataSerializables.txt   |   8 +
 .../apache/geode/internal/cache/BucketRegion.java  |  34 +-
 .../geode/internal/cache/DistributedRegion.java    |  23 +-
 .../internal/cache/partitioned/ClearPRMessage.java | 388 +++++++++++++++++++++
 .../internal/cache/BucketRegionJUnitTest.java      |  77 ++++
 .../internal/cache/DistributedRegionJUnitTest.java |  18 +
 .../cache/partitioned/ClearPRMessageTest.java      | 288 +++++++++++++++
 .../serialization/DataSerializableFixedID.java     |   3 +
 8 files changed, 831 insertions(+), 8 deletions(-)

diff --git a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
index 204be50..8e522a2 100644
--- a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
@@ -1376,6 +1376,14 @@ org/apache/geode/internal/cache/partitioned/BucketSizeMessage$BucketSizeReplyMes
 fromData,27
 toData,27
 
+org/apache/geode/internal/cache/partitioned/ClearPRMessage,2
+fromData,30
+toData,44
+
+org/apache/geode/internal/cache/partitioned/ClearPRMessage$ClearReplyMessage,2
+fromData,17
+toData,17
+
 org/apache/geode/internal/cache/partitioned/ColocatedRegionDetails,2
 fromData,81
 toData,133
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
index 8662a6e..d49d3dc 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
@@ -560,6 +560,36 @@ public class BucketRegion extends DistributedRegion implements Bucket {
     }
   }
 
+  @Override
+  public void cmnClearRegion(RegionEventImpl regionEvent, boolean cacheWrite, boolean useRVV) {
+    if (!getBucketAdvisor().isPrimary()) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("Not primary bucket when doing clear, do nothing");
+      }
+      return;
+    }
+
+    boolean enableRVV = useRVV && getConcurrencyChecksEnabled();
+    RegionVersionVector rvv = null;
+    if (enableRVV) {
+      rvv = getVersionVector().getCloneForTransmission();
+    }
+
+    // get rvvLock
+    Set<InternalDistributedMember> participants =
+        getCacheDistributionAdvisor().adviseInvalidateRegion();
+    try {
+      obtainWriteLocksForClear(regionEvent, participants);
+      // no need to dominate my own rvv.
+      // Clear is on going here, there won't be GII for this member
+      clearRegionLocally(regionEvent, cacheWrite, null);
+      distributeClearOperation(regionEvent, rvv, participants);
+
+      // TODO: call reindexUserDataRegion if there're lucene indexes
+    } finally {
+      releaseWriteLocksForClear(regionEvent, participants);
+    }
+  }
 
   long generateTailKey() {
     long key = eventSeqNum.addAndGet(partitionedRegion.getTotalNumberOfBuckets());
@@ -2110,8 +2140,8 @@ public class BucketRegion extends DistributedRegion implements Bucket {
       // counters to 0.
       oldMemValue = bytesInMemory.getAndSet(0);
     } else {
-      throw new InternalGemFireError(
-          "Trying to clear a bucket region that was not destroyed or in initialization.");
+      // BucketRegion's clear is supported now
+      oldMemValue = bytesInMemory.getAndSet(0);
     }
     if (oldMemValue != BUCKET_DESTROYED) {
       partitionedRegion.getPrStats().incDataStoreEntryCount(-sizeBeforeClear);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
index b822dde..489d85a 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
@@ -2013,6 +2013,10 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
     super.basicClear(regionEvent, cacheWrite);
   }
 
+  void distributeClearOperation(RegionEventImpl regionEvent, RegionVersionVector rvv,
+      Set<InternalDistributedMember> participants) {
+    DistributedClearOperation.clear(regionEvent, rvv, participants);
+  }
 
   @Override
   void cmnClearRegion(RegionEventImpl regionEvent, boolean cacheWrite, boolean useRVV) {
@@ -2035,7 +2039,7 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
             obtainWriteLocksForClear(regionEvent, participants);
             clearRegionLocally(regionEvent, cacheWrite, null);
             if (!regionEvent.isOriginRemote() && regionEvent.getOperation().isDistributed()) {
-              DistributedClearOperation.clear(regionEvent, null, participants);
+              distributeClearOperation(regionEvent, null, participants);
             }
           } finally {
             releaseWriteLocksForClear(regionEvent, participants);
@@ -2091,10 +2095,12 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
   /**
    * obtain locks preventing generation of new versions in other members
    */
-  private void obtainWriteLocksForClear(RegionEventImpl regionEvent,
+  protected void obtainWriteLocksForClear(RegionEventImpl regionEvent,
       Set<InternalDistributedMember> participants) {
     lockLocallyForClear(getDistributionManager(), getMyId(), regionEvent);
-    DistributedClearOperation.lockAndFlushToOthers(regionEvent, participants);
+    if (!isUsedForPartitionedRegionBucket()) {
+      DistributedClearOperation.lockAndFlushToOthers(regionEvent, participants);
+    }
   }
 
   /**
@@ -2131,7 +2137,7 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
   /**
    * releases the locks obtained in obtainWriteLocksForClear
    */
-  private void releaseWriteLocksForClear(RegionEventImpl regionEvent,
+  protected void releaseWriteLocksForClear(RegionEventImpl regionEvent,
       Set<InternalDistributedMember> participants) {
 
     ARMLockTestHook armLockTestHook = getRegionMap().getARMLockTestHook();
@@ -2139,8 +2145,13 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
       armLockTestHook.beforeRelease(this, regionEvent);
     }
 
-    getVersionVector().unlockForClear(getMyId());
-    DistributedClearOperation.releaseLocks(regionEvent, participants);
+    RegionVersionVector rvv = getVersionVector();
+    if (rvv != null) {
+      rvv.unlockForClear(getMyId());
+    }
+    if (!isUsedForPartitionedRegionBucket()) {
+      DistributedClearOperation.releaseLocks(regionEvent, participants);
+    }
 
     if (armLockTestHook != null) {
       armLockTestHook.afterRelease(this, regionEvent);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
new file mode 100644
index 0000000..1a8aba1
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
@@ -0,0 +1,388 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.internal.cache.partitioned;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Set;
+
+import org.apache.logging.log4j.Logger;
+
+import org.apache.geode.DataSerializer;
+import org.apache.geode.annotations.VisibleForTesting;
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.distributed.DistributedLockService;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DirectReplyProcessor;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.ReplyException;
+import org.apache.geode.distributed.internal.ReplyMessage;
+import org.apache.geode.distributed.internal.ReplyProcessor21;
+import org.apache.geode.distributed.internal.ReplySender;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.Assert;
+import org.apache.geode.internal.InternalDataSerializer;
+import org.apache.geode.internal.NanoTimer;
+import org.apache.geode.internal.cache.BucketRegion;
+import org.apache.geode.internal.cache.EventID;
+import org.apache.geode.internal.cache.ForceReattemptException;
+import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.internal.cache.PartitionedRegionHelper;
+import org.apache.geode.internal.cache.RegionEventImpl;
+import org.apache.geode.internal.logging.log4j.LogMarker;
+import org.apache.geode.internal.serialization.DeserializationContext;
+import org.apache.geode.internal.serialization.SerializationContext;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+
+public class ClearPRMessage extends PartitionMessageWithDirectReply {
+  private static final Logger logger = LogService.getLogger();
+
+  private RegionEventImpl regionEvent;
+
+  private Integer bucketId;
+
+  /** The time in ms to wait for a lock to be obtained during doLocalClear() */
+  public static final int LOCK_WAIT_TIMEOUT_MS = 1000;
+  public static final String BUCKET_NON_PRIMARY_MESSAGE =
+      "The bucket region on target member is no longer primary";
+  public static final String BUCKET_REGION_LOCK_UNAVAILABLE_MESSAGE =
+      "A lock for the bucket region could not be obtained.";
+  public static final String EXCEPTION_THROWN_DURING_CLEAR_OPERATION =
+      "An exception was thrown during the local clear operation: ";
+
+  /**
+   * state from operateOnRegion that must be preserved for transmission from the waiting pool
+   */
+  transient boolean result = false;
+
+  /**
+   * Empty constructor to satisfy {@link DataSerializer}requirements
+   */
+  public ClearPRMessage() {}
+
+  public ClearPRMessage(int bucketId) {
+    this.bucketId = bucketId;
+
+    // These are both used by the parent class, but don't apply to this message type
+    this.notificationOnly = false;
+    this.posDup = false;
+  }
+
+  public void setRegionEvent(RegionEventImpl event) {
+    regionEvent = event;
+  }
+
+  public void initMessage(PartitionedRegion region, Set<InternalDistributedMember> recipients,
+      DirectReplyProcessor replyProcessor) {
+    this.resetRecipients();
+    if (recipients != null) {
+      setRecipients(recipients);
+    }
+    this.regionId = region.getPRId();
+    this.processor = replyProcessor;
+    this.processorId = replyProcessor == null ? 0 : replyProcessor.getProcessorId();
+    if (replyProcessor != null) {
+      replyProcessor.enableSevereAlertProcessing();
+    }
+  }
+
+  @Override
+  public boolean isSevereAlertCompatible() {
+    // allow forced-disconnect processing for all cache op messages
+    return true;
+  }
+
+  public RegionEventImpl getRegionEvent() {
+    return regionEvent;
+  }
+
+  public ClearResponse send(DistributedMember recipient, PartitionedRegion region)
+      throws ForceReattemptException {
+    Set<InternalDistributedMember> recipients =
+        Collections.singleton((InternalDistributedMember) recipient);
+    ClearResponse clearResponse = new ClearResponse(region.getSystem(), recipients);
+    initMessage(region, recipients, clearResponse);
+    if (logger.isDebugEnabled()) {
+      logger.debug("ClearPRMessage.send: recipient is {}, msg is {}", recipient, this);
+    }
+
+    Set<InternalDistributedMember> failures = region.getDistributionManager().putOutgoing(this);
+    if (failures != null && failures.size() > 0) {
+      throw new ForceReattemptException("Failed sending <" + this + ">");
+    }
+    return clearResponse;
+  }
+
+  @Override
+  public int getDSFID() {
+    return PR_CLEAR_MESSAGE;
+  }
+
+  @Override
+  public void toData(DataOutput out, SerializationContext context) throws IOException {
+    super.toData(out, context);
+    if (bucketId == null) {
+      InternalDataSerializer.writeSignedVL(-1, out);
+    } else {
+      InternalDataSerializer.writeSignedVL(bucketId, out);
+    }
+    DataSerializer.writeObject(regionEvent, out);
+  }
+
+  @Override
+  public void fromData(DataInput in, DeserializationContext context)
+      throws IOException, ClassNotFoundException {
+    super.fromData(in, context);
+    this.bucketId = (int) InternalDataSerializer.readSignedVL(in);
+    this.regionEvent = DataSerializer.readObject(in);
+  }
+
+  @Override
+  public EventID getEventID() {
+    return regionEvent.getEventId();
+  }
+
+  /**
+   * This method is called upon receipt and make the desired changes to the PartitionedRegion Note:
+   * It is very important that this message does NOT cause any deadlocks as the sender will wait
+   * indefinitely for the acknowledgement
+   */
+  @Override
+  @VisibleForTesting
+  protected boolean operateOnPartitionedRegion(ClusterDistributionManager distributionManager,
+      PartitionedRegion region, long startTime) {
+    try {
+      result = doLocalClear(region);
+    } catch (ForceReattemptException ex) {
+      sendReply(getSender(), getProcessorId(), distributionManager, new ReplyException(ex), region,
+          startTime);
+      return false;
+    }
+    sendReply(getSender(), getProcessorId(), distributionManager, null, region, startTime);
+    return false;
+  }
+
+  public boolean doLocalClear(PartitionedRegion region) throws ForceReattemptException {
+    // Retrieve local bucket region which matches target bucketId
+    BucketRegion bucketRegion = region.getDataStore().getInitializedBucketForId(null, bucketId);
+
+    // Check if we are primary, throw exception if not
+    if (!bucketRegion.isPrimary()) {
+      throw new ForceReattemptException(BUCKET_NON_PRIMARY_MESSAGE);
+    }
+
+    DistributedLockService lockService = getPartitionRegionLockService();
+    String lockName = bucketRegion.getFullPath();
+    try {
+      boolean locked = lockService.lock(lockName, LOCK_WAIT_TIMEOUT_MS, -1);
+
+      if (!locked) {
+        throw new ForceReattemptException(BUCKET_REGION_LOCK_UNAVAILABLE_MESSAGE);
+      }
+
+      // Double check if we are still primary, as this could have changed between our first check
+      // and obtaining the lock
+      if (!bucketRegion.isPrimary()) {
+        throw new ForceReattemptException(BUCKET_NON_PRIMARY_MESSAGE);
+      }
+
+      try {
+        bucketRegion.cmnClearRegion(regionEvent, true, true);
+      } catch (Exception ex) {
+        throw new ForceReattemptException(
+            EXCEPTION_THROWN_DURING_CLEAR_OPERATION + ex.getClass().getName(), ex);
+      }
+
+    } finally {
+      lockService.unlock(lockName);
+    }
+
+    return true;
+  }
+
+  // Extracted for testing
+  protected DistributedLockService getPartitionRegionLockService() {
+    return DistributedLockService
+        .getServiceNamed(PartitionedRegionHelper.PARTITION_LOCK_SERVICE_NAME);
+  }
+
+  @Override
+  public boolean canStartRemoteTransaction() {
+    return false;
+  }
+
+  @Override
+  protected void sendReply(InternalDistributedMember member, int processorId,
+      DistributionManager distributionManager, ReplyException ex,
+      PartitionedRegion partitionedRegion, long startTime) {
+    if (partitionedRegion != null) {
+      if (startTime > 0) {
+        partitionedRegion.getPrStats().endPartitionMessagesProcessing(startTime);
+      }
+    }
+    ClearReplyMessage.send(member, processorId, getReplySender(distributionManager), this.result,
+        ex);
+  }
+
+  @Override
+  protected void appendFields(StringBuilder buff) {
+    super.appendFields(buff);
+    buff.append("; bucketId=").append(this.bucketId);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder buff = new StringBuilder();
+    String className = getClass().getName();
+    buff.append(className.substring(className.indexOf(PN_TOKEN) + PN_TOKEN.length())); // partition.<foo>
+    buff.append("(prid="); // make sure this is the first one
+    buff.append(this.regionId);
+
+    // Append name, if we have it
+    String name = null;
+    try {
+      PartitionedRegion region = PartitionedRegion.getPRFromId(this.regionId);
+      if (region != null) {
+        name = region.getFullPath();
+      }
+    } catch (Exception ignore) {
+      /* ignored */
+    }
+    if (name != null) {
+      buff.append(" (name = \"").append(name).append("\")");
+    }
+
+    appendFields(buff);
+    buff.append(" ,distTx=");
+    buff.append(this.isTransactionDistributed);
+    buff.append(")");
+    return buff.toString();
+  }
+
+  public static class ClearReplyMessage extends ReplyMessage {
+    /** Result of the Clear operation */
+    boolean result;
+
+    @Override
+    public boolean getInlineProcess() {
+      return true;
+    }
+
+    /**
+     * Empty constructor to conform to DataSerializable interface
+     */
+    @SuppressWarnings("unused")
+    public ClearReplyMessage() {}
+
+    private ClearReplyMessage(int processorId, boolean result, ReplyException ex) {
+      super();
+      this.result = result;
+      setProcessorId(processorId);
+      setException(ex);
+    }
+
+    /** Send an ack */
+    public static void send(InternalDistributedMember recipient, int processorId,
+        ReplySender replySender,
+        boolean result, ReplyException ex) {
+      Assert.assertTrue(recipient != null, "ClearReplyMessage NULL reply message");
+      ClearReplyMessage message = new ClearReplyMessage(processorId, result, ex);
+      message.setRecipient(recipient);
+      replySender.putOutgoing(message);
+    }
+
+    /**
+     * Processes this message. This method is invoked by the receiver of the message.
+     *
+     * @param distributionManager the distribution manager that is processing the message.
+     */
+    @Override
+    public void process(final DistributionManager distributionManager,
+        final ReplyProcessor21 replyProcessor) {
+      final long startTime = getTimestamp();
+      if (replyProcessor == null) {
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "{}: processor not found", this);
+        }
+        return;
+      }
+      if (replyProcessor instanceof ClearResponse) {
+        ((ClearResponse) replyProcessor).setResponse(this);
+      }
+      replyProcessor.process(this);
+
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", replyProcessor, this);
+      }
+      distributionManager.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
+    }
+
+    @Override
+    public int getDSFID() {
+      return PR_CLEAR_REPLY_MESSAGE;
+    }
+
+    @Override
+    public void fromData(DataInput in,
+        DeserializationContext context) throws IOException, ClassNotFoundException {
+      super.fromData(in, context);
+      this.result = in.readBoolean();
+    }
+
+    @Override
+    public void toData(DataOutput out,
+        SerializationContext context) throws IOException {
+      super.toData(out, context);
+      out.writeBoolean(this.result);
+    }
+
+    @Override
+    public String toString() {
+      return "ClearReplyMessage " + "processorid=" + this.processorId + " returning " + this.result
+          + " exception=" + getException();
+    }
+  }
+
+  /**
+   * A processor to capture the value returned by {@link ClearPRMessage}
+   */
+  public static class ClearResponse extends PartitionResponse {
+    private volatile boolean returnValue;
+
+    public ClearResponse(InternalDistributedSystem distributedSystem,
+        Set<InternalDistributedMember> recipients) {
+      super(distributedSystem, recipients, false);
+    }
+
+    public void setResponse(ClearReplyMessage response) {
+      this.returnValue = response.result;
+    }
+
+    /**
+     * @return the result of the remote clear operation
+     * @throws ForceReattemptException if the peer is no longer available
+     * @throws CacheException if the peer generates an error
+     */
+    public boolean waitForResult() throws CacheException, ForceReattemptException {
+      waitForCacheException();
+      return this.returnValue;
+    }
+  }
+}
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java
index 72e6657..c7cf5a6 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java
@@ -14,7 +14,9 @@
  */
 package org.apache.geode.internal.cache;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.anyLong;
@@ -31,7 +33,10 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.junit.Test;
+
 import org.apache.geode.cache.RegionAttributes;
+import org.apache.geode.internal.cache.versions.RegionVersionVector;
 import org.apache.geode.internal.statistics.StatisticsClock;
 
 public class BucketRegionJUnitTest extends DistributedRegionJUnitTest {
@@ -128,4 +133,76 @@ public class BucketRegionJUnitTest extends DistributedRegionJUnitTest {
     }
   }
 
+  @Test
+  public void cmnClearRegionWillDoNothingIfNotPrimary() {
+    RegionEventImpl event = createClearRegionEvent();
+    BucketRegion region = (BucketRegion) event.getRegion();
+    BucketAdvisor ba = mock(BucketAdvisor.class);
+    RegionVersionVector rvv = mock(RegionVersionVector.class);
+    doReturn(rvv).when(region).getVersionVector();
+    doReturn(ba).when(region).getBucketAdvisor();
+    when(ba.isPrimary()).thenReturn(false);
+    region.cmnClearRegion(event, true, true);
+    verify(region, never()).clearRegionLocally(eq(event), eq(true), eq(rvv));
+  }
+
+  @Test
+  public void cmnClearRegionCalledOnPrimary() {
+    RegionEventImpl event = createClearRegionEvent();
+    BucketRegion region = (BucketRegion) event.getRegion();
+    BucketAdvisor ba = mock(BucketAdvisor.class);
+    RegionVersionVector rvv = mock(RegionVersionVector.class);
+    doReturn(rvv).when(region).getVersionVector();
+    doReturn(true).when(region).getConcurrencyChecksEnabled();
+    doReturn(ba).when(region).getBucketAdvisor();
+    doNothing().when(region).distributeClearOperation(any(), any(), any());
+    doNothing().when(region).lockLocallyForClear(any(), any(), any());
+    doNothing().when(region).clearRegionLocally(event, true, null);
+    when(ba.isPrimary()).thenReturn(true);
+    region.cmnClearRegion(event, true, true);
+    verify(region, times(1)).clearRegionLocally(eq(event), eq(true), eq(null));
+  }
+
+  @Test
+  public void clearWillUseNullAsRVVWhenConcurrencyCheckDisabled() {
+    RegionEventImpl event = createClearRegionEvent();
+    BucketRegion region = (BucketRegion) event.getRegion();
+    BucketAdvisor ba = mock(BucketAdvisor.class);
+    doReturn(false).when(region).getConcurrencyChecksEnabled();
+    doReturn(ba).when(region).getBucketAdvisor();
+    doNothing().when(region).distributeClearOperation(any(), any(), any());
+    doNothing().when(region).lockLocallyForClear(any(), any(), any());
+    doNothing().when(region).clearRegionLocally(event, true, null);
+    when(ba.isPrimary()).thenReturn(true);
+    region.cmnClearRegion(event, true, true);
+    verify(region, times(1)).clearRegionLocally(eq(event), eq(true), eq(null));
+  }
+
+  @Test
+  public void obtainWriteLocksForClearInBRShouldNotDistribute() {
+    RegionEventImpl event = createClearRegionEvent();
+    BucketRegion region = (BucketRegion) event.getRegion();
+    doNothing().when(region).lockLocallyForClear(any(), any(), any());
+    region.obtainWriteLocksForClear(event, null);
+    assertTrue(region.isUsedForPartitionedRegionBucket());
+  }
+
+  @Test
+  public void updateSizeToZeroOnClearBucketRegion() {
+    RegionEventImpl event = createClearRegionEvent();
+    BucketRegion region = (BucketRegion) event.getRegion();
+    PartitionedRegion pr = region.getPartitionedRegion();
+    PartitionedRegionDataStore prds = mock(PartitionedRegionDataStore.class);
+    PartitionedRegionStats prStats = mock(PartitionedRegionStats.class);
+    when(pr.getPrStats()).thenReturn(prStats);
+    doNothing().when(prStats).incDataStoreEntryCount(anyInt());
+    doNothing().when(prds).updateMemoryStats(anyInt());
+    when(pr.getDataStore()).thenReturn(prds);
+    region.updateSizeOnCreate("key1", 20);
+    long sizeBeforeClear = region.getTotalBytes();
+    assertEquals(20, sizeBeforeClear);
+    region.updateSizeOnClearRegion((int) sizeBeforeClear);
+    long sizeAfterClear = region.getTotalBytes();
+    assertEquals(0, sizeAfterClear);
+  }
 }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/DistributedRegionJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/DistributedRegionJUnitTest.java
index 9fbd8fc..ca53ced 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/DistributedRegionJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/DistributedRegionJUnitTest.java
@@ -14,6 +14,7 @@
  */
 package org.apache.geode.internal.cache;
 
+import static org.apache.geode.internal.Assert.fail;
 import static org.apache.geode.internal.statistics.StatisticsClockFactory.disabledClock;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertFalse;
@@ -53,6 +54,14 @@ public class DistributedRegionJUnitTest
   @Override
   protected void setInternalRegionArguments(InternalRegionArguments ira) {}
 
+  protected RegionEventImpl createClearRegionEvent() {
+    DistributedRegion region = prepare(true, true);
+    DistributedMember member = mock(DistributedMember.class);
+    RegionEventImpl regionEvent = new RegionEventImpl(region, Operation.REGION_CLEAR, null, false,
+        member, true);
+    return regionEvent;
+  }
+
   @Override
   protected DistributedRegion createAndDefineRegion(boolean isConcurrencyChecksEnabled,
       RegionAttributes ra, InternalRegionArguments ira, GemFireCacheImpl cache,
@@ -246,4 +255,13 @@ public class DistributedRegionJUnitTest
     region.basicBridgeReplace("key1", "value1", false, null, client, true, clientEvent);
     assertThat(clientEvent.getVersionTag().equals(tag));
   }
+
+  @Test(expected = UnsupportedOperationException.class)
+  public void localClearIsNotSupportedOnReplicatedRegion() {
+    RegionEventImpl event = createClearRegionEvent();
+    DistributedRegion region = (DistributedRegion) event.getRegion();
+    region.basicLocalClear(event);
+    fail("Expect UnsupportedOperationException");
+  }
+
 }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/ClearPRMessageTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/ClearPRMessageTest.java
new file mode 100644
index 0000000..2cf5231
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/ClearPRMessageTest.java
@@ -0,0 +1,288 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.partitioned;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.notNull;
+import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.geode.distributed.DistributedLockService;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DMStats;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.distributed.internal.ReplyException;
+import org.apache.geode.distributed.internal.ReplySender;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.BucketRegion;
+import org.apache.geode.internal.cache.ForceReattemptException;
+import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.internal.cache.PartitionedRegionDataStore;
+import org.apache.geode.internal.cache.PartitionedRegionStats;
+
+public class ClearPRMessageTest {
+
+  ClearPRMessage message;
+  PartitionedRegion region;
+  PartitionedRegionDataStore dataStore;
+  BucketRegion bucketRegion;
+
+  @Before
+  public void setup() throws ForceReattemptException {
+    message = spy(new ClearPRMessage());
+    region = mock(PartitionedRegion.class, RETURNS_DEEP_STUBS);
+    dataStore = mock(PartitionedRegionDataStore.class);
+    when(region.getDataStore()).thenReturn(dataStore);
+    bucketRegion = mock(BucketRegion.class);
+    when(dataStore.getInitializedBucketForId(any(), any())).thenReturn(bucketRegion);
+  }
+
+  @Test
+  public void doLocalClearThrowsExceptionWhenBucketIsNotPrimaryAtFirstCheck() {
+    when(bucketRegion.isPrimary()).thenReturn(false);
+
+    assertThatThrownBy(() -> message.doLocalClear(region))
+        .isInstanceOf(ForceReattemptException.class)
+        .hasMessageContaining(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
+  }
+
+  @Test
+  public void doLocalClearThrowsExceptionWhenLockCannotBeObtained() {
+    DistributedLockService mockLockService = mock(DistributedLockService.class);
+    doReturn(mockLockService).when(message).getPartitionRegionLockService();
+
+    when(mockLockService.lock(anyString(), anyLong(), anyLong())).thenReturn(false);
+    when(bucketRegion.isPrimary()).thenReturn(true);
+
+    assertThatThrownBy(() -> message.doLocalClear(region))
+        .isInstanceOf(ForceReattemptException.class)
+        .hasMessageContaining(ClearPRMessage.BUCKET_REGION_LOCK_UNAVAILABLE_MESSAGE);
+  }
+
+  @Test
+  public void doLocalClearThrowsExceptionWhenBucketIsNotPrimaryAfterObtainingLock() {
+    DistributedLockService mockLockService = mock(DistributedLockService.class);
+    doReturn(mockLockService).when(message).getPartitionRegionLockService();
+
+    // Be primary on the first check, then be not primary on the second check
+    when(bucketRegion.isPrimary()).thenReturn(true).thenReturn(false);
+    when(mockLockService.lock(any(), anyLong(), anyLong())).thenReturn(true);
+
+    assertThatThrownBy(() -> message.doLocalClear(region))
+        .isInstanceOf(ForceReattemptException.class)
+        .hasMessageContaining(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
+    // Confirm that we actually obtained and released the lock
+    verify(mockLockService, times(1)).lock(any(), anyLong(), anyLong());
+    verify(mockLockService, times(1)).unlock(any());
+  }
+
+  @Test
+  public void doLocalClearThrowsForceReattemptExceptionWhenAnExceptionIsThrownDuringClearOperation() {
+    DistributedLockService mockLockService = mock(DistributedLockService.class);
+    doReturn(mockLockService).when(message).getPartitionRegionLockService();
+    NullPointerException exception = new NullPointerException("Error encountered");
+    doThrow(exception).when(bucketRegion).cmnClearRegion(any(), anyBoolean(), anyBoolean());
+
+    // Bucket reports primary on every check so the clear proceeds to cmnClearRegion
+    when(bucketRegion.isPrimary()).thenReturn(true);
+    when(mockLockService.lock(any(), anyLong(), anyLong())).thenReturn(true);
+
+    assertThatThrownBy(() -> message.doLocalClear(region))
+        .isInstanceOf(ForceReattemptException.class)
+        .hasMessageContaining(ClearPRMessage.EXCEPTION_THROWN_DURING_CLEAR_OPERATION);
+
+    // Confirm that cmnClearRegion was called
+    verify(bucketRegion, times(1)).cmnClearRegion(any(), anyBoolean(), anyBoolean());
+  }
+
+  @Test
+  public void doLocalClearInvokesCmnClearRegionWhenBucketIsPrimaryAndLockIsObtained()
+      throws ForceReattemptException {
+    DistributedLockService mockLockService = mock(DistributedLockService.class);
+    doReturn(mockLockService).when(message).getPartitionRegionLockService();
+
+
+    // Bucket reports primary on every check so the clear proceeds to cmnClearRegion
+    when(bucketRegion.isPrimary()).thenReturn(true);
+    when(mockLockService.lock(any(), anyLong(), anyLong())).thenReturn(true);
+    assertThat(message.doLocalClear(region)).isTrue();
+
+    // Confirm that cmnClearRegion was called
+    verify(bucketRegion, times(1)).cmnClearRegion(any(), anyBoolean(), anyBoolean());
+
+    // Confirm that we actually obtained and released the lock
+    verify(mockLockService, times(1)).lock(any(), anyLong(), anyLong());
+    verify(mockLockService, times(1)).unlock(any());
+  }
+
+  @Test
+  public void initMessageSetsReplyProcessorCorrectlyWithDefinedReplyProcessor() {
+    InternalDistributedMember sender = mock(InternalDistributedMember.class);
+
+    Set<InternalDistributedMember> recipients = new HashSet<>();
+    recipients.add(sender);
+
+    ClearPRMessage.ClearResponse mockProcessor = mock(ClearPRMessage.ClearResponse.class);
+    int mockProcessorId = 5;
+    when(mockProcessor.getProcessorId()).thenReturn(mockProcessorId);
+
+    message.initMessage(region, recipients, mockProcessor);
+
+    verify(mockProcessor, times(1)).enableSevereAlertProcessing();
+    assertThat(message.getProcessorId()).isEqualTo(mockProcessorId);
+  }
+
+  @Test
+  public void initMessageSetsProcessorIdToZeroWithNullProcessor() {
+    message.initMessage(region, null, null);
+
+    assertThat(message.getProcessorId()).isEqualTo(0);
+  }
+
+  @Test
+  public void sendThrowsExceptionIfPutOutgoingMethodReturnsNonNullSetOfFailures() {
+    InternalDistributedMember recipient = mock(InternalDistributedMember.class);
+
+    DistributionManager distributionManager = mock(DistributionManager.class);
+    when(region.getDistributionManager()).thenReturn(distributionManager);
+
+    doNothing().when(message).initMessage(any(), any(), any());
+    Set<InternalDistributedMember> failures = new HashSet<>();
+    failures.add(recipient);
+
+    when(distributionManager.putOutgoing(message)).thenReturn(failures);
+
+    assertThatThrownBy(() -> message.send(recipient, region))
+        .isInstanceOf(ForceReattemptException.class)
+        .hasMessageContaining("Failed sending <" + message + ">");
+  }
+
+  @SuppressWarnings("ResultOfMethodCallIgnored")
+  @Test
+  public void operateOnPartitionedRegionCallsSendReplyWithNoExceptionWhenDoLocalClearSucceeds()
+      throws ForceReattemptException {
+    ClusterDistributionManager distributionManager = mock(ClusterDistributionManager.class);
+    InternalDistributedMember sender = mock(InternalDistributedMember.class);
+    int processorId = 1000;
+    int startTime = 0;
+
+    doReturn(true).when(message).doLocalClear(region);
+    doReturn(sender).when(message).getSender();
+    doReturn(processorId).when(message).getProcessorId();
+
+    // We don't want to deal with mocking the behavior of sendReply() in this test, so we mock it to
+    // do nothing and verify later that it was called with proper input
+    doNothing().when(message).sendReply(any(), anyInt(), any(), any(), any(), anyLong());
+
+    message.operateOnPartitionedRegion(distributionManager, region, startTime);
+
+    verify(message, times(1)).sendReply(sender, processorId, distributionManager, null, region,
+        startTime);
+  }
+
+  @SuppressWarnings("ResultOfMethodCallIgnored")
+  @Test
+  public void operateOnPartitionedRegionCallsSendReplyWithExceptionWhenDoLocalClearFailsWithException()
+      throws ForceReattemptException {
+    ClusterDistributionManager distributionManager = mock(ClusterDistributionManager.class);
+    InternalDistributedMember sender = mock(InternalDistributedMember.class);
+    int processorId = 1000;
+    int startTime = 0;
+    ForceReattemptException exception =
+        new ForceReattemptException(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
+
+    doThrow(exception).when(message).doLocalClear(region);
+    doReturn(sender).when(message).getSender();
+    doReturn(processorId).when(message).getProcessorId();
+
+    // We don't want to deal with mocking the behavior of sendReply() in this test, so we mock it to
+    // do nothing and verify later that it was called with proper input
+    doNothing().when(message).sendReply(any(), anyInt(), any(), any(), any(), anyLong());
+
+    message.operateOnPartitionedRegion(distributionManager, region, startTime);
+
+    verify(message, times(1)).sendReply(any(), anyInt(), any(), notNull(), any(), anyLong());
+  }
+
+  @Test
+  public void sendReplyEndsMessageProcessingIfWeHaveARegionAndHaveStartedProcessing() {
+    DistributionManager distributionManager = mock(DistributionManager.class);
+    InternalDistributedMember recipient = mock(InternalDistributedMember.class);
+    PartitionedRegionStats partitionedRegionStats = mock(PartitionedRegionStats.class);
+    when(region.getPrStats()).thenReturn(partitionedRegionStats);
+
+    int processorId = 1000;
+    int startTime = 10000;
+    ReplyException exception = new ReplyException(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
+
+    ReplySender replySender = mock(ReplySender.class);
+    doReturn(replySender).when(message).getReplySender(distributionManager);
+
+    message.sendReply(recipient, processorId, distributionManager, exception, region, startTime);
+
+    verify(partitionedRegionStats, times(1)).endPartitionMessagesProcessing(startTime);
+  }
+
+  @Test
+  public void sendReplyDoesNotEndMessageProcessingIfStartTimeIsZero() {
+    DistributionManager distributionManager = mock(DistributionManager.class);
+    InternalDistributedMember recipient = mock(InternalDistributedMember.class);
+    PartitionedRegionStats partitionedRegionStats = mock(PartitionedRegionStats.class);
+    when(region.getPrStats()).thenReturn(partitionedRegionStats);
+
+    int processorId = 1000;
+    int startTime = 0;
+    ReplyException exception = new ReplyException(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
+
+    ReplySender replySender = mock(ReplySender.class);
+    doReturn(replySender).when(message).getReplySender(distributionManager);
+
+    message.sendReply(recipient, processorId, distributionManager, exception, region, startTime);
+
+    verify(partitionedRegionStats, times(0)).endPartitionMessagesProcessing(startTime);
+  }
+
+  @Test
+  public void clearReplyMessageProcessCallsSetResponseIfReplyProcessorIsInstanceOfClearResponse() {
+    DistributionManager distributionManager = mock(DistributionManager.class);
+    DMStats mockStats = mock(DMStats.class);
+    when(distributionManager.getStats()).thenReturn(mockStats);
+    ClearPRMessage.ClearReplyMessage clearReplyMessage = new ClearPRMessage.ClearReplyMessage();
+    ClearPRMessage.ClearResponse mockProcessor = mock(ClearPRMessage.ClearResponse.class);
+
+    clearReplyMessage.process(distributionManager, mockProcessor);
+
+    verify(mockProcessor, times(1)).setResponse(clearReplyMessage);
+  }
+}
diff --git a/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java b/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java
index ff1571c..3598b5d 100644
--- a/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java
+++ b/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java
@@ -57,6 +57,9 @@ public interface DataSerializableFixedID extends SerializationVersions, BasicSer
   // NOTE, codes < -65536 will take 4 bytes to serialize
   // NOTE, codes < -128 will take 2 bytes to serialize
 
+  short PR_CLEAR_REPLY_MESSAGE = -164;
+  short PR_CLEAR_MESSAGE = -163;
+
   short DISTRIBUTED_PING_MESSAGE = -162;
 
   short REGION_REDUNDANCY_STATUS = -161;


[geode] 02/22: GEODE-7682: add PR.clear API (#4755)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 04df003f548e3c7d8006872d7cfba97e61359a28
Author: Xiaojian Zhou <ge...@users.noreply.github.com>
AuthorDate: Thu Mar 5 23:46:36 2020 -0800

    GEODE-7682: add PR.clear  API (#4755)
    
    * GEODE-7683: introduce BR.cmnClearRegion
    
    Co-authored-by: Xiaojian Zhou <gz...@pivotal.io>
---
 .../cache/PartitionedRegionClearDUnitTest.java     | 218 +++++++++++++++++++++
 .../PartitionedRegionPersistentClearDUnitTest.java |  26 +++
 ...itionedRegionSingleNodeOperationsJUnitTest.java |  66 -------
 .../codeAnalysis/sanctionedDataSerializables.txt   |   4 +-
 .../org/apache/geode/internal/DSFIDFactory.java    |   3 +
 .../geode/internal/cache/DistributedRegion.java    |   9 -
 .../apache/geode/internal/cache/LocalRegion.java   |  10 +
 .../geode/internal/cache/PartitionedRegion.java    | 214 ++++++++++++++++++--
 .../geode/internal/cache/RegionEventImpl.java      |   5 +
 .../internal/cache/partitioned/ClearPRMessage.java | 166 +++++-----------
 .../cache/partitioned/ClearPRMessageTest.java      |  50 ++---
 11 files changed, 522 insertions(+), 249 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
new file mode 100644
index 0000000..fb2a81b
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.internal.Assert.fail;
+import static org.apache.geode.test.dunit.rules.ClusterStartupRule.getCache;
+import static org.apache.geode.test.dunit.rules.ClusterStartupRule.getClientCache;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.IntStream;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.InterestResultPolicy;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.client.ClientRegionShortcut;
+import org.apache.geode.cache.util.CacheListenerAdapter;
+import org.apache.geode.test.dunit.SerializableCallableIF;
+import org.apache.geode.test.dunit.rules.ClientVM;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+
+public class PartitionedRegionClearDUnitTest implements Serializable {
+  protected static final String REGION_NAME = "testPR";
+  protected static final int NUM_ENTRIES = 1000;
+
+  protected int locatorPort;
+  protected MemberVM locator;
+  protected MemberVM dataStore1, dataStore2, dataStore3, accessor;
+  protected ClientVM client1, client2;
+
+  private static final Logger logger = LogManager.getLogger();
+
+  @Rule
+  public ClusterStartupRule cluster = new ClusterStartupRule(7);
+
+  @Before
+  public void setUp() throws Exception {
+    locator = cluster.startLocatorVM(0);
+    locatorPort = locator.getPort();
+    dataStore1 = cluster.startServerVM(1, getProperties(), locatorPort);
+    dataStore2 = cluster.startServerVM(2, getProperties(), locatorPort);
+    dataStore3 = cluster.startServerVM(3, getProperties(), locatorPort);
+    accessor = cluster.startServerVM(4, getProperties(), locatorPort);
+    client1 = cluster.startClientVM(5,
+        c -> c.withPoolSubscription(true).withLocatorConnection((locatorPort)));
+    client2 = cluster.startClientVM(6,
+        c -> c.withPoolSubscription(true).withLocatorConnection((locatorPort)));
+    dataStore1.invoke(this::initDataStore);
+    dataStore2.invoke(this::initDataStore);
+    dataStore3.invoke(this::initDataStore);
+    accessor.invoke(this::initAccessor);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+  }
+
+  protected RegionShortcut getRegionShortCut() {
+    return RegionShortcut.PARTITION_REDUNDANT;
+  }
+
+  protected Properties getProperties() {
+    Properties properties = new Properties();
+    properties.setProperty("log-level", "info");
+    return properties;
+  }
+
+  private Region getRegion(boolean isClient) {
+    if (isClient) {
+      return getClientCache().getRegion(REGION_NAME);
+    } else {
+      return getCache().getRegion(REGION_NAME);
+    }
+  }
+
+  private void verifyRegionSize(boolean isClient, int expectedNum) {
+    assertThat(getRegion(isClient).size()).isEqualTo(expectedNum);
+  }
+
+  private void initClientCache() {
+    Region region = getClientCache().createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
+        .create(REGION_NAME);
+    region.registerInterestForAllKeys(InterestResultPolicy.KEYS);
+  }
+
+  private void initDataStore() {
+    getCache().createRegionFactory(getRegionShortCut())
+        .setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(10).create())
+        .addCacheListener(new CountingCacheListener())
+        .create(REGION_NAME);
+  }
+
+  private void initAccessor() {
+    RegionShortcut shortcut = getRegionShortCut();
+    if (shortcut.isPersistent()) {
+      if (shortcut == RegionShortcut.PARTITION_PERSISTENT) {
+        shortcut = RegionShortcut.PARTITION;
+      } else if (shortcut == RegionShortcut.PARTITION_PERSISTENT_OVERFLOW) {
+        shortcut = RegionShortcut.PARTITION_OVERFLOW;
+      } else if (shortcut == RegionShortcut.PARTITION_REDUNDANT_PERSISTENT) {
+        shortcut = RegionShortcut.PARTITION_REDUNDANT;
+      } else if (shortcut == RegionShortcut.PARTITION_REDUNDANT_PERSISTENT_OVERFLOW) {
+        shortcut = RegionShortcut.PARTITION_REDUNDANT_OVERFLOW;
+      } else {
+        fail("Wrong region type:" + shortcut);
+      }
+    }
+    getCache().createRegionFactory(shortcut)
+        .setPartitionAttributes(
+            new PartitionAttributesFactory().setTotalNumBuckets(10).setLocalMaxMemory(0).create())
+        .setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(10).create())
+        .addCacheListener(new CountingCacheListener())
+        .create(REGION_NAME);
+  }
+
+  private void feed(boolean isClient) {
+    Region region = getRegion(isClient);
+    IntStream.range(0, NUM_ENTRIES).forEach(i -> region.put(i, "value" + i));
+  }
+
+  private void verifyServerRegionSize(int expectedNum) {
+    accessor.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore1.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore2.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore3.invoke(() -> verifyRegionSize(false, expectedNum));
+  }
+
+  private void verifyClientRegionSize(int expectedNum) {
+    client1.invoke(() -> verifyRegionSize(true, expectedNum));
+    // TODO: verify on client2 once clear events are delivered to registered clients
+    // client2.invoke(()->verifyRegionSize(true, expectedNum));
+  }
+
+  private void verifyCacheListenerTriggerCount(MemberVM serverVM) {
+    SerializableCallableIF<Integer> getListenerTriggerCount = () -> {
+      CountingCacheListener countingCacheListener =
+          (CountingCacheListener) getRegion(false).getAttributes()
+              .getCacheListeners()[0];
+      return countingCacheListener.getClears();
+    };
+
+    int count = accessor.invoke(getListenerTriggerCount)
+        + dataStore1.invoke(getListenerTriggerCount)
+        + dataStore2.invoke(getListenerTriggerCount)
+        + dataStore3.invoke(getListenerTriggerCount);
+    assertThat(count).isEqualTo(1);
+
+    if (serverVM != null) {
+      assertThat(serverVM.invoke(getListenerTriggerCount)).isEqualTo(1);
+    }
+  }
+
+  @Test
+  public void normalClearFromDataStore() {
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    dataStore1.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+    verifyCacheListenerTriggerCount(dataStore1);
+  }
+
+  @Test
+  public void normalClearFromAccessor() {
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    accessor.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+    verifyCacheListenerTriggerCount(accessor);
+  }
+
+  @Test
+  public void normalClearFromClient() {
+    client1.invoke(() -> feed(true));
+    verifyClientRegionSize(NUM_ENTRIES);
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    client1.invoke(() -> getRegion(true).clear());
+    verifyServerRegionSize(0);
+    verifyClientRegionSize(0);
+    verifyCacheListenerTriggerCount(null);
+  }
+
+  private static class CountingCacheListener extends CacheListenerAdapter {
+    private final AtomicInteger clears = new AtomicInteger();
+
+    @Override
+    public void afterRegionClear(RegionEvent event) {
+      Region region = event.getRegion();
+      logger.info("Region " + region.getFullPath() + " is cleared.");
+      clears.incrementAndGet();
+    }
+
+    int getClears() {
+      return clears.get();
+    }
+  }
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionPersistentClearDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionPersistentClearDUnitTest.java
new file mode 100644
index 0000000..847699b
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionPersistentClearDUnitTest.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+
+
+import org.apache.geode.cache.RegionShortcut;
+
+public class PartitionedRegionPersistentClearDUnitTest extends PartitionedRegionClearDUnitTest {
+
+  protected RegionShortcut getRegionShortCut() {
+    return RegionShortcut.PARTITION_REDUNDANT_PERSISTENT_OVERFLOW;
+  }
+}
diff --git a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleNodeOperationsJUnitTest.java b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleNodeOperationsJUnitTest.java
index b37945b..4f36060 100644
--- a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleNodeOperationsJUnitTest.java
+++ b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleNodeOperationsJUnitTest.java
@@ -25,7 +25,6 @@ import static org.junit.Assert.fail;
 
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.NoSuchElementException;
 import java.util.Set;
@@ -1298,71 +1297,6 @@ public class PartitionedRegionSingleNodeOperationsJUnitTest {
     }
   }
 
-  @Test
-  public void test023UnsupportedOps() throws Exception {
-    Region pr = null;
-    try {
-      pr = PartitionedRegionTestHelper.createPartitionedRegion("testUnsupportedOps",
-          String.valueOf(200), 0);
-
-      pr.put(new Integer(1), "one");
-      pr.put(new Integer(2), "two");
-      pr.put(new Integer(3), "three");
-      pr.getEntry("key");
-
-      try {
-        pr.clear();
-        fail(
-            "PartitionedRegionSingleNodeOperationTest:testUnSupportedOps() operation failed on a blank PartitionedRegion");
-      } catch (UnsupportedOperationException expected) {
-      }
-
-      // try {
-      // pr.entries(true);
-      // fail();
-      // }
-      // catch (UnsupportedOperationException expected) {
-      // }
-
-      // try {
-      // pr.entrySet(true);
-      // fail();
-      // }
-      // catch (UnsupportedOperationException expected) {
-      // }
-
-      try {
-        HashMap data = new HashMap();
-        data.put("foo", "bar");
-        data.put("bing", "bam");
-        data.put("supper", "hero");
-        pr.putAll(data);
-        // fail("testPutAll() does NOT throw UnsupportedOperationException");
-      } catch (UnsupportedOperationException onse) {
-      }
-
-
-      // try {
-      // pr.values();
-      // fail("testValues() does NOT throw UnsupportedOperationException");
-      // }
-      // catch (UnsupportedOperationException expected) {
-      // }
-
-
-      try {
-        pr.containsValue("foo");
-      } catch (UnsupportedOperationException ex) {
-        fail("PartitionedRegionSingleNodeOperationTest:testContainsValue() operation failed");
-      }
-
-    } finally {
-      if (pr != null) {
-        pr.destroyRegion();
-      }
-    }
-  }
-
   /**
    * This method validates size operations. It verifies that it returns correct size of the
    * PartitionedRegion.
diff --git a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
index 8e522a2..fb83c84 100644
--- a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
@@ -1377,8 +1377,8 @@ fromData,27
 toData,27
 
 org/apache/geode/internal/cache/partitioned/ClearPRMessage,2
-fromData,30
-toData,44
+fromData,19
+toData,36
 
 org/apache/geode/internal/cache/partitioned/ClearPRMessage$ClearReplyMessage,2
 fromData,17
diff --git a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
index 504e7d1..26d92c9 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
@@ -289,6 +289,7 @@ import org.apache.geode.internal.cache.partitioned.BucketCountLoadProbe;
 import org.apache.geode.internal.cache.partitioned.BucketProfileUpdateMessage;
 import org.apache.geode.internal.cache.partitioned.BucketSizeMessage;
 import org.apache.geode.internal.cache.partitioned.BucketSizeMessage.BucketSizeReplyMessage;
+import org.apache.geode.internal.cache.partitioned.ClearPRMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage.ContainsKeyValueReplyMessage;
 import org.apache.geode.internal.cache.partitioned.CreateBucketMessage;
@@ -985,6 +986,8 @@ public class DSFIDFactory implements DataSerializableFixedID {
     serializer.registerDSFID(GATEWAY_SENDER_QUEUE_ENTRY_SYNCHRONIZATION_ENTRY,
         GatewaySenderQueueEntrySynchronizationOperation.GatewaySenderQueueEntrySynchronizationEntry.class);
     serializer.registerDSFID(ABORT_BACKUP_REQUEST, AbortBackupRequest.class);
+    serializer.registerDSFID(PR_CLEAR_MESSAGE, ClearPRMessage.class);
+    serializer.registerDSFID(PR_CLEAR_REPLY_MESSAGE, ClearPRMessage.ClearReplyMessage.class);
     serializer.registerDSFID(HOST_AND_PORT, HostAndPort.class);
     serializer.registerDSFID(DISTRIBUTED_PING_MESSAGE, DistributedPingMessage.class);
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
index 489d85a..84b5a3b 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
@@ -192,10 +192,6 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
   @MutableForTesting
   public static boolean ignoreReconnect = false;
 
-  /**
-   * Lock to prevent multiple threads on this member from performing a clear at the same time.
-   */
-  private final Object clearLock = new Object();
   private final ReentrantReadWriteLock failedInitialImageLock = new ReentrantReadWriteLock(true);
 
   @MakeNotStatic
@@ -933,11 +929,6 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
     }
   }
 
-  private void lockCheckReadiness() {
-    cache.getCancelCriterion().checkCancelInProgress(null);
-    checkReadiness();
-  }
-
   @Override
   Object validatedDestroy(Object key, EntryEventImpl event)
       throws TimeoutException, EntryNotFoundException, CacheWriterException {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index 4236042..4268786 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -471,6 +471,11 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
   private final Lock clientMetaDataLock = new ReentrantLock();
 
   /**
+   * Lock to prevent multiple threads on this member from performing a clear at the same time.
+   */
+  protected final Object clearLock = new Object();
+
+  /**
    * Lock for updating the cache service profile for the region.
    */
   private final Lock cacheServiceProfileUpdateLock = new ReentrantLock();
@@ -2748,6 +2753,11 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     checkRegionDestroyed(true);
   }
 
+  protected void lockCheckReadiness() {
+    cache.getCancelCriterion().checkCancelInProgress(null);
+    checkReadiness();
+  }
+
   /**
    * This method should be called when the caller cannot locate an entry and that condition is
    * unexpected. This will first double check the cache and region state before throwing an
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index 8411a13..1aa427a 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -181,6 +181,7 @@ import org.apache.geode.internal.cache.execute.PartitionedRegionFunctionResultWa
 import org.apache.geode.internal.cache.execute.RegionFunctionContextImpl;
 import org.apache.geode.internal.cache.execute.ServerToClientFunctionResultSender;
 import org.apache.geode.internal.cache.ha.ThreadIdentifier;
+import org.apache.geode.internal.cache.partitioned.ClearPRMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage.ContainsKeyValueResponse;
 import org.apache.geode.internal.cache.partitioned.DestroyMessage;
@@ -2173,18 +2174,202 @@ public class PartitionedRegion extends LocalRegion
     throw new UnsupportedOperationException();
   }
 
-  /**
-   * @since GemFire 5.0
-   * @throws UnsupportedOperationException OVERRIDES
-   */
   @Override
-  public void clear() {
-    throw new UnsupportedOperationException();
+  void basicClear(RegionEventImpl regionEvent, boolean cacheWrite) {
+    final boolean isDebugEnabled = logger.isDebugEnabled();
+    synchronized (clearLock) {
+      final DistributedLockService lockService = getPartitionedRegionLockService();
+      try {
+        lockService.lock("_clearOperation" + this.getFullPath().replace('/', '_'), -1, -1);
+      } catch (IllegalStateException e) {
+        lockCheckReadiness();
+        throw e;
+      }
+      try {
+        if (cache.isCacheAtShutdownAll()) {
+          throw cache.getCacheClosedException("Cache is shutting down");
+        }
+
+        // create ClearPRMessage per bucket
+        List<ClearPRMessage> clearMsgList = createClearPRMessages();
+        for (ClearPRMessage clearPRMessage : clearMsgList) {
+          int bucketId = clearPRMessage.getBucketId();
+          checkReadiness();
+          long sendMessagesStartTime = 0;
+          if (isDebugEnabled) {
+            sendMessagesStartTime = System.currentTimeMillis();
+          }
+          try {
+            sendClearMsgByBucket(bucketId, clearPRMessage);
+          } catch (PartitionOfflineException poe) {
+            // TODO add a PartialResultException
+            logger.info("PR.sendClearMsgByBucket encountered PartitionOfflineException at bucket "
+                + bucketId, poe);
+          } catch (Exception e) {
+            logger.info("PR.sendClearMsgByBucket encountered exception at bucket " + bucketId, e);
+          }
+
+          if (isDebugEnabled) {
+            long now = System.currentTimeMillis();
+            logger.debug("PR.sendClearMsgByBucket for bucket {} took {} ms", bucketId,
+                (now - sendMessagesStartTime));
+          }
+          // TODO add psStats
+        }
+      } finally {
+        try {
+          lockService.unlock("_clearOperation" + this.getFullPath().replace('/', '_'));
+        } catch (IllegalStateException e) {
+          lockCheckReadiness();
+        }
+      }
+
+      // notify bridge clients at PR level
+      regionEvent.setEventType(EnumListenerEvent.AFTER_REGION_CLEAR);
+      boolean hasListener = hasListener();
+      if (hasListener) {
+        dispatchListenerEvent(EnumListenerEvent.AFTER_REGION_CLEAR, regionEvent);
+      }
+      notifyBridgeClients(regionEvent);
+      logger.info("Partitioned region {} finished clear operation.", this.getFullPath());
+    }
   }
 
-  @Override
-  void basicClear(RegionEventImpl regionEvent, boolean cacheWrite) {
-    throw new UnsupportedOperationException();
+  void sendClearMsgByBucket(final Integer bucketId, ClearPRMessage clearPRMessage) {
+    RetryTimeKeeper retryTime = null;
+    InternalDistributedMember currentTarget = getNodeForBucketWrite(bucketId, null);
+    if (logger.isDebugEnabled()) {
+      logger.debug("PR.sendClearMsgByBucket:bucket {}'s currentTarget is {}", bucketId,
+          currentTarget);
+    }
+
+    long timeOut = 0;
+    int count = 0;
+    while (true) {
+      switch (count) {
+        case 0:
+          // Note we don't check for DM cancellation in common case.
+          // First time. Assume success, keep going.
+          break;
+        case 1:
+          this.cache.getCancelCriterion().checkCancelInProgress(null);
+          // Second time (first failure). Calculate timeout and keep going.
+          timeOut = System.currentTimeMillis() + this.retryTimeout;
+          break;
+        default:
+          this.cache.getCancelCriterion().checkCancelInProgress(null);
+          // test for timeout
+          long timeLeft = timeOut - System.currentTimeMillis();
+          if (timeLeft < 0) {
+            PRHARedundancyProvider.timedOut(this, null, null, "clear a bucket" + bucketId,
+                this.retryTimeout);
+            // NOTREACHED
+          }
+
+          // Didn't time out. Sleep a bit and then continue
+          boolean interrupted = Thread.interrupted();
+          try {
+            Thread.sleep(PartitionedRegionHelper.DEFAULT_WAIT_PER_RETRY_ITERATION);
+          } catch (InterruptedException ignore) {
+            interrupted = true;
+          } finally {
+            if (interrupted) {
+              Thread.currentThread().interrupt();
+            }
+          }
+          break;
+      } // switch
+      count++;
+
+      if (currentTarget == null) { // pick target
+        checkReadiness();
+        if (retryTime == null) {
+          retryTime = new RetryTimeKeeper(this.retryTimeout);
+        }
+
+        currentTarget = waitForNodeOrCreateBucket(retryTime, null, bucketId, false);
+        if (currentTarget == null) {
+          // the bucket does not exist, no need to clear
+          logger.info("Bucket " + bucketId + " does not contain data, no need to clear");
+          return;
+        } else {
+          if (logger.isDebugEnabled()) {
+            logger.debug("PR.sendClearMsgByBucket: new currentTarget is {}", currentTarget);
+          }
+        }
+
+        // It's possible this is a GemFire thread e.g. ServerConnection
+        // which got to this point because of a distributed system shutdown or
+        // region closure which uses interrupt to break any sleep() or wait() calls
+        // e.g. waitForPrimary or waitForBucketRecovery in which case throw exception
+        checkShutdown();
+        continue;
+      } // pick target
+
+      boolean result = false;
+      try {
+        final boolean isLocal = (this.localMaxMemory > 0) && currentTarget.equals(getMyId());
+        if (isLocal) {
+          result = clearPRMessage.doLocalClear(this);
+        } else {
+          ClearPRMessage.ClearResponse response = clearPRMessage.send(currentTarget, this);
+          if (response != null) {
+            this.prStats.incPartitionMessagesSent();
+            result = response.waitForResult();
+          }
+        }
+        if (result) {
+          return;
+        }
+      } catch (ForceReattemptException fre) {
+        checkReadiness();
+        InternalDistributedMember lastTarget = currentTarget;
+        if (retryTime == null) {
+          retryTime = new RetryTimeKeeper(this.retryTimeout);
+        }
+        currentTarget = getNodeForBucketWrite(bucketId, retryTime);
+        if (lastTarget.equals(currentTarget)) {
+          if (logger.isDebugEnabled()) {
+            logger.debug("PR.sendClearMsgByBucket: Retrying at the same node:{} due to {}",
+                currentTarget, fre.getMessage());
+          }
+          if (retryTime.overMaximum()) {
+            PRHARedundancyProvider.timedOut(this, null, null, "clear a bucket",
+                this.retryTimeout);
+            // NOTREACHED
+          }
+          retryTime.waitToRetryNode();
+        } else {
+          if (logger.isDebugEnabled()) {
+            logger.debug("PR.sendClearMsgByBucket: Old target was {}, Retrying {}", lastTarget,
+                currentTarget);
+          }
+        }
+      }
+
+      // It's possible this is a GemFire thread e.g. ServerConnection
+      // which got to this point because of a distributed system shutdown or
+      // region closure which uses interrupt to break any sleep() or wait()
+      // calls
+      // e.g. waitForPrimary or waitForBucketRecovery in which case throw
+      // exception
+      checkShutdown();
+
+      // If we get here, the attempt failed...
+      if (count == 1) {
+        // TODO prStats add ClearPRMsg retried
+        this.prStats.incPutAllMsgsRetried();
+      }
+    }
+  }
+
+  List<ClearPRMessage> createClearPRMessages() {
+    ArrayList<ClearPRMessage> clearMsgList = new ArrayList<>();
+    for (int bucketId = 0; bucketId < this.totalNumberOfBuckets; bucketId++) {
+      ClearPRMessage clearPRMessage = new ClearPRMessage(bucketId);
+      clearMsgList.add(clearPRMessage);
+    }
+    return clearMsgList;
   }
 
   @Override
@@ -2603,7 +2788,7 @@ public class PartitionedRegion extends LocalRegion
             retryTime = new RetryTimeKeeper(this.retryTimeout);
           }
 
-          currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId);
+          currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId, true);
           if (isDebugEnabled) {
             logger.debug("PR.sendMsgByBucket: event size is {}, new currentTarget is {}",
                 getEntrySize(event), currentTarget);
@@ -2742,7 +2927,7 @@ public class PartitionedRegion extends LocalRegion
             retryTime = new RetryTimeKeeper(this.retryTimeout);
           }
 
-          currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId);
+          currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId, true);
           if (logger.isDebugEnabled()) {
             logger.debug("PR.sendMsgByBucket: event size is {}, new currentTarget is {}",
                 getEntrySize(event), currentTarget);
@@ -2987,7 +3172,7 @@ public class PartitionedRegion extends LocalRegion
         if (retryTime == null) {
           retryTime = new RetryTimeKeeper(this.retryTimeout);
         }
-        currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId);
+        currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId, true);
 
         // It's possible this is a GemFire thread e.g. ServerConnection
         // which got to this point because of a distributed system shutdown or
@@ -3146,10 +3331,11 @@ public class PartitionedRegion extends LocalRegion
    * @param retryTime the RetryTimeKeeper to track retry times
    * @param event the event used to get the entry size in the event a new bucket should be created
    * @param bucketId the identity of the bucket should it be created
+   * @param createIfNotExist whether to create the bucket if it does not already exist
    * @return a Node which contains the bucket, potentially null
    */
   private InternalDistributedMember waitForNodeOrCreateBucket(RetryTimeKeeper retryTime,
-      EntryEventImpl event, Integer bucketId) {
+      EntryEventImpl event, Integer bucketId, boolean createIfNotExist) {
     InternalDistributedMember newNode;
     if (retryTime.overMaximum()) {
       PRHARedundancyProvider.timedOut(this, null, null, "allocate a bucket",
@@ -3159,7 +3345,7 @@ public class PartitionedRegion extends LocalRegion
 
     retryTime.waitForBucketsRecovery();
     newNode = getNodeForBucketWrite(bucketId, retryTime);
-    if (newNode == null) {
+    if (newNode == null && createIfNotExist) {
       newNode = createBucket(bucketId, getEntrySize(event), retryTime);
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEventImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEventImpl.java
index fba513d..49dc932 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEventImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEventImpl.java
@@ -119,6 +119,11 @@ public class RegionEventImpl
     return region;
   }
 
+  public void setRegion(LocalRegion region) {
+    this.region = region;
+    this.distributedMember = region.getMyId();
+  }
+
   @Override
   public Operation getOperation() {
     return this.op;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
index 1a8aba1..9fa8057 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
@@ -26,7 +26,8 @@ import org.apache.logging.log4j.Logger;
 import org.apache.geode.DataSerializer;
 import org.apache.geode.annotations.VisibleForTesting;
 import org.apache.geode.cache.CacheException;
-import org.apache.geode.distributed.DistributedLockService;
+import org.apache.geode.cache.Operation;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.ClusterDistributionManager;
 import org.apache.geode.distributed.internal.DirectReplyProcessor;
@@ -44,7 +45,6 @@ import org.apache.geode.internal.cache.BucketRegion;
 import org.apache.geode.internal.cache.EventID;
 import org.apache.geode.internal.cache.ForceReattemptException;
 import org.apache.geode.internal.cache.PartitionedRegion;
-import org.apache.geode.internal.cache.PartitionedRegionHelper;
 import org.apache.geode.internal.cache.RegionEventImpl;
 import org.apache.geode.internal.logging.log4j.LogMarker;
 import org.apache.geode.internal.serialization.DeserializationContext;
@@ -54,16 +54,10 @@ import org.apache.geode.logging.internal.log4j.api.LogService;
 public class ClearPRMessage extends PartitionMessageWithDirectReply {
   private static final Logger logger = LogService.getLogger();
 
-  private RegionEventImpl regionEvent;
-
   private Integer bucketId;
 
-  /** The time in ms to wait for a lock to be obtained during doLocalClear() */
-  public static final int LOCK_WAIT_TIMEOUT_MS = 1000;
   public static final String BUCKET_NON_PRIMARY_MESSAGE =
       "The bucket region on target member is no longer primary";
-  public static final String BUCKET_REGION_LOCK_UNAVAILABLE_MESSAGE =
-      "A lock for the bucket region could not be obtained.";
   public static final String EXCEPTION_THROWN_DURING_CLEAR_OPERATION =
       "An exception was thrown during the local clear operation: ";
 
@@ -79,14 +73,6 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
 
   public ClearPRMessage(int bucketId) {
     this.bucketId = bucketId;
-
-    // These are both used by the parent class, but don't apply to this message type
-    this.notificationOnly = false;
-    this.posDup = false;
-  }
-
-  public void setRegionEvent(RegionEventImpl event) {
-    regionEvent = event;
   }
 
   public void initMessage(PartitionedRegion region, Set<InternalDistributedMember> recipients,
@@ -103,16 +89,6 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
     }
   }
 
-  @Override
-  public boolean isSevereAlertCompatible() {
-    // allow forced-disconnect processing for all cache op messages
-    return true;
-  }
-
-  public RegionEventImpl getRegionEvent() {
-    return regionEvent;
-  }
-
   public ClearResponse send(DistributedMember recipient, PartitionedRegion region)
       throws ForceReattemptException {
     Set<InternalDistributedMember> recipients =
@@ -125,7 +101,7 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
 
     Set<InternalDistributedMember> failures = region.getDistributionManager().putOutgoing(this);
     if (failures != null && failures.size() > 0) {
-      throw new ForceReattemptException("Failed sending <" + this + ">");
+      throw new ForceReattemptException("Failed sending <" + this + "> due to " + failures);
     }
     return clearResponse;
   }
@@ -143,7 +119,6 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
     } else {
       InternalDataSerializer.writeSignedVL(bucketId, out);
     }
-    DataSerializer.writeObject(regionEvent, out);
   }
 
   @Override
@@ -151,12 +126,11 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
       throws IOException, ClassNotFoundException {
     super.fromData(in, context);
     this.bucketId = (int) InternalDataSerializer.readSignedVL(in);
-    this.regionEvent = DataSerializer.readObject(in);
   }
 
   @Override
   public EventID getEventID() {
-    return regionEvent.getEventId();
+    return null;
   }
 
   /**
@@ -169,60 +143,51 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
   protected boolean operateOnPartitionedRegion(ClusterDistributionManager distributionManager,
       PartitionedRegion region, long startTime) {
     try {
-      result = doLocalClear(region);
+      this.result = doLocalClear(region);
     } catch (ForceReattemptException ex) {
       sendReply(getSender(), getProcessorId(), distributionManager, new ReplyException(ex), region,
           startTime);
       return false;
     }
-    sendReply(getSender(), getProcessorId(), distributionManager, null, region, startTime);
-    return false;
+    return this.result;
   }
 
-  public boolean doLocalClear(PartitionedRegion region) throws ForceReattemptException {
+  public Integer getBucketId() {
+    return this.bucketId;
+  }
+
+  public boolean doLocalClear(PartitionedRegion region)
+      throws ForceReattemptException {
     // Retrieve local bucket region which matches target bucketId
-    BucketRegion bucketRegion = region.getDataStore().getInitializedBucketForId(null, bucketId);
+    BucketRegion bucketRegion =
+        region.getDataStore().getInitializedBucketForId(null, this.bucketId);
 
-    // Check if we are primary, throw exception if not
-    if (!bucketRegion.isPrimary()) {
+    boolean lockedForPrimary = bucketRegion.doLockForPrimary(false);
+    // Check if we obtained primary lock, throw exception if not
+    if (!lockedForPrimary) {
       throw new ForceReattemptException(BUCKET_NON_PRIMARY_MESSAGE);
     }
-
-    DistributedLockService lockService = getPartitionRegionLockService();
-    String lockName = bucketRegion.getFullPath();
     try {
-      boolean locked = lockService.lock(lockName, LOCK_WAIT_TIMEOUT_MS, -1);
-
-      if (!locked) {
-        throw new ForceReattemptException(BUCKET_REGION_LOCK_UNAVAILABLE_MESSAGE);
-      }
-
-      // Double check if we are still primary, as this could have changed between our first check
-      // and obtaining the lock
-      if (!bucketRegion.isPrimary()) {
-        throw new ForceReattemptException(BUCKET_NON_PRIMARY_MESSAGE);
-      }
-
-      try {
-        bucketRegion.cmnClearRegion(regionEvent, true, true);
-      } catch (Exception ex) {
-        throw new ForceReattemptException(
-            EXCEPTION_THROWN_DURING_CLEAR_OPERATION + ex.getClass().getName(), ex);
-      }
-
+      RegionEventImpl regionEvent = new RegionEventImpl();
+      regionEvent.setOperation(Operation.REGION_CLEAR);
+      regionEvent.setRegion(bucketRegion);
+      bucketRegion.cmnClearRegion(regionEvent, true, true);
+    } catch (PartitionOfflineException poe) {
+      logger.info(
+          "All members holding data for bucket {} are offline, no more retries will be attempted",
+          this.bucketId,
+          poe);
+      throw poe;
+    } catch (Exception ex) {
+      throw new ForceReattemptException(
+          EXCEPTION_THROWN_DURING_CLEAR_OPERATION + ex.getClass().getName(), ex);
     } finally {
-      lockService.unlock(lockName);
+      bucketRegion.doUnlockForPrimary();
     }
 
     return true;
   }
 
-  // Extracted for testing
-  protected DistributedLockService getPartitionRegionLockService() {
-    return DistributedLockService
-        .getServiceNamed(PartitionedRegionHelper.PARTITION_LOCK_SERVICE_NAME);
-  }
-
   @Override
   public boolean canStartRemoteTransaction() {
     return false;
@@ -247,39 +212,7 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
     buff.append("; bucketId=").append(this.bucketId);
   }
 
-  @Override
-  public String toString() {
-    StringBuilder buff = new StringBuilder();
-    String className = getClass().getName();
-    buff.append(className.substring(className.indexOf(PN_TOKEN) + PN_TOKEN.length())); // partition.<foo>
-    buff.append("(prid="); // make sure this is the first one
-    buff.append(this.regionId);
-
-    // Append name, if we have it
-    String name = null;
-    try {
-      PartitionedRegion region = PartitionedRegion.getPRFromId(this.regionId);
-      if (region != null) {
-        name = region.getFullPath();
-      }
-    } catch (Exception ignore) {
-      /* ignored */
-    }
-    if (name != null) {
-      buff.append(" (name = \"").append(name).append("\")");
-    }
-
-    appendFields(buff);
-    buff.append(" ,distTx=");
-    buff.append(this.isTransactionDistributed);
-    buff.append(")");
-    return buff.toString();
-  }
-
   public static class ClearReplyMessage extends ReplyMessage {
-    /** Result of the Clear operation */
-    boolean result;
-
     @Override
     public boolean getInlineProcess() {
       return true;
@@ -293,16 +226,21 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
 
     private ClearReplyMessage(int processorId, boolean result, ReplyException ex) {
       super();
-      this.result = result;
       setProcessorId(processorId);
-      setException(ex);
+      if (ex != null) {
+        setException(ex);
+      } else {
+        setReturnValue(result);
+      }
     }
 
-    /** Send an ack */
+    /**
+     * Send an ack
+     */
     public static void send(InternalDistributedMember recipient, int processorId,
         ReplySender replySender,
         boolean result, ReplyException ex) {
-      Assert.assertTrue(recipient != null, "ClearReplyMessage NULL reply message");
+      Assert.assertNotNull(recipient, "ClearReplyMessage recipient was NULL.");
       ClearReplyMessage message = new ClearReplyMessage(processorId, result, ex);
       message.setRecipient(recipient);
       replySender.putOutgoing(message);
@@ -340,23 +278,11 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
     }
 
     @Override
-    public void fromData(DataInput in,
-        DeserializationContext context) throws IOException, ClassNotFoundException {
-      super.fromData(in, context);
-      this.result = in.readBoolean();
-    }
-
-    @Override
-    public void toData(DataOutput out,
-        SerializationContext context) throws IOException {
-      super.toData(out, context);
-      out.writeBoolean(this.result);
-    }
-
-    @Override
     public String toString() {
-      return "ClearReplyMessage " + "processorid=" + this.processorId + " returning " + this.result
-          + " exception=" + getException();
+      StringBuilder stringBuilder = new StringBuilder(super.toString());
+      stringBuilder.append(" returnValue=");
+      stringBuilder.append(getReturnValue());
+      return stringBuilder.toString();
     }
   }
 
@@ -372,7 +298,9 @@ public class ClearPRMessage extends PartitionMessageWithDirectReply {
     }
 
     public void setResponse(ClearReplyMessage response) {
-      this.returnValue = response.result;
+      if (response.getException() == null) {
+        this.returnValue = (boolean) response.getReturnValue();
+      }
     }
 
     /**
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/ClearPRMessageTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/ClearPRMessageTest.java
index 2cf5231..acdd4fc 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/ClearPRMessageTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/ClearPRMessageTest.java
@@ -20,7 +20,6 @@ import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.ArgumentMatchers.notNull;
 import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
 import static org.mockito.Mockito.doNothing;
@@ -38,7 +37,6 @@ import java.util.Set;
 import org.junit.Before;
 import org.junit.Test;
 
-import org.apache.geode.distributed.DistributedLockService;
 import org.apache.geode.distributed.internal.ClusterDistributionManager;
 import org.apache.geode.distributed.internal.DMStats;
 import org.apache.geode.distributed.internal.DistributionManager;
@@ -50,6 +48,7 @@ import org.apache.geode.internal.cache.ForceReattemptException;
 import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.internal.cache.PartitionedRegionDataStore;
 import org.apache.geode.internal.cache.PartitionedRegionStats;
+import org.apache.geode.internal.cache.RegionEventImpl;
 
 public class ClearPRMessageTest {
 
@@ -61,11 +60,14 @@ public class ClearPRMessageTest {
   @Before
   public void setup() throws ForceReattemptException {
     message = spy(new ClearPRMessage());
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
     region = mock(PartitionedRegion.class, RETURNS_DEEP_STUBS);
     dataStore = mock(PartitionedRegionDataStore.class);
     when(region.getDataStore()).thenReturn(dataStore);
+    when(region.getFullPath()).thenReturn("/test");
     bucketRegion = mock(BucketRegion.class);
     when(dataStore.getInitializedBucketForId(any(), any())).thenReturn(bucketRegion);
+    RegionEventImpl bucketRegionEventImpl = mock(RegionEventImpl.class);
   }
 
   @Test
@@ -79,44 +81,19 @@ public class ClearPRMessageTest {
 
   @Test
   public void doLocalClearThrowsExceptionWhenLockCannotBeObtained() {
-    DistributedLockService mockLockService = mock(DistributedLockService.class);
-    doReturn(mockLockService).when(message).getPartitionRegionLockService();
-
-    when(mockLockService.lock(anyString(), anyLong(), anyLong())).thenReturn(false);
-    when(bucketRegion.isPrimary()).thenReturn(true);
-
-    assertThatThrownBy(() -> message.doLocalClear(region))
-        .isInstanceOf(ForceReattemptException.class)
-        .hasMessageContaining(ClearPRMessage.BUCKET_REGION_LOCK_UNAVAILABLE_MESSAGE);
-  }
-
-  @Test
-  public void doLocalClearThrowsExceptionWhenBucketIsNotPrimaryAfterObtainingLock() {
-    DistributedLockService mockLockService = mock(DistributedLockService.class);
-    doReturn(mockLockService).when(message).getPartitionRegionLockService();
-
-    // Be primary on the first check, then be not primary on the second check
-    when(bucketRegion.isPrimary()).thenReturn(true).thenReturn(false);
-    when(mockLockService.lock(any(), anyLong(), anyLong())).thenReturn(true);
+    when(bucketRegion.doLockForPrimary(false)).thenReturn(false);
 
     assertThatThrownBy(() -> message.doLocalClear(region))
         .isInstanceOf(ForceReattemptException.class)
         .hasMessageContaining(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
-    // Confirm that we actually obtained and released the lock
-    verify(mockLockService, times(1)).lock(any(), anyLong(), anyLong());
-    verify(mockLockService, times(1)).unlock(any());
   }
 
   @Test
   public void doLocalClearThrowsForceReattemptExceptionWhenAnExceptionIsThrownDuringClearOperation() {
-    DistributedLockService mockLockService = mock(DistributedLockService.class);
-    doReturn(mockLockService).when(message).getPartitionRegionLockService();
     NullPointerException exception = new NullPointerException("Error encountered");
     doThrow(exception).when(bucketRegion).cmnClearRegion(any(), anyBoolean(), anyBoolean());
 
-    // Be primary on the first check, then be not primary on the second check
-    when(bucketRegion.isPrimary()).thenReturn(true);
-    when(mockLockService.lock(any(), anyLong(), anyLong())).thenReturn(true);
+    when(bucketRegion.doLockForPrimary(false)).thenReturn(true);
 
     assertThatThrownBy(() -> message.doLocalClear(region))
         .isInstanceOf(ForceReattemptException.class)
@@ -129,21 +106,13 @@ public class ClearPRMessageTest {
   @Test
   public void doLocalClearInvokesCmnClearRegionWhenBucketIsPrimaryAndLockIsObtained()
       throws ForceReattemptException {
-    DistributedLockService mockLockService = mock(DistributedLockService.class);
-    doReturn(mockLockService).when(message).getPartitionRegionLockService();
-
 
     // Be primary on the first check, then be not primary on the second check
-    when(bucketRegion.isPrimary()).thenReturn(true);
-    when(mockLockService.lock(any(), anyLong(), anyLong())).thenReturn(true);
+    when(bucketRegion.doLockForPrimary(false)).thenReturn(true);
     assertThat(message.doLocalClear(region)).isTrue();
 
     // Confirm that cmnClearRegion was called
     verify(bucketRegion, times(1)).cmnClearRegion(any(), anyBoolean(), anyBoolean());
-
-    // Confirm that we actually obtained and released the lock
-    verify(mockLockService, times(1)).lock(any(), anyLong(), anyLong());
-    verify(mockLockService, times(1)).unlock(any());
   }
 
   @Test
@@ -197,6 +166,7 @@ public class ClearPRMessageTest {
     int processorId = 1000;
     int startTime = 0;
 
+    doReturn(0).when(message).getBucketId();
     doReturn(true).when(message).doLocalClear(region);
     doReturn(sender).when(message).getSender();
     doReturn(processorId).when(message).getProcessorId();
@@ -206,8 +176,9 @@ public class ClearPRMessageTest {
     doNothing().when(message).sendReply(any(), anyInt(), any(), any(), any(), anyLong());
 
     message.operateOnPartitionedRegion(distributionManager, region, startTime);
+    assertThat(message.result).isTrue();
 
-    verify(message, times(1)).sendReply(sender, processorId, distributionManager, null, region,
+    verify(message, times(0)).sendReply(sender, processorId, distributionManager, null, region,
         startTime);
   }
 
@@ -222,6 +193,7 @@ public class ClearPRMessageTest {
     ForceReattemptException exception =
         new ForceReattemptException(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
 
+    doReturn(0).when(message).getBucketId();
     doThrow(exception).when(message).doLocalClear(region);
     doReturn(sender).when(message).getSender();
     doReturn(processorId).when(message).getProcessorId();


[geode] 17/22: GEODE-7680: PR.clear must be successful when interacting with rebalance (#5095)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 02336cdcd9cfb734c7f3abff97a65f51bec0e727
Author: Donal Evans <do...@pivotal.io>
AuthorDate: Thu Jul 23 08:14:17 2020 -0700

    GEODE-7680: PR.clear must be successful when interacting with rebalance (#5095)
    
    - Added DUnit tests to confirm that clear does not interfere with
    rebalance or vice versa
    - Test when member departs during clear/rebalance
    - Test when member joins during clear/rebalance
    - Fixed typo in PartitionedRegionClearWithExpirationDUnitTest
    - Fixed typo in PartitionedRegion
    - Call assignBucketsToPartitions() on leader colocated region during clear
    instead of target region
    
    Authored-by: Donal Evans <do...@pivotal.io>
---
 ...rtitionedRegionClearWithRebalanceDUnitTest.java | 578 +++++++++++++++++++++
 .../geode/internal/cache/ColocationHelper.java     |  10 +-
 .../internal/cache/PartitionedRegionClear.java     |   3 +-
 3 files changed, 583 insertions(+), 8 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithRebalanceDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithRebalanceDUnitTest.java
new file mode 100644
index 0000000..f53fab7
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithRebalanceDUnitTest.java
@@ -0,0 +1,578 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.cache.PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_DEFAULT;
+import static org.apache.geode.cache.RegionShortcut.PARTITION;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_PERSISTENT;
+import static org.apache.geode.internal.util.ArrayUtils.asList;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.IntStream;
+
+import junitparams.JUnitParamsRunner;
+import junitparams.Parameters;
+import junitparams.naming.TestCaseName;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.control.RebalanceFactory;
+import org.apache.geode.cache.control.RebalanceOperation;
+import org.apache.geode.cache.control.RebalanceResults;
+import org.apache.geode.cache.util.CacheWriterAdapter;
+import org.apache.geode.distributed.internal.DMStats;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.membership.api.MembershipManagerHelper;
+import org.apache.geode.test.awaitility.GeodeAwaitility;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.DUnitBlackboard;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedDiskDirRule;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+
+@RunWith(JUnitParamsRunner.class)
+public class PartitionedRegionClearWithRebalanceDUnitTest implements Serializable {
+  private static final long serialVersionUID = -7183993832801073933L;
+
+  private static final Integer BUCKETS = GLOBAL_MAX_BUCKETS_DEFAULT;
+  private static final String REGION_NAME = "testRegion";
+  private static final String COLOCATED_REGION = "childColocatedRegion";
+  private static final int ENTRIES = 10000;
+  private static final String DISK_STORE_SUFFIX = "DiskStore";
+  private static final String REBALANCE_HAS_BEGUN = "rebalance-begun";
+  private static final String CLEAR_HAS_BEGUN = "clear-begun";
+
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule(4);
+
+  @Rule
+  public CacheRule cacheRule = CacheRule.builder().createCacheInAll().build();
+
+  @Rule
+  public DistributedDiskDirRule distributedDiskDirRule = new DistributedDiskDirRule();
+
+  private static transient DUnitBlackboard blackboard;
+
+  private VM accessor;
+  private VM server1;
+  private VM server2;
+  private VM server3;
+
+  private enum TestVM {
+    ACCESSOR(0), SERVER1(1), SERVER2(2), SERVER3(3);
+
+    final int vmNumber;
+
+    TestVM(int vmNumber) {
+      this.vmNumber = vmNumber;
+    }
+  }
+
+  @SuppressWarnings("unused")
+  static Object[] coordinatorVMsAndRegionTypes() {
+    return new Object[] {
+        // {ClearCoordinatorVM, regionShortcut}
+        new Object[] {TestVM.SERVER1, PARTITION_REDUNDANT},
+        new Object[] {TestVM.ACCESSOR, PARTITION_REDUNDANT},
+        new Object[] {TestVM.SERVER1, PARTITION_REDUNDANT_PERSISTENT},
+        new Object[] {TestVM.ACCESSOR, PARTITION_REDUNDANT_PERSISTENT}
+    };
+  }
+
+  @SuppressWarnings("unused")
+  static Object[] coordinatorVMsAndRegionTypesNoAccessor() {
+    return new Object[] {
+        // {ClearCoordinatorVM, regionShortcut}
+        new Object[] {TestVM.SERVER1, PARTITION_REDUNDANT},
+        new Object[] {TestVM.SERVER2, PARTITION_REDUNDANT},
+        new Object[] {TestVM.SERVER1, PARTITION_REDUNDANT_PERSISTENT},
+        new Object[] {TestVM.SERVER2, PARTITION_REDUNDANT_PERSISTENT}
+    };
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    getBlackboard().initBlackboard();
+    server1 = getVM(TestVM.SERVER1.vmNumber);
+    server2 = getVM(TestVM.SERVER2.vmNumber);
+    server3 = getVM(TestVM.SERVER3.vmNumber);
+    accessor = getVM(TestVM.ACCESSOR.vmNumber);
+  }
+
+  private static DUnitBlackboard getBlackboard() {
+    if (blackboard == null) {
+      blackboard = new DUnitBlackboard();
+    }
+    return blackboard;
+  }
+
+  private RegionShortcut getRegionAccessorShortcut(RegionShortcut dataStoreRegionShortcut) {
+    if (dataStoreRegionShortcut.isPersistent()) {
+      switch (dataStoreRegionShortcut) {
+        case PARTITION_PERSISTENT:
+          return PARTITION;
+        case PARTITION_REDUNDANT_PERSISTENT:
+          return PARTITION_REDUNDANT;
+        default:
+          throw new IllegalArgumentException(
+              "Invalid RegionShortcut specified: " + dataStoreRegionShortcut);
+      }
+    }
+
+    return dataStoreRegionShortcut;
+  }
+
+  private void initAccessor(RegionShortcut regionShortcut, Collection<String> regionNames) {
+    RegionShortcut accessorShortcut = getRegionAccessorShortcut(regionShortcut);
+    // StartupRecoveryDelay is set to infinite to prevent automatic rebalancing when creating the
+    // region on other members
+    regionNames.forEach(regionName -> {
+      PartitionAttributesFactory<String, String> attributesFactory =
+          new PartitionAttributesFactory<String, String>()
+              .setTotalNumBuckets(BUCKETS)
+              .setStartupRecoveryDelay(-1)
+              .setLocalMaxMemory(0);
+
+      if (regionName.equals(COLOCATED_REGION)) {
+        attributesFactory.setColocatedWith(REGION_NAME);
+      }
+
+      cacheRule.getCache()
+          .<String, String>createRegionFactory(accessorShortcut)
+          .setPartitionAttributes(attributesFactory.create())
+          .create(regionName);
+    });
+  }
+
+  private void initDataStore(RegionShortcut regionShortcut, Collection<String> regionNames) {
+    // StartupRecoveryDelay is set to infinite to prevent automatic rebalancing when creating the
+    // region on other members
+    regionNames.forEach(regionName -> {
+      PartitionAttributesFactory<String, String> attributesFactory =
+          new PartitionAttributesFactory<String, String>()
+              .setTotalNumBuckets(BUCKETS)
+              .setStartupRecoveryDelay(-1);
+
+      if (regionName.equals(COLOCATED_REGION)) {
+        attributesFactory.setColocatedWith(REGION_NAME);
+      }
+
+      RegionFactory<String, String> factory = cacheRule.getCache()
+          .<String, String>createRegionFactory(regionShortcut)
+          .setPartitionAttributes(attributesFactory.create())
+          .setCacheWriter(new BlackboardSignaller());
+
+      // Set up the disk store if the region is persistent
+      if (regionShortcut.isPersistent()) {
+        factory.setDiskStoreName(cacheRule.getCache()
+            .createDiskStoreFactory()
+            .create(regionName + DISK_STORE_SUFFIX)
+            .getName());
+      }
+
+      factory.create(regionName);
+    });
+  }
+
+  private void parametrizedSetup(RegionShortcut regionShortcut, Collection<String> regionNames,
+      boolean useAccessor) {
+    // Create and populate the region on server1 first, to create an unbalanced distribution of data
+    server1.invoke(() -> {
+      initDataStore(regionShortcut, regionNames);
+      regionNames.forEach(regionName -> {
+        Region<String, String> region = cacheRule.getCache().getRegion(regionName);
+        IntStream.range(0, ENTRIES).forEach(i -> region.put("key" + i, "value" + i));
+      });
+    });
+    server2.invoke(() -> initDataStore(regionShortcut, regionNames));
+    if (useAccessor) {
+      accessor.invoke(() -> initAccessor(regionShortcut, regionNames));
+    } else {
+      server3.invoke(() -> initDataStore(regionShortcut, regionNames));
+    }
+  }
+
+  private void setBlackboardSignallerCacheWriter(String regionName) {
+    cacheRule.getCache().<String, String>getRegion(regionName).getAttributesMutator()
+        .setCacheWriter(new BlackboardSignaller());
+  }
+
+  private AsyncInvocation<?> startClearAsync(TestVM clearCoordinatorVM, String regionName,
+      boolean waitForRebalance) {
+    return getVM(clearCoordinatorVM.vmNumber).invokeAsync(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(regionName);
+      if (waitForRebalance) {
+        // Wait for the signal from the blackboard before triggering the clear to start
+        getBlackboard().waitForGate(REBALANCE_HAS_BEGUN, GeodeAwaitility.getTimeout().toMillis(),
+            TimeUnit.MILLISECONDS);
+      }
+      region.clear();
+    });
+  }
+
+  // Trigger a rebalance and wait until it has started restoring redundancy before signalling the
+  // blackboard
+  private AsyncInvocation<?> startRebalanceAsyncAndSignalBlackboard(boolean waitForClear) {
+    return server1.invokeAsync(() -> {
+      RebalanceFactory rebalance =
+          cacheRule.getCache().getResourceManager().createRebalanceFactory();
+      if (waitForClear) {
+        // Wait for the signal from the blackboard before triggering the rebalance to start
+        getBlackboard().waitForGate(CLEAR_HAS_BEGUN, GeodeAwaitility.getTimeout().toMillis(),
+            TimeUnit.MILLISECONDS);
+      }
+      RebalanceOperation op = rebalance.start();
+      await().untilAsserted(() -> assertThat(cacheRule.getCache().getInternalResourceManager()
+          .getStats().getRebalanceBucketCreatesCompleted()).isGreaterThan(0));
+      getBlackboard().signalGate(REBALANCE_HAS_BEGUN);
+      op.getResults();
+    });
+  }
+
+  private void executeClearAndRebalanceAsyncInvocations(TestVM clearCoordinatorVM,
+      String regionToClear, boolean rebalanceFirst) throws InterruptedException {
+    getVM(clearCoordinatorVM.vmNumber)
+        .invoke(() -> setBlackboardSignallerCacheWriter(regionToClear));
+
+    AsyncInvocation<?> clearInvocation = startClearAsync(clearCoordinatorVM, regionToClear,
+        rebalanceFirst);
+
+    AsyncInvocation<?> rebalanceInvocation =
+        startRebalanceAsyncAndSignalBlackboard(!rebalanceFirst);
+
+    clearInvocation.await();
+    rebalanceInvocation.await();
+  }
+
+  private void prepareMemberToShutdownOnClear() throws TimeoutException, InterruptedException {
+    getBlackboard().waitForGate(CLEAR_HAS_BEGUN, GeodeAwaitility.getTimeout().toMillis(),
+        TimeUnit.MILLISECONDS);
+    InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
+    MembershipManagerHelper.crashDistributedSystem(
+        InternalDistributedSystem.getConnectedInstance());
+    await().untilAsserted(
+        () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNull());
+  }
+
+  private void waitForSilenceOnRegion(String regionName) {
+    DMStats dmStats = cacheRule.getSystem().getDistributionManager().getStats();
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(regionName);
+    PartitionedRegionStats partitionedRegionStats = region.getPrStats();
+    await().untilAsserted(() -> {
+      assertThat(dmStats.getReplyWaitsInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getVolunteeringInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getPrimaryTransfersInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalanceBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalancePrimaryTransfersInProgress()).isEqualTo(0);
+    });
+  }
+
+  private void assertRegionIsEmpty(List<VM> vms, String regionName) {
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilenceOnRegion(regionName);
+      PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(regionName);
+
+      assertThat(region.getLocalSize()).as("Region local size should be 0 for region " + regionName)
+          .isEqualTo(0);
+    }));
+  }
+
+  private void assertRegionIsNotEmpty(List<VM> vms, String regionName) {
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilenceOnRegion(regionName);
+      PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(regionName);
+
+      assertThat(region.size()).as("Region size should be " + ENTRIES + " for region " + regionName)
+          .isEqualTo(ENTRIES);
+    }));
+  }
+
+  private void assertRebalanceDoesNoWork() {
+    server1.invoke(() -> {
+      RebalanceResults results =
+          cacheRule.getCache().getResourceManager().createRebalanceFactory().start().getResults();
+
+      assertThat(results.getTotalBucketTransfersCompleted())
+          .as("Expected bucket transfers to be zero").isEqualTo(0);
+      assertThat(results.getTotalBucketCreatesCompleted()).as("Expected bucket creates to be zero")
+          .isEqualTo(0);
+      assertThat(results.getTotalPrimaryTransfersCompleted())
+          .as("Expected primary transfers to be zero").isEqualTo(0);
+    });
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearRegionStartedAfterRebalanceClearsRegion(TestVM clearCoordinatorVM,
+      RegionShortcut regionType) throws InterruptedException {
+    parametrizedSetup(regionType, Collections.singleton(REGION_NAME), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, true);
+
+    // Assert that the region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the region was successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearRegionStartedBeforeRebalanceClearsRegion(TestVM clearCoordinatorVM,
+      RegionShortcut regionType) throws InterruptedException {
+    parametrizedSetup(regionType, Collections.singleton(REGION_NAME), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, false);
+
+    // Assert that the region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the region was successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearParentColocatedRegionStartedAfterRebalanceOfColocatedRegionsClearsRegionAndDoesNotInterfereWithRebalance(
+      TestVM clearCoordinatorVM, RegionShortcut regionType)
+      throws InterruptedException {
+    parametrizedSetup(regionType, asList(REGION_NAME, COLOCATED_REGION), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, true);
+
+    // Assert that the parent region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the colocated region is the correct size
+    assertRegionIsNotEmpty(asList(accessor, server1, server2), COLOCATED_REGION);
+
+    // Assert that the regions were successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearParentColocatedRegionStartedBeforeRebalanceOfColocatedRegionsClearsRegionAndDoesNotInterfereWithRebalance(
+      TestVM clearCoordinatorVM, RegionShortcut regionType)
+      throws InterruptedException {
+    parametrizedSetup(regionType, asList(REGION_NAME, COLOCATED_REGION), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, false);
+
+    // Assert that the parent region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the colocated region is the correct size
+    assertRegionIsNotEmpty(asList(accessor, server1, server2), COLOCATED_REGION);
+
+    // Assert that the regions were successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearChildColocatedRegionStartedAfterRebalanceOfColocatedRegionsClearsRegionAndDoesNotInterfereWithRebalance(
+      TestVM clearCoordinatorVM, RegionShortcut regionType)
+      throws InterruptedException {
+    parametrizedSetup(regionType, asList(REGION_NAME, COLOCATED_REGION), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, COLOCATED_REGION, true);
+
+    // Assert that the colocated region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), COLOCATED_REGION);
+
+    // Assert that the parent region is the correct size
+    assertRegionIsNotEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the regions were successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearChildColocatedRegionStartedBeforeRebalanceOfColocatedRegionsClearsRegionAndDoesNotInterfereWithRebalance(
+      TestVM clearCoordinatorVM, RegionShortcut regionType)
+      throws InterruptedException {
+    parametrizedSetup(regionType, asList(REGION_NAME, COLOCATED_REGION), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, COLOCATED_REGION, false);
+
+    // Assert that the colocated region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), COLOCATED_REGION);
+
+    // Assert that the parent region is the correct size
+    assertRegionIsNotEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the regions were successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypesNoAccessor")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearStartedBeforeRebalanceClearsRegionWhenNonCoordinatorMemberIsKilled(
+      TestVM clearCoordinatorVM, RegionShortcut regionType)
+      throws InterruptedException {
+    parametrizedSetup(regionType, Collections.singleton(REGION_NAME), false);
+
+    getVM(clearCoordinatorVM.vmNumber).invoke(() -> setBlackboardSignallerCacheWriter(REGION_NAME));
+
+    // Make server3 shut down when it receives the signal from the blackboard that clear has started
+    AsyncInvocation<?> shutdownInvocation =
+        server3.invokeAsync(this::prepareMemberToShutdownOnClear);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, false);
+
+    shutdownInvocation.await();
+
+    // Assert that the region is empty
+    assertRegionIsEmpty(asList(server1, server2), REGION_NAME);
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypesNoAccessor")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearStartedAfterRebalanceClearsRegionWhenNewMemberJoins(TestVM clearCoordinatorVM,
+      RegionShortcut regionType) throws InterruptedException {
+
+    // Load the data on server1 before creating the region on other servers, to create an imbalanced
+    // system
+    server1.invoke(() -> {
+      initDataStore(regionType, Collections.singleton(REGION_NAME));
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, ENTRIES).forEach(i -> region.put("key" + i, "value" + i));
+    });
+    server2.invoke(() -> initDataStore(regionType, Collections.singleton(REGION_NAME)));
+
+    // Wait for rebalance to start, then create the region on server3
+    AsyncInvocation<?> createRegion = server3.invokeAsync(() -> {
+      cacheRule.createCache();
+
+      PartitionAttributesFactory<String, String> attributesFactory =
+          new PartitionAttributesFactory<String, String>()
+              .setTotalNumBuckets(BUCKETS)
+              .setStartupRecoveryDelay(-1);
+
+      RegionFactory<String, String> factory = cacheRule.getCache()
+          .<String, String>createRegionFactory(regionType)
+          .setPartitionAttributes(attributesFactory.create())
+          .setCacheWriter(new BlackboardSignaller());
+
+      if (regionType.isPersistent()) {
+        factory.setDiskStoreName(cacheRule.getCache()
+            .createDiskStoreFactory()
+            .create(REGION_NAME + DISK_STORE_SUFFIX)
+            .getName());
+      }
+
+      getBlackboard().waitForGate(REBALANCE_HAS_BEGUN, GeodeAwaitility.getTimeout().toMillis(),
+          TimeUnit.MILLISECONDS);
+
+      factory.create(REGION_NAME);
+    });
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, true);
+
+    createRegion.await();
+
+    // Assert that the region is empty
+    assertRegionIsEmpty(asList(server1, server2, server3), REGION_NAME);
+  }
+
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypesNoAccessor")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearStartedBeforeRebalanceClearsRegionWhenNewMemberJoins(TestVM clearCoordinatorVM,
+      RegionShortcut regionType) throws InterruptedException {
+
+    // Load the data on server1 before creating the region on other servers, to create an imbalanced
+    // system
+    server1.invoke(() -> {
+      initDataStore(regionType, Collections.singleton(REGION_NAME));
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, ENTRIES).forEach(i -> region.put("key" + i, "value" + i));
+    });
+
+    server2.invoke(() -> initDataStore(regionType, Collections.singleton(REGION_NAME)));
+
+    // Wait for clear to start, then create the region on server3
+    AsyncInvocation<?> createRegion = server3.invokeAsync(() -> {
+      cacheRule.createCache();
+
+      PartitionAttributesFactory<String, String> attributesFactory =
+          new PartitionAttributesFactory<String, String>()
+              .setTotalNumBuckets(BUCKETS)
+              .setStartupRecoveryDelay(-1);
+
+      RegionFactory<String, String> factory = cacheRule.getCache()
+          .<String, String>createRegionFactory(regionType)
+          .setPartitionAttributes(attributesFactory.create())
+          .setCacheWriter(new BlackboardSignaller());
+
+      if (regionType.isPersistent()) {
+        factory.setDiskStoreName(cacheRule.getCache()
+            .createDiskStoreFactory()
+            .create(REGION_NAME + DISK_STORE_SUFFIX)
+            .getName());
+      }
+
+      getBlackboard().waitForGate(CLEAR_HAS_BEGUN, GeodeAwaitility.getTimeout().toMillis(),
+          TimeUnit.MILLISECONDS);
+
+      factory.create(REGION_NAME);
+    });
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, false);
+
+    createRegion.await();
+
+    // Assert that the region is empty
+    assertRegionIsEmpty(asList(server1, server2, server3), REGION_NAME);
+  }
+
+  public static class BlackboardSignaller extends CacheWriterAdapter<String, String> {
+    @Override
+    public synchronized void beforeRegionClear(RegionEvent<String, String> event)
+        throws CacheWriterException {
+      getBlackboard().signalGate(CLEAR_HAS_BEGUN);
+    }
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
index 4e30d64..f7c5c7f 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
@@ -279,15 +279,11 @@ public class ColocationHelper {
   }
 
   /**
-   * An utility method to retrieve all partitioned regions(excluding self) in a colocation chain<br>
+   * A utility method to retrieve all partitioned regions(excluding self) in a colocation chain<br>
    * <p>
-   * For example, shipmentPR is colocated with orderPR and orderPR is colocated with customerPR <br>
-   * <br>
-   * getAllColocationRegions(customerPR) --> List{orderPR, shipmentPR}<br>
-   * getAllColocationRegions(orderPR) --> List{customerPR, shipmentPR}<br>
-   * getAllColocationRegions(shipmentPR) --> List{customerPR, orderPR}<br>
    *
-   * @return List of all partitioned regions (excluding self) in a colocated chain
+   * @return Map<String, PartitionedRegion> of all partitioned regions (excluding self) in a
+   *         colocated chain. Keys are the full paths of the PartitionedRegion values.
    * @since GemFire 5.8Beta
    */
   public static Map<String, PartitionedRegion> getAllColocationRegions(
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
index 5a0621d..1c9d5b2 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
@@ -382,7 +382,8 @@ public class PartitionedRegionClear {
   }
 
   protected void assignAllPrimaryBuckets() {
-    PartitionRegionHelper.assignBucketsToPartitions(partitionedRegion);
+    PartitionedRegion leader = ColocationHelper.getLeaderRegion(partitionedRegion);
+    PartitionRegionHelper.assignBucketsToPartitions(leader);
   }
 
   protected void handleClearFromDepartedMember(InternalDistributedMember departedMember) {


[geode] 16/22: GEODE-7670: PR Clear with Concurrent Ops DUnitTests (#4848)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 7d6c0504077a03d9411396ac8736cf7b962b0575
Author: Juan José Ramos <ju...@users.noreply.github.com>
AuthorDate: Wed Jul 22 09:35:59 2020 +0100

    GEODE-7670: PR Clear with Concurrent Ops DUnitTests (#4848)
    
    Added distributed tests to verify that the clear operation on
    Partitioned Regions works as expected when there are other
    concurrent operations happening on the cache (put, putAll, get,
    remove, removeAll, members added and members removed).
---
 ...gionClearWithConcurrentOperationsDUnitTest.java | 747 +++++++++++++++++++++
 1 file changed, 747 insertions(+)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
new file mode 100644
index 0000000..fdb91c7
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
@@ -0,0 +1,747 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.internal.util.ArrayUtils.asList;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.io.Serializable;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.IntStream;
+
+import junitparams.JUnitParamsRunner;
+import junitparams.Parameters;
+import junitparams.naming.TestCaseName;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.apache.geode.ForcedDisconnectException;
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.PartitionAttributes;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.PartitionedRegionPartialClearException;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.distributed.DistributedSystemDisconnectedException;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DMStats;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.DistributionMessageObserver;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.membership.api.MembershipManagerHelper;
+import org.apache.geode.internal.cache.versions.RegionVersionHolder;
+import org.apache.geode.internal.cache.versions.RegionVersionVector;
+import org.apache.geode.internal.cache.versions.VersionSource;
+import org.apache.geode.test.awaitility.GeodeAwaitility;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+
+/**
+ * Tests to verify that {@link PartitionedRegion#clear()} operation can be executed multiple times
+ * on the same region while other cache operations are being executed concurrently and members are
+ * added or removed.
+ */
+@RunWith(JUnitParamsRunner.class)
+public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements Serializable {
+  private static final Integer BUCKETS = 13;
+  private static final String REGION_NAME = "PartitionedRegion";
+  private static final String TEST_CASE_NAME =
+      "[{index}] {method}(Coordinator:{0}, RegionType:{1})";
+
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule(3);
+
+  @Rule
+  public CacheRule cacheRule = CacheRule.builder().createCacheInAll().build();
+
+  private VM server1;
+  private VM server2;
+  private VM accessor;
+
+  private enum TestVM {
+    ACCESSOR(0), SERVER1(1), SERVER2(2);
+
+    final int vmNumber;
+
+    TestVM(int vmNumber) {
+      this.vmNumber = vmNumber;
+    }
+  }
+
+  static RegionShortcut[] regionTypes() {
+    return new RegionShortcut[] {
+        RegionShortcut.PARTITION, RegionShortcut.PARTITION_REDUNDANT
+    };
+  }
+
+  @SuppressWarnings("unused")
+  static TestVM[] coordinators() {
+    return new TestVM[] {
+        TestVM.SERVER1, TestVM.ACCESSOR
+    };
+  }
+
+  @SuppressWarnings("unused")
+  static Object[] coordinatorsAndRegionTypes() {
+    ArrayList<Object[]> parameters = new ArrayList<>();
+    RegionShortcut[] regionShortcuts = regionTypes();
+
+    Arrays.stream(regionShortcuts).forEach(regionShortcut -> {
+      parameters.add(new Object[] {TestVM.SERVER1, regionShortcut});
+      parameters.add(new Object[] {TestVM.ACCESSOR, regionShortcut});
+    });
+
+    return parameters.toArray();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    server1 = getVM(TestVM.SERVER1.vmNumber);
+    server2 = getVM(TestVM.SERVER2.vmNumber);
+    accessor = getVM(TestVM.ACCESSOR.vmNumber);
+  }
+
+  private void initAccessor(RegionShortcut regionShortcut) {
+    PartitionAttributes<String, String> attrs = new PartitionAttributesFactory<String, String>()
+        .setTotalNumBuckets(BUCKETS)
+        .setLocalMaxMemory(0)
+        .create();
+
+    cacheRule.getCache().createRegionFactory(regionShortcut)
+        .setPartitionAttributes(attrs)
+        .create(REGION_NAME);
+
+  }
+
+  private void initDataStore(RegionShortcut regionShortcut) {
+    PartitionAttributes<String, String> attrs = new PartitionAttributesFactory<String, String>()
+        .setTotalNumBuckets(BUCKETS)
+        .create();
+
+    cacheRule.getCache().createRegionFactory(regionShortcut)
+        .setPartitionAttributes(attrs)
+        .create(REGION_NAME);
+  }
+
+  private void parametrizedSetup(RegionShortcut regionShortcut) {
+    server1.invoke(() -> initDataStore(regionShortcut));
+    server2.invoke(() -> initDataStore(regionShortcut));
+    accessor.invoke(() -> initAccessor(regionShortcut));
+  }
+
+  private void waitForSilence() {
+    DMStats dmStats = cacheRule.getSystem().getDistributionManager().getStats();
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    PartitionedRegionStats partitionedRegionStats = region.getPrStats();
+
+    await().untilAsserted(() -> {
+      assertThat(dmStats.getReplyWaitsInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getVolunteeringInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getPrimaryTransfersInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalanceBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalancePrimaryTransfersInProgress()).isEqualTo(0);
+    });
+  }
+
+  /**
+   * Populates the region and verifies the data on the selected VMs.
+   */
+  private void populateRegion(VM feeder, int entryCount, List<VM> vms) {
+    feeder.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, entryCount).forEach(i -> region.put(String.valueOf(i), "Value_" + i));
+    });
+
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilence();
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+
+      IntStream.range(0, entryCount)
+          .forEach(i -> assertThat(region.get(String.valueOf(i))).isEqualTo("Value_" + i));
+    }));
+  }
+
+  /**
+   * Asserts that the RegionVersionVectors for both buckets are consistent.
+   *
+   * @param bucketId Id of the bucket to compare.
+   * @param bucketDump1 First bucketDump.
+   * @param bucketDump2 Second bucketDump.
+   */
+  private void assertRegionVersionVectorsConsistency(int bucketId, BucketDump bucketDump1,
+      BucketDump bucketDump2) {
+    RegionVersionVector<?> rvv1 = bucketDump1.getRvv();
+    RegionVersionVector<?> rvv2 = bucketDump2.getRvv();
+
+    if (rvv1 == null) {
+      assertThat(rvv2)
+          .as("Bucket " + bucketId + " has an RVV on member " + bucketDump2.getMember()
+              + ", but does not on member " + bucketDump1.getMember())
+          .isNull();
+    }
+
+    if (rvv2 == null) {
+      assertThat(rvv1)
+          .as("Bucket " + bucketId + " has an RVV on member " + bucketDump1.getMember()
+              + ", but does not on member " + bucketDump2.getMember())
+          .isNull();
+    }
+
+    assertThat(rvv1).isNotNull();
+    assertThat(rvv2).isNotNull();
+    Map<VersionSource<?>, RegionVersionHolder<?>> rvv2Members =
+        new HashMap<>(rvv1.getMemberToVersion());
+    Map<VersionSource<?>, RegionVersionHolder<?>> rvv1Members =
+        new HashMap<>(rvv1.getMemberToVersion());
+    for (Map.Entry<VersionSource<?>, RegionVersionHolder<?>> entry : rvv1Members.entrySet()) {
+      VersionSource<?> memberId = entry.getKey();
+      RegionVersionHolder<?> versionHolder1 = entry.getValue();
+      RegionVersionHolder<?> versionHolder2 = rvv2Members.remove(memberId);
+      assertThat(versionHolder1)
+          .as("RegionVersionVector for bucket " + bucketId + " on member " + bucketDump1.getMember()
+              + " is not consistent with member " + bucketDump2.getMember())
+          .isEqualTo(versionHolder2);
+    }
+  }
+
+  /**
+   * Asserts that the region data is consistent across buckets.
+   */
+  private void assertRegionBucketsConsistency() throws ForceReattemptException {
+    List<BucketDump> bucketDumps;
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    // Redundant copies + 1 primary.
+    int expectedCopies = region.getRedundantCopies() + 1;
+
+    for (int bId = 0; bId < BUCKETS; bId++) {
+      final int bucketId = bId;
+      bucketDumps = region.getAllBucketEntries(bucketId);
+      assertThat(bucketDumps.size())
+          .as("Bucket " + bucketId + " should have " + expectedCopies + " copies, but has "
+              + bucketDumps.size())
+          .isEqualTo(expectedCopies);
+
+      // Check that all copies of the bucket have the same data.
+      if (bucketDumps.size() > 1) {
+        BucketDump firstDump = bucketDumps.get(0);
+
+        for (int j = 1; j < bucketDumps.size(); j++) {
+          BucketDump otherDump = bucketDumps.get(j);
+          assertRegionVersionVectorsConsistency(bucketId, firstDump, otherDump);
+
+          await().untilAsserted(() -> assertThat(otherDump.getValues())
+              .as("Values for bucket " + bucketId + " on member " + otherDump.getMember()
+                  + " are not consistent with member " + firstDump.getMember())
+              .isEqualTo(firstDump.getValues()));
+
+          await().untilAsserted(() -> assertThat(otherDump.getVersions())
+              .as("Versions for bucket " + bucketId + " on member " + otherDump.getMember()
+                  + " are not consistent with member " + firstDump.getMember())
+              .isEqualTo(firstDump.getVersions()));
+        }
+      }
+    }
+  }
+
+  /**
+   * Continuously execute get operations on the PartitionedRegion for the given durationInMillis.
+   */
+  private void executeGets(final int numEntries, final long durationInMillis) {
+    Cache cache = cacheRule.getCache();
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+
+    while (Instant.now().isBefore(finishTime)) {
+      // Region might have been cleared in between, that's why we check for null.
+      IntStream.range(0, numEntries).forEach(i -> {
+        Optional<String> nullableValue = Optional.ofNullable(region.get(String.valueOf(i)));
+        nullableValue.ifPresent(value -> assertThat(value).isEqualTo("Value_" + i));
+      });
+    }
+  }
+
+  /**
+   * Continuously execute put operations on the PartitionedRegion for the given durationInMillis.
+   */
+  private void executePuts(final int numEntries, final long durationInMillis) {
+    Cache cache = cacheRule.getCache();
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+
+    while (Instant.now().isBefore(finishTime)) {
+      IntStream.range(0, numEntries).forEach(i -> region.put(String.valueOf(i), "Value_" + i));
+    }
+  }
+
+  /**
+   * Continuously execute putAll operations on the PartitionedRegion for the given
+   * durationInMillis.
+   */
+  private void executePutAlls(final int startKey, final int finalKey, final long durationInMillis) {
+    Cache cache = cacheRule.getCache();
+    Map<String, String> valuesToInsert = new HashMap<>();
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    IntStream.range(startKey, finalKey)
+        .forEach(i -> valuesToInsert.put(String.valueOf(i), "Value_" + i));
+    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+
+    while (Instant.now().isBefore(finishTime)) {
+      region.putAll(valuesToInsert);
+    }
+  }
+
+  /**
+   * Continuously execute remove operations on the PartitionedRegion for the given
+   * durationInMillis.
+   */
+  private void executeRemoves(final int numEntries, final long durationInMillis) {
+    Cache cache = cacheRule.getCache();
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+
+    while (Instant.now().isBefore(finishTime)) {
+      // Region might have been cleared in between, that's why we check for null.
+      IntStream.range(0, numEntries).forEach(i -> {
+        Optional<String> nullableValue = Optional.ofNullable(region.remove(String.valueOf(i)));
+        nullableValue.ifPresent(value -> assertThat(value).isEqualTo("Value_" + i));
+      });
+    }
+  }
+
+  /**
+   * Continuously execute removeAll operations on the PartitionedRegion for the given
+   * durationInMillis.
+   */
+  private void executeRemoveAlls(final int startKey, final int finalKey,
+      final long durationInMillis) {
+    Cache cache = cacheRule.getCache();
+    List<String> keysToRemove = new ArrayList<>();
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    IntStream.range(startKey, finalKey).forEach(i -> keysToRemove.add(String.valueOf(i)));
+    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+
+    while (Instant.now().isBefore(finishTime)) {
+      region.removeAll(keysToRemove);
+    }
+  }
+
+  /**
+   * Execute the clear operation and retry until success.
+   */
+  private void executeClearWithRetry(VM coordinator) {
+    coordinator.invoke(() -> {
+      boolean retry;
+
+      do {
+        retry = false;
+
+        try {
+          cacheRule.getCache().getRegion(REGION_NAME).clear();
+        } catch (PartitionedRegionPartialClearException pce) {
+          retry = true;
+        }
+
+      } while (retry);
+    });
+  }
+
+  /**
+   * Continuously execute clear operations on the PartitionedRegion every periodInMillis
+   * for the given durationInMillis.
+   */
+  private void executeClears(final long durationInMillis, final long periodInMillis)
+      throws InterruptedException {
+    Cache cache = cacheRule.getCache();
+    AtomicLong invocationCount = new AtomicLong(0);
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    Long minimumInvocationCount = (durationInMillis / periodInMillis);
+    ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
+    ScheduledFuture<?> scheduledFuture = executor.scheduleWithFixedDelay(() -> {
+      region.clear();
+      invocationCount.incrementAndGet();
+    }, 0, periodInMillis, TimeUnit.MILLISECONDS);
+
+    await().untilAsserted(
+        () -> assertThat(invocationCount.get()).isGreaterThanOrEqualTo(minimumInvocationCount));
+    scheduledFuture.cancel(false);
+    executor.shutdown();
+    executor.awaitTermination(GeodeAwaitility.getTimeout().getSeconds(), TimeUnit.SECONDS);
+  }
+
+  /**
+   * The test does the following (clear coordinator and regionType are parametrized):
+   * - Launches one thread per VM to continuously execute removes, puts and gets for a given time.
+   * - Clears the Partition Region continuously every X milliseconds for a given time.
+   * - Asserts that, after the clears have finished, the Region Buckets are consistent across
+   * members.
+   */
+  @Test
+  @TestCaseName(TEST_CASE_NAME)
+  @Parameters(method = "coordinatorsAndRegionTypes")
+  public void clearWithConcurrentPutGetRemoveShouldWorkCorrectly(TestVM coordinatorVM,
+      RegionShortcut regionShortcut) throws InterruptedException {
+    final int entries = 15000;
+    final int workMillis = 60000;
+    parametrizedSetup(regionShortcut);
+
+    // Let all VMs continuously execute puts and gets for 60 seconds.
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePuts(entries, workMillis)),
+        server2.invokeAsync(() -> executeGets(entries, workMillis)),
+        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
+
+    // Clear the region every second for 60 seconds.
+    getVM(coordinatorVM.vmNumber).invoke(() -> executeClears(workMillis, 1000));
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator and regionType are parametrized):
+   * - Launches two threads per VM to continuously execute putAll and removeAll for a given time.
+   * - Clears the Partition Region continuously every X milliseconds for a given time.
+   * - Asserts that, after the clears have finished, the Region Buckets are consistent across
+   * members.
+   */
+  @Test
+  @TestCaseName(TEST_CASE_NAME)
+  @Parameters(method = "coordinatorsAndRegionTypes")
+  public void clearWithConcurrentPutAllRemoveAllShouldWorkCorrectly(TestVM coordinatorVM,
+      RegionShortcut regionShortcut) throws InterruptedException {
+    final int workMillis = 15000;
+    parametrizedSetup(regionShortcut);
+
+    // Let all VMs continuously execute putAll and removeAll for 15 seconds.
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePutAlls(0, 2000, workMillis)),
+        server1.invokeAsync(() -> executeRemoveAlls(0, 2000, workMillis)),
+        server2.invokeAsync(() -> executePutAlls(2000, 4000, workMillis)),
+        server2.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)),
+        accessor.invokeAsync(() -> executePutAlls(4000, 6000, workMillis)),
+        accessor.invokeAsync(() -> executeRemoveAlls(4000, 6000, workMillis)));
+
+    // Clear the region every half second for 15 seconds.
+    getVM(coordinatorVM.vmNumber).invoke(() -> executeClears(workMillis, 500));
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (regionType is parametrized):
+   * - Populates the Partition Region.
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop the
+   * coordinator VM while the clear is in progress.
+   * - Clears the Partition Region (at this point the coordinator is restarted).
+   * - Asserts that, after the member joins again, the Region Buckets are consistent.
+   */
+  @Test
+  @TestCaseName("[{index}] {method}(RegionType:{0})")
+  @Parameters(method = "regionTypes")
+  public void clearShouldFailWhenCoordinatorMemberIsBounced(RegionShortcut regionShortcut) {
+    final int entries = 1000;
+    parametrizedSetup(regionShortcut);
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+
+    // Set the CoordinatorMemberKiller and try to clear the region
+    server1.invoke(() -> {
+      DistributionMessageObserver.setInstance(new MemberKiller(true));
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      assertThatThrownBy(region::clear)
+          .isInstanceOf(DistributedSystemDisconnectedException.class)
+          .hasCauseInstanceOf(ForcedDisconnectException.class);
+    });
+
+    // Wait for member to get back online and assign all buckets.
+    server1.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(regionShortcut);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+      PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
+    });
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Populates the Partition Region.
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
+   * participates on the clear operation).
+   * - Launches two threads per VM to continuously execute gets, puts and removes for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that, after the clear has finished, the Region Buckets are consistent across members.
+   */
+  @Test
+  @Parameters(method = "coordinators")
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnRedundantPartitionRegionWithConcurrentPutGetRemoveShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    final int entries = 7500;
+    final int workMillis = 30000;
+    parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    // Let all VMs (except the one to kill) continuously execute gets, put and removes for 30"
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executeGets(entries, workMillis)),
+        server1.invokeAsync(() -> executePuts(entries, workMillis)),
+        accessor.invokeAsync(() -> executeGets(entries, workMillis)),
+        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
+
+    // Retry the clear operation on the region until success (server2 will go down, but other
+    // members will eventually become primary for those buckets previously hosted by server2).
+    executeClearWithRetry(getVM(coordinatorVM.vmNumber));
+
+    // Wait for member to get back online.
+    server2.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(RegionShortcut.PARTITION_REDUNDANT);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Populates the Partition Region.
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
+   * participates on the clear operation).
+   * - Launches two threads per VM to continuously execute gets, puts and removes for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that the clear operation failed with PartitionedRegionPartialClearException (primary
+   * buckets on the restarted members are not available).
+   */
+  @Test
+  @Parameters(method = "coordinators")
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnNonRedundantPartitionRegionWithConcurrentPutGetRemoveShouldFailWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    final int entries = 7500;
+    final int workMillis = 30000;
+    parametrizedSetup(RegionShortcut.PARTITION);
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    // Let all VMs (except the one to kill) continuously execute gets, put and removes for 30"
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executeGets(entries, workMillis)),
+        server1.invokeAsync(() -> executePuts(entries, workMillis)),
+        accessor.invokeAsync(() -> executeGets(entries, workMillis)),
+        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
+
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> {
+      assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
+          .isInstanceOf(PartitionedRegionPartialClearException.class);
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+  }
+
+  /**
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
+   * participates on the clear operation).
+   * - Launches one thread per VM to continuously execute putAll/removeAll for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that, after the clear has finished, the Region Buckets are consistent across members.
+   */
+  @Test
+  @Parameters(method = "coordinators")
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    final int workMillis = 30000;
+    parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    // Let all VMs continuously execute putAll/removeAll for 30 seconds.
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
+        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
+
+    // Retry the clear operation on the region until success (server2 will go down, but other
+    // members will eventually become primary for those buckets previously hosted by server2).
+    executeClearWithRetry(getVM(coordinatorVM.vmNumber));
+
+    // Wait for member to get back online.
+    server2.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(RegionShortcut.PARTITION_REDUNDANT);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
+   * participates on the clear operation).
+   * - Launches one thread per VM to continuously execute putAll/removeAll for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that the clear operation failed with PartitionedRegionPartialClearException (primary
+   * buckets on the restarted members are not available).
+   */
+  @Test
+  @Parameters(method = "coordinators")
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnNonRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldFailWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    final int workMillis = 30000;
+    parametrizedSetup(RegionShortcut.PARTITION);
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
+        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
+
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> {
+      assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
+          .isInstanceOf(PartitionedRegionPartialClearException.class);
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+  }
+
+  /**
+   * Shuts down a coordinator member while the clear operation is in progress.
+   */
+  public static class MemberKiller extends DistributionMessageObserver {
+    private final boolean coordinator;
+
+    public MemberKiller(boolean coordinator) {
+      this.coordinator = coordinator;
+    }
+
+    /**
+     * Shuts down the VM whenever the message is an instance of
+     * {@link PartitionedRegionClearMessage}.
+     */
+    private void shutdownMember(DistributionMessage message) {
+      if (message instanceof PartitionedRegionClearMessage) {
+        if (((PartitionedRegionClearMessage) message)
+            .getOp() == PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR) {
+          DistributionMessageObserver.setInstance(null);
+          InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
+          MembershipManagerHelper
+              .crashDistributedSystem(InternalDistributedSystem.getConnectedInstance());
+          await().untilAsserted(
+              () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNull());
+        }
+      }
+    }
+
+    /**
+     * Invoked only on clear coordinator VM.
+     *
+     * @param dm the distribution manager that received the message
+     * @param message The message itself
+     */
+    @Override
+    public void beforeSendMessage(ClusterDistributionManager dm, DistributionMessage message) {
+      if (coordinator) {
+        shutdownMember(message);
+      } else {
+        super.beforeSendMessage(dm, message);
+      }
+    }
+
+    /**
+     * Invoked only on non clear coordinator VM.
+     *
+     * @param dm the distribution manager that received the message
+     * @param message The message itself
+     */
+    @Override
+    public void beforeProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
+      if (!coordinator) {
+        shutdownMember(message);
+      } else {
+        super.beforeProcessMessage(dm, message);
+      }
+    }
+  }
+}


[geode] 14/22: GEODE-8334: PR.clear should sync with putAll or removeAll on rvvLock (#5365)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 5948b185a9f73ad8e12f01d449e2f1ec4d07bc62
Author: Xiaojian Zhou <ge...@users.noreply.github.com>
AuthorDate: Fri Jul 10 08:59:26 2020 -0700

    GEODE-8334: PR.clear should sync with putAll or removeAll on rvvLock (#5365)
    
    
        Co-authored-by: Xiaojian Zhou <zh...@vmware.com>
        Co-authored-by: Anil Gingade <ag...@vmware.com>
---
 .../apache/geode/internal/cache/BucketRegion.java  |  8 +-----
 .../cache/partitioned/PutAllPRMessage.java         |  9 +++++++
 .../cache/partitioned/RemoveAllPRMessage.java      |  9 +++++++
 .../cache/partitioned/PutAllPRMessageTest.java     | 29 ++++++++++++++++++++++
 .../cache/partitioned/RemoveAllPRMessageTest.java  | 29 ++++++++++++++++++++++
 5 files changed, 77 insertions(+), 7 deletions(-)

diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
index 3329e42..454db5c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
@@ -569,12 +569,6 @@ public class BucketRegion extends DistributedRegion implements Bucket {
       return;
     }
 
-    boolean enableRVV = useRVV && getConcurrencyChecksEnabled();
-    RegionVersionVector rvv = null;
-    if (enableRVV) {
-      rvv = getVersionVector().getCloneForTransmission();
-    }
-
     // get rvvLock
     Set<InternalDistributedMember> participants =
         getCacheDistributionAdvisor().adviseInvalidateRegion();
@@ -588,7 +582,7 @@ public class BucketRegion extends DistributedRegion implements Bucket {
       // no need to dominate my own rvv.
       // Clear is on going here, there won't be GII for this member
       clearRegionLocally(regionEvent, cacheWrite, null);
-      distributeClearOperation(regionEvent, rvv, participants);
+      distributeClearOperation(regionEvent, null, participants);
 
       // TODO: call reindexUserDataRegion if there're lucene indexes
     } finally {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java
index 0c690c5..6bb666c 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java
@@ -413,6 +413,7 @@ public class PutAllPRMessage extends PartitionMessageWithDirectReply {
       Object[] keys = getKeysToBeLocked();
       if (!notificationOnly) {
         boolean locked = false;
+        boolean rvvLocked = false;
         try {
           if (putAllPRData.length > 0) {
             if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
@@ -438,6 +439,10 @@ public class PutAllPRMessage extends PartitionMessageWithDirectReply {
             bucketRegion.recordBulkOpStart(membershipID, eventID);
           }
           locked = bucketRegion.waitUntilLocked(keys);
+          if (!rvvLocked) {
+            bucketRegion.lockRVVForBulkOp();
+            rvvLocked = true;
+          }
           boolean lockedForPrimary = false;
           final HashMap succeeded = new HashMap();
           PutAllPartialResult partialKeys = new PutAllPartialResult(putAllPRDataSize);
@@ -518,6 +523,10 @@ public class PutAllPRMessage extends PartitionMessageWithDirectReply {
         } catch (RegionDestroyedException e) {
           ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
         } finally {
+          if (rvvLocked) {
+            bucketRegion.unlockRVVForBulkOp();
+            rvvLocked = false;
+          }
           if (locked) {
             bucketRegion.removeAndNotifyKeys(keys);
           }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java
index 6f355d6..f295136 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java
@@ -406,6 +406,7 @@ public class RemoveAllPRMessage extends PartitionMessageWithDirectReply {
 
       if (!notificationOnly) {
         boolean locked = false;
+        boolean rvvLocked = false;
         try {
           if (removeAllPRData.length > 0) {
             if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
@@ -431,6 +432,10 @@ public class RemoveAllPRMessage extends PartitionMessageWithDirectReply {
             bucketRegion.recordBulkOpStart(membershipID, eventID);
           }
           locked = bucketRegion.waitUntilLocked(keys);
+          if (!rvvLocked) {
+            bucketRegion.lockRVVForBulkOp();
+            rvvLocked = true;
+          }
           boolean lockedForPrimary = false;
           final ArrayList<Object> succeeded = new ArrayList<Object>();
           PutAllPartialResult partialKeys = new PutAllPartialResult(removeAllPRDataSize);
@@ -526,6 +531,10 @@ public class RemoveAllPRMessage extends PartitionMessageWithDirectReply {
         } catch (RegionDestroyedException e) {
           ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
         } finally {
+          if (rvvLocked) {
+            bucketRegion.unlockRVVForBulkOp();
+            rvvLocked = false;
+          }
           if (locked) {
             bucketRegion.removeAndNotifyKeys(keys);
           }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessageTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessageTest.java
index ab82a93..f5480a5 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessageTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessageTest.java
@@ -15,9 +15,11 @@
 package org.apache.geode.internal.cache.partitioned;
 
 
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
+import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.inOrder;
 import static org.mockito.Mockito.mock;
@@ -119,4 +121,31 @@ public class PutAllPRMessageTest {
         eq(regionDestroyedException));
   }
 
+  @Test
+  public void rvvLockedAfterKeysAreLockedAndUnlockRVVBeforeKeys() throws Exception {
+    PutAllPRMessage message = spy(new PutAllPRMessage(bucketId, 1, false, false, false, null));
+    message.addEntry(entryData);
+    doReturn(keys).when(message).getKeysToBeLocked();
+    when(bucketRegion.waitUntilLocked(keys)).thenReturn(true);
+    when(bucketRegion.doLockForPrimary(false)).thenThrow(new PrimaryBucketException());
+    doNothing().when(bucketRegion).lockRVVForBulkOp();
+    doNothing().when(bucketRegion).unlockRVVForBulkOp();
+
+    InternalCache cache = mock(InternalCache.class);
+    InternalDistributedSystem ids = mock(InternalDistributedSystem.class);
+    when(bucketRegion.getCache()).thenReturn(cache);
+    when(cache.getDistributedSystem()).thenReturn(ids);
+    when(ids.getOffHeapStore()).thenReturn(null);
+
+    assertThatThrownBy(
+        () -> message.doLocalPutAll(partitionedRegion, mock(InternalDistributedMember.class), 1))
+            .isInstanceOf(PrimaryBucketException.class);
+
+    InOrder inOrder = inOrder(bucketRegion);
+    inOrder.verify(bucketRegion).waitUntilLocked(keys);
+    inOrder.verify(bucketRegion).lockRVVForBulkOp();
+    inOrder.verify(bucketRegion).unlockRVVForBulkOp();
+    inOrder.verify(bucketRegion).removeAndNotifyKeys(keys);
+  }
+
 }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessageTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessageTest.java
index 2309cb0..a3ee31b 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessageTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessageTest.java
@@ -14,9 +14,11 @@
  */
 package org.apache.geode.internal.cache.partitioned;
 
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
+import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.inOrder;
 import static org.mockito.Mockito.mock;
@@ -131,4 +133,31 @@ public class RemoveAllPRMessageTest {
     verify(dataStore).checkRegionDestroyedOnBucket(eq(bucketRegion), eq(true),
         eq(regionDestroyedException));
   }
+
+  @Test
+  public void rvvLockedAfterKeysAreLockedAndUnlockRVVBeforeKeys() throws Exception {
+    RemoveAllPRMessage message =
+        spy(new RemoveAllPRMessage(bucketId, 1, false, false, false, null));
+    message.addEntry(entryData);
+    doReturn(keys).when(message).getKeysToBeLocked();
+    when(bucketRegion.waitUntilLocked(keys)).thenReturn(true);
+    when(bucketRegion.doLockForPrimary(false)).thenThrow(new PrimaryBucketException());
+    doNothing().when(bucketRegion).lockRVVForBulkOp();
+    doNothing().when(bucketRegion).unlockRVVForBulkOp();
+
+    InternalCache cache = mock(InternalCache.class);
+    InternalDistributedSystem ids = mock(InternalDistributedSystem.class);
+    when(bucketRegion.getCache()).thenReturn(cache);
+    when(cache.getDistributedSystem()).thenReturn(ids);
+    when(ids.getOffHeapStore()).thenReturn(null);
+
+    assertThatThrownBy(() -> message.doLocalRemoveAll(partitionedRegion,
+        mock(InternalDistributedMember.class), true)).isInstanceOf(PrimaryBucketException.class);
+
+    InOrder inOrder = inOrder(bucketRegion);
+    inOrder.verify(bucketRegion).waitUntilLocked(keys);
+    inOrder.verify(bucketRegion).lockRVVForBulkOp();
+    inOrder.verify(bucketRegion).unlockRVVForBulkOp();
+    inOrder.verify(bucketRegion).removeAndNotifyKeys(keys);
+  }
 }


[geode] 04/22: GEODE-7912: cacheWriter should be triggered when PR.clear (#4882)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 647b8e0938939d225a7f2cc16f1671b400355e02
Author: Xiaojian Zhou <ge...@users.noreply.github.com>
AuthorDate: Mon Mar 30 19:34:35 2020 -0700

    GEODE-7912: cacheWriter should be triggered when PR.clear (#4882)
    
    
            Co-authored-by: Anil <ag...@pivotal.io>
            Co-authored-by: Xiaojian Zhou <gz...@pivotal.io>
---
 .../cache/PartitionedRegionClearDUnitTest.java     | 228 +++++++++++++++++----
 .../apache/geode/internal/cache/LocalRegion.java   |   4 +-
 .../geode/internal/cache/PartitionedRegion.java    |  56 +++--
 3 files changed, 223 insertions(+), 65 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
index fb2a81b..a5a22b9 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
@@ -20,6 +20,7 @@ import static org.apache.geode.test.dunit.rules.ClusterStartupRule.getClientCach
 import static org.assertj.core.api.Assertions.assertThat;
 
 import java.io.Serializable;
+import java.util.HashMap;
 import java.util.Properties;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.IntStream;
@@ -30,13 +31,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 
+import org.apache.geode.cache.CacheWriterException;
 import org.apache.geode.cache.InterestResultPolicy;
 import org.apache.geode.cache.PartitionAttributesFactory;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.RegionFactory;
 import org.apache.geode.cache.RegionShortcut;
 import org.apache.geode.cache.client.ClientRegionShortcut;
-import org.apache.geode.cache.util.CacheListenerAdapter;
+import org.apache.geode.cache.util.CacheWriterAdapter;
 import org.apache.geode.test.dunit.SerializableCallableIF;
 import org.apache.geode.test.dunit.rules.ClientVM;
 import org.apache.geode.test.dunit.rules.ClusterStartupRule;
@@ -68,12 +71,6 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
         c -> c.withPoolSubscription(true).withLocatorConnection((locatorPort)));
     client2 = cluster.startClientVM(6,
         c -> c.withPoolSubscription(true).withLocatorConnection((locatorPort)));
-    dataStore1.invoke(this::initDataStore);
-    dataStore2.invoke(this::initDataStore);
-    dataStore3.invoke(this::initDataStore);
-    accessor.invoke(this::initAccessor);
-    client1.invoke(this::initClientCache);
-    client2.invoke(this::initClientCache);
   }
 
   protected RegionShortcut getRegionShortCut() {
@@ -104,14 +101,18 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
     region.registerInterestForAllKeys(InterestResultPolicy.KEYS);
   }
 
-  private void initDataStore() {
-    getCache().createRegionFactory(getRegionShortCut())
-        .setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(10).create())
-        .addCacheListener(new CountingCacheListener())
-        .create(REGION_NAME);
+  private void initDataStore(boolean withWriter) {
+    RegionFactory factory = getCache().createRegionFactory(getRegionShortCut())
+        .setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(10).create());
+    if (withWriter) {
+      factory.setCacheWriter(new CountingCacheWriter());
+    }
+    factory.create(REGION_NAME);
+    clearsByRegion = new HashMap<>();
+    destroysByRegion = new HashMap<>();
   }
 
-  private void initAccessor() {
+  private void initAccessor(boolean withWriter) {
     RegionShortcut shortcut = getRegionShortCut();
     if (shortcut.isPersistent()) {
       if (shortcut == RegionShortcut.PARTITION_PERSISTENT) {
@@ -126,12 +127,16 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
         fail("Wrong region type:" + shortcut);
       }
     }
-    getCache().createRegionFactory(shortcut)
+    RegionFactory factory = getCache().createRegionFactory(shortcut)
         .setPartitionAttributes(
             new PartitionAttributesFactory().setTotalNumBuckets(10).setLocalMaxMemory(0).create())
-        .setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(10).create())
-        .addCacheListener(new CountingCacheListener())
-        .create(REGION_NAME);
+        .setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(10).create());
+    if (withWriter) {
+      factory.setCacheWriter(new CountingCacheWriter());
+    }
+    factory.create(REGION_NAME);
+    clearsByRegion = new HashMap<>();
+    destroysByRegion = new HashMap<>();
   }
 
   private void feed(boolean isClient) {
@@ -152,45 +157,148 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
     // client2.invoke(()->verifyRegionSize(true, expectedNum));
   }
 
-  private void verifyCacheListenerTriggerCount(MemberVM serverVM) {
-    SerializableCallableIF<Integer> getListenerTriggerCount = () -> {
-      CountingCacheListener countingCacheListener =
-          (CountingCacheListener) getRegion(false).getAttributes()
-              .getCacheListeners()[0];
-      return countingCacheListener.getClears();
-    };
+  SerializableCallableIF<Integer> getWriterClears = () -> {
+    int clears =
+        clearsByRegion.get(REGION_NAME) == null ? 0 : clearsByRegion.get(REGION_NAME).get();
+    return clears;
+  };
 
-    int count = accessor.invoke(getListenerTriggerCount)
-        + dataStore1.invoke(getListenerTriggerCount)
-        + dataStore2.invoke(getListenerTriggerCount)
-        + dataStore3.invoke(getListenerTriggerCount);
-    assertThat(count).isEqualTo(1);
+  SerializableCallableIF<Integer> getWriterDestroys = () -> {
+    int destroys =
+        destroysByRegion.get(REGION_NAME) == null ? 0 : destroysByRegion.get(REGION_NAME).get();
+    return destroys;
+  };
 
-    if (serverVM != null) {
-      assertThat(serverVM.invoke(getListenerTriggerCount)).isEqualTo(1);
-    }
+  void configureServers(boolean dataStoreWithWriter, boolean accessorWithWriter) {
+    dataStore1.invoke(() -> initDataStore(dataStoreWithWriter));
+    dataStore2.invoke(() -> initDataStore(dataStoreWithWriter));
+    dataStore3.invoke(() -> initDataStore(dataStoreWithWriter));
+    accessor.invoke(() -> initAccessor(accessorWithWriter));
+    // make sure only datastore3 has cacheWriter
+    dataStore1.invoke(() -> {
+      Region region = getRegion(false);
+      region.getAttributesMutator().setCacheWriter(null);
+    });
+    dataStore2.invoke(() -> {
+      Region region = getRegion(false);
+      region.getAttributesMutator().setCacheWriter(null);
+    });
+  }
+
+  @Test
+  public void normalClearFromDataStoreWithWriterOnDataStore() {
+    configureServers(true, true);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    dataStore3.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+
+    // do the region destroy to compare that the same callbacks will be triggered
+    dataStore3.invoke(() -> {
+      Region region = getRegion(false);
+      region.destroyRegion();
+    });
+
+    assertThat(dataStore1.invoke(getWriterDestroys)).isEqualTo(dataStore1.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore2.invoke(getWriterDestroys)).isEqualTo(dataStore2.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore3.invoke(getWriterDestroys)).isEqualTo(dataStore3.invoke(getWriterClears))
+        .isEqualTo(1);
+    assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
+        .isEqualTo(0);
   }
 
   @Test
-  public void normalClearFromDataStore() {
+  public void normalClearFromDataStoreWithoutWriterOnDataStore() {
+    configureServers(false, true);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
     accessor.invoke(() -> feed(false));
     verifyServerRegionSize(NUM_ENTRIES);
     dataStore1.invoke(() -> getRegion(false).clear());
     verifyServerRegionSize(0);
-    verifyCacheListenerTriggerCount(dataStore1);
+
+    // do the region destroy to compare that the same callbacks will be triggered
+    dataStore1.invoke(() -> {
+      Region region = getRegion(false);
+      region.destroyRegion();
+    });
+
+    assertThat(dataStore1.invoke(getWriterDestroys)).isEqualTo(dataStore1.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore2.invoke(getWriterDestroys)).isEqualTo(dataStore2.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore3.invoke(getWriterDestroys)).isEqualTo(dataStore3.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
+        .isEqualTo(1);
   }
 
   @Test
-  public void normalClearFromAccessor() {
+  public void normalClearFromAccessorWithWriterOnDataStore() {
+    configureServers(true, true);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
     accessor.invoke(() -> feed(false));
     verifyServerRegionSize(NUM_ENTRIES);
     accessor.invoke(() -> getRegion(false).clear());
     verifyServerRegionSize(0);
-    verifyCacheListenerTriggerCount(accessor);
+
+    // do the region destroy to compare that the same callbacks will be triggered
+    accessor.invoke(() -> {
+      Region region = getRegion(false);
+      region.destroyRegion();
+    });
+
+    assertThat(dataStore1.invoke(getWriterDestroys)).isEqualTo(dataStore1.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore2.invoke(getWriterDestroys)).isEqualTo(dataStore2.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore3.invoke(getWriterDestroys)).isEqualTo(dataStore3.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
+        .isEqualTo(1);
+  }
+
+  @Test
+  public void normalClearFromAccessorWithoutWriterButWithWriterOnDataStore() {
+    configureServers(true, false);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    accessor.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+
+    // do the region destroy to compare that the same callbacks will be triggered
+    accessor.invoke(() -> {
+      Region region = getRegion(false);
+      region.destroyRegion();
+    });
+
+    assertThat(dataStore1.invoke(getWriterDestroys)).isEqualTo(dataStore1.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore2.invoke(getWriterDestroys)).isEqualTo(dataStore2.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore3.invoke(getWriterDestroys)).isEqualTo(dataStore3.invoke(getWriterClears))
+        .isEqualTo(1);
+    assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
+        .isEqualTo(0);
   }
 
   @Test
   public void normalClearFromClient() {
+    configureServers(true, false);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
     client1.invoke(() -> feed(true));
     verifyClientRegionSize(NUM_ENTRIES);
     verifyServerRegionSize(NUM_ENTRIES);
@@ -198,21 +306,53 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
     client1.invoke(() -> getRegion(true).clear());
     verifyServerRegionSize(0);
     verifyClientRegionSize(0);
-    verifyCacheListenerTriggerCount(null);
+
+    // do the region destroy to compare that the same callbacks will be triggered
+    client1.invoke(() -> {
+      Region region = getRegion(true);
+      region.destroyRegion();
+    });
+
+    assertThat(dataStore1.invoke(getWriterDestroys)).isEqualTo(dataStore1.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore2.invoke(getWriterDestroys)).isEqualTo(dataStore2.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore3.invoke(getWriterDestroys)).isEqualTo(dataStore3.invoke(getWriterClears))
+        .isEqualTo(1);
+    assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
+        .isEqualTo(0);
   }
 
-  private static class CountingCacheListener extends CacheListenerAdapter {
-    private final AtomicInteger clears = new AtomicInteger();
+  public static HashMap<String, AtomicInteger> clearsByRegion = new HashMap<>();
+  public static HashMap<String, AtomicInteger> destroysByRegion = new HashMap<>();
 
+  private static class CountingCacheWriter extends CacheWriterAdapter {
     @Override
-    public void afterRegionClear(RegionEvent event) {
+    public void beforeRegionClear(RegionEvent event) throws CacheWriterException {
       Region region = event.getRegion();
-      logger.info("Region " + region.getFullPath() + " is cleared.");
-      clears.incrementAndGet();
+      AtomicInteger clears = clearsByRegion.get(region.getName());
+      if (clears == null) {
+        clears = new AtomicInteger(1);
+        clearsByRegion.put(region.getName(), clears);
+      } else {
+        clears.incrementAndGet();
+      }
+      logger
+          .info("Region " + region.getName() + " will be cleared, clear count is:" + clears.get());
     }
 
-    int getClears() {
-      return clears.get();
+    @Override
+    public void beforeRegionDestroy(RegionEvent event) throws CacheWriterException {
+      Region region = event.getRegion();
+      AtomicInteger destroys = destroysByRegion.get(region.getName());
+      if (destroys == null) {
+        destroys = new AtomicInteger(1);
+        destroysByRegion.put(region.getName(), destroys);
+      } else {
+        destroys.incrementAndGet();
+      }
+      logger.info(
+          "Region " + region.getName() + " will be destroyed, destroy count is:" + destroys.get());
     }
   }
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index 4268786..d96c6f8 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -3000,7 +3000,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
   /**
    * @since GemFire 5.7
    */
-  private void serverRegionClear(RegionEventImpl regionEvent) {
+  protected void serverRegionClear(RegionEventImpl regionEvent) {
     if (regionEvent.getOperation().isDistributed()) {
       ServerRegionProxy mySRP = getServerProxy();
       if (mySRP != null) {
@@ -3119,7 +3119,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     return result;
   }
 
-  private void cacheWriteBeforeRegionClear(RegionEventImpl event)
+  void cacheWriteBeforeRegionClear(RegionEventImpl event)
       throws CacheWriterException, TimeoutException {
     // copy into local var to prevent race condition
     CacheWriter writer = basicGetWriter();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index ffb01af..950ec63 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -2190,6 +2190,9 @@ public class PartitionedRegion extends LocalRegion
           throw cache.getCacheClosedException("Cache is shutting down");
         }
 
+        // do cacheWrite
+        cacheWriteBeforeRegionClear(regionEvent);
+
         // create ClearPRMessage per bucket
         List<ClearPRMessage> clearMsgList = createClearPRMessages(regionEvent.getEventId());
         for (ClearPRMessage clearPRMessage : clearMsgList) {
@@ -4455,6 +4458,26 @@ public class PartitionedRegion extends LocalRegion
     return null;
   }
 
+  boolean triggerWriter(RegionEventImpl event, SearchLoadAndWriteProcessor processor, int paction,
+      String theKey) {
+    CacheWriter localWriter = basicGetWriter();
+    Set netWriteRecipients = localWriter == null ? this.distAdvisor.adviseNetWrite() : null;
+
+    if (localWriter == null && (netWriteRecipients == null || netWriteRecipients.isEmpty())) {
+      return false;
+    }
+
+    final long start = getCachePerfStats().startCacheWriterCall();
+    try {
+      processor.initialize(this, theKey, null);
+      processor.doNetWrite(event, netWriteRecipients, localWriter, paction);
+      processor.release();
+    } finally {
+      getCachePerfStats().endCacheWriterCall(start);
+    }
+    return true;
+  }
+
   /**
    * This invokes a cache writer before a destroy operation. Although it has the same method
    * signature as the method in LocalRegion, it is invoked in a different code path. LocalRegion
@@ -4464,31 +4487,26 @@ public class PartitionedRegion extends LocalRegion
   @Override
   boolean cacheWriteBeforeRegionDestroy(RegionEventImpl event)
       throws CacheWriterException, TimeoutException {
-
     if (event.getOperation().isDistributed()) {
       serverRegionDestroy(event);
-      CacheWriter localWriter = basicGetWriter();
-      Set netWriteRecipients = localWriter == null ? this.distAdvisor.adviseNetWrite() : null;
-
-      if (localWriter == null && (netWriteRecipients == null || netWriteRecipients.isEmpty())) {
-        return false;
-      }
-
-      final long start = getCachePerfStats().startCacheWriterCall();
-      try {
-        SearchLoadAndWriteProcessor processor = SearchLoadAndWriteProcessor.getProcessor();
-        processor.initialize(this, "preDestroyRegion", null);
-        processor.doNetWrite(event, netWriteRecipients, localWriter,
-            SearchLoadAndWriteProcessor.BEFOREREGIONDESTROY);
-        processor.release();
-      } finally {
-        getCachePerfStats().endCacheWriterCall(start);
-      }
-      return true;
+      SearchLoadAndWriteProcessor processor = SearchLoadAndWriteProcessor.getProcessor();
+      return triggerWriter(event, processor, SearchLoadAndWriteProcessor.BEFOREREGIONDESTROY,
+          "preDestroyRegion");
     }
     return false;
   }
 
+  @Override
+  void cacheWriteBeforeRegionClear(RegionEventImpl event)
+      throws CacheWriterException, TimeoutException {
+    if (event.getOperation().isDistributed()) {
+      serverRegionClear(event);
+      SearchLoadAndWriteProcessor processor = SearchLoadAndWriteProcessor.getProcessor();
+      triggerWriter(event, processor, SearchLoadAndWriteProcessor.BEFOREREGIONCLEAR,
+          "preClearRegion");
+    }
+  }
+
   /**
    * Test Method: Get the DistributedMember identifier for the vm containing a key
    *


[geode] 07/22: GEODE-7667: Add a 'clear' gfsh command for PR and RR clear (#4818)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit a7adc4276ea1e1fec93f2c39f07c4b33d645b650
Author: BenjaminPerryRoss <39...@users.noreply.github.com>
AuthorDate: Tue May 5 11:40:34 2020 -0700

    GEODE-7667: Add a 'clear' gfsh command for PR and RR clear (#4818)
    
    * Added clear command and modified remove functionality to clear PR
    
    Authored-by: Benjamin Ross <br...@pivotal.io>
---
 .../geode/management/internal/i18n/CliStrings.java |  14 ++-
 .../cli/commands/ClearCommandDUnitTest.java        | 120 +++++++++++++++++++++
 .../cli/commands/RemoveCommandDUnitTest.java       |  13 ++-
 .../{RemoveCommand.java => ClearCommand.java}      |  53 ++++-----
 .../cli/commands/CommandAvailabilityIndicator.java |   1 +
 .../internal/cli/commands/RemoveCommand.java       |   9 +-
 .../internal/cli/domain/DataCommandResult.java     |  12 +++
 .../cli/functions/DataCommandFunction.java         |  23 ++--
 .../internal/cli/commands/ClearCommandTest.java    | 115 ++++++++++++++++++++
 9 files changed, 309 insertions(+), 51 deletions(-)

diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java b/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java
index c354d030..6df013e 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java
@@ -811,6 +811,14 @@ public class CliStrings {
   public static final String CLEAR_DEFINED_INDEX__SUCCESS__MSG =
       "Index definitions successfully cleared";
 
+  /* clear region */
+  public static final String CLEAR_REGION = "clear region";
+  public static final String CLEAR_REGION_HELP =
+      "Clears/Removes all keys from the specified region.";
+  public static final String CLEAR_REGION_REGION_NAME = "name";
+  public static final String CLEAR_REGION_REGION_NAME_HELP = "Region to clear keys from.";
+  public static final String CLEAR_REGION_CLEARED_ALL_KEYS = "Cleared all keys in the region";
+
   /* create region */
   public static final String CREATE_REGION = "create region";
   public static final String CREATE_REGION__HELP =
@@ -1930,9 +1938,9 @@ public class CliStrings {
   public static final String REMOVE__MSG__KEY_EMPTY = "Key is Null";
   public static final String REMOVE__MSG__REGION_NOT_FOUND = "Region <{0}> Not Found";
   public static final String REMOVE__MSG__KEY_NOT_FOUND_REGION = "Key is not present in the region";
-  public static final String REMOVE__MSG__CLEARED_ALL_CLEARS = "Cleared all keys in the region";
-  public static final String REMOVE__MSG__CLEARALL_NOT_SUPPORTED_FOR_PARTITIONREGION =
-      "Option --" + REMOVE__ALL + " is not supported on partitioned region";
+  public static final String REMOVE__MSG__CLEARALL_DEPRECATION_WARNING =
+      "Warning: The --all option for the 'remove' command is deprecated. Please"
+          + " use the 'clear' command instead.";
 
   /* resume gateway-sender */
   public static final String RESUME_GATEWAYSENDER = "resume gateway-sender";
diff --git a/geode-gfsh/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/ClearCommandDUnitTest.java b/geode-gfsh/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/ClearCommandDUnitTest.java
new file mode 100644
index 0000000..e51fc0f
--- /dev/null
+++ b/geode-gfsh/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/ClearCommandDUnitTest.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.management.internal.cli.commands;
+
+import static org.apache.geode.management.internal.cli.commands.RemoveCommand.REGION_NOT_FOUND;
+import static org.apache.geode.management.internal.i18n.CliStrings.CLEAR_REGION_CLEARED_ALL_KEYS;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import junitparams.JUnitParamsRunner;
+import junitparams.Parameters;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
+import org.apache.geode.management.internal.i18n.CliStrings;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.rules.GfshCommandRule;
+import org.apache.geode.test.junit.rules.VMProvider;
+
+@RunWith(JUnitParamsRunner.class)
+public class ClearCommandDUnitTest {
+  private static final String REPLICATE_REGION_NAME = "replicateRegion";
+  private static final String PARTITIONED_REGION_NAME = "partitionedRegion";
+  private static final String EMPTY_STRING = "";
+  private static final int NUM_ENTRIES = 200;
+
+  @Rule
+  public ClusterStartupRule clusterStartupRule = new ClusterStartupRule();
+
+  @Rule
+  public GfshCommandRule gfsh = new GfshCommandRule();
+
+  private MemberVM locator;
+  private MemberVM server1;
+  private MemberVM server2;
+
+  @Before
+  public void setup() throws Exception {
+    locator = clusterStartupRule.startLocatorVM(0);
+    server1 = clusterStartupRule.startServerVM(1, locator.getPort());
+    server2 = clusterStartupRule.startServerVM(2, locator.getPort());
+
+    gfsh.connectAndVerify(locator);
+    gfsh.executeAndAssertThat("create region --name=" + REPLICATE_REGION_NAME + " --type=REPLICATE")
+        .statusIsSuccess();
+    gfsh.executeAndAssertThat(
+        "create region --name=" + PARTITIONED_REGION_NAME + " --type=PARTITION").statusIsSuccess();
+
+    locator.waitUntilRegionIsReadyOnExactlyThisManyServers("/" + REPLICATE_REGION_NAME, 2);
+    locator.waitUntilRegionIsReadyOnExactlyThisManyServers("/" + PARTITIONED_REGION_NAME, 2);
+
+    VMProvider.invokeInEveryMember(ClearCommandDUnitTest::populateTestRegions, server1, server2);
+  }
+
+  private static void populateTestRegions() {
+    Cache cache = CacheFactory.getAnyInstance();
+
+    Region<String, String> replicateRegion = cache.getRegion(REPLICATE_REGION_NAME);
+    replicateRegion.put(EMPTY_STRING, "valueForEmptyKey");
+    for (int i = 0; i < NUM_ENTRIES; i++) {
+      replicateRegion.put("key" + i, "value" + i);
+    }
+
+    Region<String, String> partitionedRegion = cache.getRegion(PARTITIONED_REGION_NAME);
+    partitionedRegion.put(EMPTY_STRING, "valueForEmptyKey");
+    for (int i = 0; i < NUM_ENTRIES; i++) {
+      partitionedRegion.put("key" + i, "value" + i);
+    }
+  }
+
+  @Test
+  public void clearFailsWhenRegionIsNotFound() {
+    String invalidRegionName = "NotAValidRegion";
+    String command = new CommandStringBuilder(CliStrings.CLEAR_REGION)
+        .addOption(CliStrings.CLEAR_REGION_REGION_NAME, invalidRegionName).getCommandString();
+    gfsh.executeAndAssertThat(command).statusIsError()
+        .containsOutput(String.format(REGION_NOT_FOUND, "/" + invalidRegionName));
+  }
+
+  @Test
+  @Parameters({REPLICATE_REGION_NAME, PARTITIONED_REGION_NAME})
+  public void clearSucceedsWithValidRegion(String regionName) {
+    String command = new CommandStringBuilder(CliStrings.CLEAR_REGION)
+        .addOption(CliStrings.CLEAR_REGION_REGION_NAME, regionName).getCommandString();
+
+    gfsh.executeAndAssertThat(command).statusIsSuccess();
+
+    assertThat(gfsh.getGfshOutput()).contains(CLEAR_REGION_CLEARED_ALL_KEYS);
+
+    server1.invoke(() -> verifyAllKeysAreRemoved(regionName));
+    server2.invoke(() -> verifyAllKeysAreRemoved(regionName));
+  }
+
+  private static void verifyAllKeysAreRemoved(String regionName) {
+    Region region = getRegion(regionName);
+    assertThat(region.size()).isEqualTo(0);
+  }
+
+  private static Region getRegion(String regionName) {
+    return CacheFactory.getAnyInstance().getRegion(regionName);
+  }
+}
diff --git a/geode-gfsh/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/RemoveCommandDUnitTest.java b/geode-gfsh/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/RemoveCommandDUnitTest.java
index 92a65ec..2a88d64 100644
--- a/geode-gfsh/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/RemoveCommandDUnitTest.java
+++ b/geode-gfsh/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/RemoveCommandDUnitTest.java
@@ -16,6 +16,7 @@ package org.apache.geode.management.internal.cli.commands;
 
 import static org.apache.geode.cache.Region.SEPARATOR;
 import static org.apache.geode.management.internal.cli.commands.RemoveCommand.REGION_NOT_FOUND;
+import static org.apache.geode.management.internal.i18n.CliStrings.CLEAR_REGION_CLEARED_ALL_KEYS;
 import static org.assertj.core.api.Assertions.assertThat;
 
 import org.junit.Before;
@@ -25,6 +26,7 @@ import org.junit.Test;
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.cache.Region;
+import org.apache.geode.management.internal.i18n.CliStrings;
 import org.apache.geode.test.dunit.rules.ClusterStartupRule;
 import org.apache.geode.test.dunit.rules.MemberVM;
 import org.apache.geode.test.junit.rules.GfshCommandRule;
@@ -128,7 +130,8 @@ public class RemoveCommandDUnitTest {
     gfsh.executeAndAssertThat("list regions").statusIsSuccess();
     gfsh.executeAndAssertThat(command).statusIsSuccess();
 
-    assertThat(gfsh.getGfshOutput()).contains("Cleared all keys in the region");
+    assertThat(gfsh.getGfshOutput()).contains(CLEAR_REGION_CLEARED_ALL_KEYS)
+        .contains(CliStrings.REMOVE__MSG__CLEARALL_DEPRECATION_WARNING);
 
     server1.invoke(() -> verifyAllKeysAreRemoved(REPLICATE_REGION_NAME));
     server2.invoke(() -> verifyAllKeysAreRemoved(REPLICATE_REGION_NAME));
@@ -139,11 +142,13 @@ public class RemoveCommandDUnitTest {
   public void removeAllFromPartitionedRegion() {
     String command = "remove --all --region=" + PARTITIONED_REGION_NAME;
 
-    // Maybe this should return an "error" status, but the current behavior is status "OK"
     gfsh.executeAndAssertThat(command).statusIsSuccess();
 
-    assertThat(gfsh.getGfshOutput())
-        .contains("Option --all is not supported on partitioned region");
+    assertThat(gfsh.getGfshOutput()).contains(CLEAR_REGION_CLEARED_ALL_KEYS)
+        .contains(CliStrings.REMOVE__MSG__CLEARALL_DEPRECATION_WARNING);
+
+    server1.invoke(() -> verifyAllKeysAreRemoved(PARTITIONED_REGION_NAME));
+    server2.invoke(() -> verifyAllKeysAreRemoved(PARTITIONED_REGION_NAME));
   }
 
   /**
diff --git a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/RemoveCommand.java b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/ClearCommand.java
similarity index 63%
copy from geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/RemoveCommand.java
copy to geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/ClearCommand.java
index c15f01f..75ac720 100644
--- a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/RemoveCommand.java
+++ b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/ClearCommand.java
@@ -38,59 +38,52 @@ import org.apache.geode.management.internal.i18n.CliStrings;
 import org.apache.geode.security.ResourcePermission.Operation;
 import org.apache.geode.security.ResourcePermission.Resource;
 
-public class RemoveCommand extends GfshCommand {
+public class ClearCommand extends GfshCommand {
   public static final String REGION_NOT_FOUND = "Region <%s> not found in any of the members";
 
   @CliMetaData(relatedTopic = {CliStrings.TOPIC_GEODE_DATA, CliStrings.TOPIC_GEODE_REGION})
-  @CliCommand(value = {CliStrings.REMOVE}, help = CliStrings.REMOVE__HELP)
-  public ResultModel remove(
-      @CliOption(key = {CliStrings.REMOVE__KEY}, help = CliStrings.REMOVE__KEY__HELP,
-          specifiedDefaultValue = "") String key,
-      @CliOption(key = {CliStrings.REMOVE__REGION}, mandatory = true,
-          help = CliStrings.REMOVE__REGION__HELP,
-          optionContext = ConverterHint.REGION_PATH) String regionPath,
-      @CliOption(key = CliStrings.REMOVE__ALL, help = CliStrings.REMOVE__ALL__HELP,
-          specifiedDefaultValue = "true", unspecifiedDefaultValue = "false") boolean removeAllKeys,
-      @CliOption(key = {CliStrings.REMOVE__KEYCLASS},
-          help = CliStrings.REMOVE__KEYCLASS__HELP) String keyClass) {
-    Cache cache = getCache();
+  @CliCommand(value = {CliStrings.CLEAR_REGION}, help = CliStrings.CLEAR_REGION_HELP)
+  public ResultModel clear(
+      @CliOption(key = {CliStrings.CLEAR_REGION_REGION_NAME}, mandatory = true,
+          help = CliStrings.CLEAR_REGION_REGION_NAME_HELP,
+          optionContext = ConverterHint.REGION_PATH) String regionPath) {
 
-    if (!removeAllKeys && (key == null)) {
-      return new ResultModel().createError(CliStrings.REMOVE__MSG__KEY_EMPTY);
-    }
+    Cache cache = getCache();
 
-    if (removeAllKeys) {
-      authorize(Resource.DATA, Operation.WRITE, regionPath);
-    } else {
-      authorize(Resource.DATA, Operation.WRITE, regionPath, key);
-    }
+    authorize(Resource.DATA, Operation.WRITE, regionPath);
 
-    key = DataCommandsUtils.makeBrokenJsonCompliant(key);
 
     Region region = cache.getRegion(regionPath);
-    DataCommandFunction removefn = new DataCommandFunction();
+    DataCommandFunction clearfn = createCommandFunction();
     DataCommandResult dataResult;
     if (region == null) {
       Set<DistributedMember> memberList = findAnyMembersForRegion(regionPath);
 
       if (CollectionUtils.isEmpty(memberList)) {
-        return new ResultModel().createError(String.format(REGION_NOT_FOUND, regionPath));
+        return ResultModel.createError(String.format(REGION_NOT_FOUND, regionPath));
       }
 
       DataCommandRequest request = new DataCommandRequest();
       request.setCommand(CliStrings.REMOVE);
-      request.setKey(key);
-      request.setKeyClass(keyClass);
-      request.setRemoveAllKeys(removeAllKeys ? "ALL" : null);
+      request.setRemoveAllKeys("ALL");
       request.setRegionName(regionPath);
-      dataResult = callFunctionForRegion(request, removefn, memberList);
+      dataResult = callFunctionForRegion(request, clearfn, memberList);
     } else {
-      dataResult = removefn.remove(key, keyClass, regionPath, removeAllKeys ? "ALL" : null,
+      dataResult = clearfn.remove(null, null, regionPath, "ALL",
           (InternalCache) cache);
     }
 
-    dataResult.setKeyClass(keyClass);
+    dataResult.setKeyClass(null);
 
     return dataResult.toResultModel();
   }
+
+  DataCommandResult callFunctionForRegion(DataCommandRequest request, DataCommandFunction clearfn,
+      Set<DistributedMember> memberList) {
+    return DataCommandsUtils.callFunctionForRegion(request, clearfn, memberList);
+  }
+
+  DataCommandFunction createCommandFunction() {
+    return new DataCommandFunction();
+  }
 }
diff --git a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/CommandAvailabilityIndicator.java b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/CommandAvailabilityIndicator.java
index ae0b7c4..50f811d 100644
--- a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/CommandAvailabilityIndicator.java
+++ b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/CommandAvailabilityIndicator.java
@@ -26,6 +26,7 @@ public class CommandAvailabilityIndicator extends GfshCommand {
       CliStrings.DESCRIBE_CONFIG, CliStrings.EXPORT_CONFIG, CliStrings.ALTER_RUNTIME_CONFIG,
       CliStrings.ALTER_REGION, CliStrings.CREATE_REGION, CliStrings.DESTROY_REGION,
       CliStrings.REBALANCE, CliStrings.GET, CliStrings.PUT, CliStrings.REMOVE,
+      CliStrings.CLEAR_REGION,
       CliStrings.LOCATE_ENTRY, CliStrings.QUERY, CliStrings.IMPORT_DATA, CliStrings.EXPORT_DATA,
       CliStrings.DEPLOY, CliStrings.UNDEPLOY, CliStrings.LIST_DEPLOYED,
       CliStrings.BACKUP_DISK_STORE, CliStrings.COMPACT_DISK_STORE, CliStrings.DESCRIBE_DISK_STORE,
diff --git a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/RemoveCommand.java b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/RemoveCommand.java
index c15f01f..062428d 100644
--- a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/RemoveCommand.java
+++ b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/RemoveCommand.java
@@ -90,7 +90,14 @@ public class RemoveCommand extends GfshCommand {
     }
 
     dataResult.setKeyClass(keyClass);
+    ResultModel result;
 
-    return dataResult.toResultModel();
+    if (removeAllKeys) {
+      result = dataResult.toResultModel(CliStrings.REMOVE__MSG__CLEARALL_DEPRECATION_WARNING);
+    } else {
+      result = dataResult.toResultModel();
+    }
+
+    return result;
   }
 }
diff --git a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/domain/DataCommandResult.java b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/domain/DataCommandResult.java
index 3f00a95..6bc91cb 100644
--- a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/domain/DataCommandResult.java
+++ b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/domain/DataCommandResult.java
@@ -33,6 +33,7 @@ import org.apache.geode.cache.query.internal.Undefined;
 import org.apache.geode.management.cli.Result;
 import org.apache.geode.management.internal.cli.GfshParser;
 import org.apache.geode.management.internal.cli.result.model.DataResultModel;
+import org.apache.geode.management.internal.cli.result.model.InfoResultModel;
 import org.apache.geode.management.internal.cli.result.model.ResultModel;
 import org.apache.geode.management.internal.cli.result.model.TabularResultModel;
 import org.apache.geode.management.internal.i18n.CliStrings;
@@ -49,6 +50,7 @@ public class DataCommandResult implements Serializable {
 
   public static final Logger logger = LogManager.getLogger();
   public static final String DATA_INFO_SECTION = "data-info";
+  public static final String WARNING_INFO_SECTION = "header-info";
   public static final String QUERY_SECTION = "query";
   public static final String LOCATION_SECTION = "location";
   private String command;
@@ -372,6 +374,10 @@ public class DataCommandResult implements Serializable {
   }
 
   public ResultModel toResultModel() {
+    return toResultModel("");
+  }
+
+  public ResultModel toResultModel(String warningMessage) {
     if (StringUtils.isEmpty(keyClass)) {
       keyClass = "java.lang.String";
     }
@@ -381,6 +387,12 @@ public class DataCommandResult implements Serializable {
     }
 
     ResultModel result = new ResultModel();
+
+    if (warningMessage != null && !warningMessage.isEmpty()) {
+      InfoResultModel info = result.addInfo(WARNING_INFO_SECTION);
+      info.addLine(warningMessage);
+    }
+
     DataResultModel data = result.addData(DATA_INFO_SECTION);
 
     if (errorString != null) {
diff --git a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/functions/DataCommandFunction.java b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/functions/DataCommandFunction.java
index 17cf20f..44c84b5 100644
--- a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/functions/DataCommandFunction.java
+++ b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/functions/DataCommandFunction.java
@@ -29,7 +29,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.logging.log4j.Logger;
 
-import org.apache.geode.cache.DataPolicy;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.execute.FunctionContext;
 import org.apache.geode.cache.partition.PartitionRegionHelper;
@@ -315,22 +314,20 @@ public class DataCommandFunction implements InternalFunction<DataCommandRequest>
               CliStrings.REMOVE__MSG__KEY_NOT_FOUND_REGION, false);
         }
       } else {
-        DataPolicy policy = region.getAttributes().getDataPolicy();
-        if (!policy.withPartitioning()) {
-          region.clear();
-          if (logger.isDebugEnabled()) {
-            logger.debug("Cleared all keys in the region - {}", regionName);
-          }
-          return DataCommandResult.createRemoveInfoResult(key, null, null,
-              CliStrings.format(CliStrings.REMOVE__MSG__CLEARED_ALL_CLEARS, regionName), true);
-        } else {
-          return DataCommandResult.createRemoveInfoResult(key, null, null,
-              CliStrings.REMOVE__MSG__CLEARALL_NOT_SUPPORTED_FOR_PARTITIONREGION, false);
-        }
+        return clear(region, regionName);
       }
     }
   }
 
+  public DataCommandResult clear(Region region, String regionName) {
+    region.clear();
+    if (logger.isDebugEnabled()) {
+      logger.debug("Cleared all keys in the region - {}", regionName);
+    }
+    // CLEAR_REGION_CLEARED_ALL_KEYS has no {0} placeholder, so CliStrings.format()
+    // would silently discard regionName; use the constant directly.
+    return DataCommandResult.createRemoveInfoResult(null, null, null,
+        CliStrings.CLEAR_REGION_CLEARED_ALL_KEYS, true);
+  }
+
   @SuppressWarnings({"rawtypes"})
   public DataCommandResult get(Object principal, String key, String keyClass, String valueClass,
       String regionName, Boolean loadOnCacheMiss, InternalCache cache) {
diff --git a/geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/commands/ClearCommandTest.java b/geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/commands/ClearCommandTest.java
new file mode 100644
index 0000000..a716ef5
--- /dev/null
+++ b/geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/commands/ClearCommandTest.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.management.internal.cli.commands;
+
+
+import static org.apache.geode.management.internal.cli.commands.ClearCommand.REGION_NOT_FOUND;
+import static org.apache.geode.management.internal.i18n.CliStrings.CLEAR_REGION;
+import static org.apache.geode.management.internal.i18n.CliStrings.CLEAR_REGION_REGION_NAME;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.management.internal.cli.domain.DataCommandResult;
+import org.apache.geode.management.internal.cli.functions.DataCommandFunction;
+import org.apache.geode.management.internal.cli.result.model.ResultModel;
+import org.apache.geode.test.junit.rules.GfshParserRule;
+
+public class ClearCommandTest {
+
+  @ClassRule
+  public static GfshParserRule gfsh = new GfshParserRule();
+
+  static final String regionName = "regionName";
+  static final String success = "SUCCESS";
+
+  InternalCache cache;
+  ClearCommand command;
+  Region<Object, Object> region;
+  Set<DistributedMember> membersList;
+  DistributedMember member;
+  DataCommandResult dataResult;
+
+  @SuppressWarnings("unchecked")
+  @Before
+  public void setup() {
+    cache = mock(InternalCache.class);
+    command = spy(new ClearCommand());
+    region = mock(Region.class);
+    dataResult = mock(DataCommandResult.class);
+
+    membersList = new HashSet<>();
+    membersList.add(member);
+
+    doNothing().when(command).authorize(any(), any(), anyString());
+    doReturn(cache).when(command).getCache();
+    doReturn(membersList).when(command).findAnyMembersForRegion(anyString());
+
+    ResultModel result = ResultModel.createInfo(success);
+    doReturn(result).when(dataResult).toResultModel();
+  }
+
+  @Test
+  public void commandReturnsErrorIfRegionIsNotFound() {
+    membersList.clear();
+
+    gfsh.executeAndAssertThat(command,
+        CLEAR_REGION + " --" + CLEAR_REGION_REGION_NAME + "=/" + regionName)
+        .statusIsError().containsOutput(String.format(REGION_NOT_FOUND, "/" + regionName));
+  }
+
+  @Test
+  public void commandReturnsSuccessfullyIfRegionIsFoundOnServersButNotLocator() {
+    doReturn(dataResult).when(command).callFunctionForRegion(any(), any(), any());
+
+    gfsh.executeAndAssertThat(command,
+        CLEAR_REGION + " --" + CLEAR_REGION_REGION_NAME + "=/" + regionName)
+        .statusIsSuccess().containsOutput(success);
+
+    verify(command).callFunctionForRegion(any(), any(), any());
+  }
+
+  @Test
+  public void commandReturnsSuccessfullyIfRegionIsFoundOnLocator() {
+    DataCommandFunction dataCommandFunction = mock(DataCommandFunction.class);
+    doReturn(dataCommandFunction).when(command).createCommandFunction();
+    when(cache.getRegion("/" + regionName)).thenReturn(region);
+
+    doReturn(dataResult).when(dataCommandFunction)
+        .remove(null, null, "/" + regionName, "ALL", cache);
+
+    gfsh.executeAndAssertThat(command,
+        CLEAR_REGION + " --" + CLEAR_REGION_REGION_NAME + "=/" + regionName)
+        .statusIsSuccess().containsOutput(success);
+
+    verify(dataCommandFunction).remove(null, null, "/" + regionName,
+        "ALL", cache);
+  }
+}


[geode] 18/22: GEODE-7846: Adding Stats for Partitioned Region Clear (#5391)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 91b19ccbc345d49483151e889b6e354634e187a7
Author: BenjaminPerryRoss <39...@users.noreply.github.com>
AuthorDate: Wed Aug 19 14:40:09 2020 -0700

    GEODE-7846: Adding Stats for Partitioned Region Clear (#5391)
    
    Added stats to CachePerfStats for PR Clear
    - Changed clears to 'regionClears' and 'bucketClears' to differentiate between the number of times the region was cleared and the number of times a bucket was cleared in a PartitionedRegion
    - Added Local and Total duration stats to record how long clear has been running for a specific region, as well as how long was spent clearing any specific member
---
 .../cache/RegionClearStatsDistributedTest.java     |  2 +-
 .../cache/PartitionedRegionClearDUnitTest.java     | 66 ++++++++++++++++++++++
 .../geode/internal/cache/AbstractRegionMap.java    |  7 ++-
 .../geode/internal/cache/CachePerfStats.java       | 59 ++++++++++++++++---
 .../geode/internal/cache/PartitionedRegion.java    |  8 +++
 .../internal/cache/PartitionedRegionClear.java     | 14 ++++-
 .../geode/internal/cache/RegionPerfStats.java      | 12 +++-
 .../apache/geode/internal/cache/RegionStats.java   |  4 +-
 .../geode/internal/cache/CachePerfStatsTest.java   | 53 +++++++++++++----
 9 files changed, 200 insertions(+), 25 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/RegionClearStatsDistributedTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/RegionClearStatsDistributedTest.java
index 52a4ade..50cea82 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/RegionClearStatsDistributedTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/RegionClearStatsDistributedTest.java
@@ -169,7 +169,7 @@ public class RegionClearStatsDistributedTest implements Serializable {
   }
 
   private void validateClearCountStat() {
-    assertThat(cacheRule.getCache().getCachePerfStats().getClearCount())
+    assertThat(cacheRule.getCache().getCachePerfStats().getRegionClearCount())
         .isEqualTo(EXPECTED_CLEAR_COUNT_STAT_VALUE);
   }
 }
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
index a3b311c..b871926 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
@@ -152,6 +152,34 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
     dataStore3.invoke(() -> verifyRegionSize(false, expectedNum));
   }
 
+  private void verifyDatastoreStats(MemberVM datastore, boolean isCoordinator) {
+    datastore.invoke(() -> {
+      PartitionedRegion region = (PartitionedRegion) getRegion(false);
+      long clearCount = 0L;
+      int bucketCount = region.getDataStore().getAllLocalBucketRegions().size();
+
+      for (BucketRegion bucket : region.getDataStore().getAllLocalBucketRegions()) {
+        if (clearCount == 0) {
+          clearCount = bucket.getCachePerfStats().getBucketClearCount();
+        }
+        assertThat(bucket.getCachePerfStats().getBucketClearCount()).isEqualTo(bucketCount);
+      }
+
+      CachePerfStats stats = region.getRegionCachePerfStats();
+
+      assertThat(stats.getRegionClearCount()).isEqualTo(1);
+      assertThat(stats.getPartitionedRegionClearLocalDuration())
+          .isGreaterThan(0);
+      if (isCoordinator) {
+        assertThat(stats.getPartitionedRegionClearTotalDuration())
+            .isGreaterThan(0);
+      } else {
+        assertThat(stats.getPartitionedRegionClearTotalDuration())
+            .isEqualTo(0);
+      }
+    });
+  }
+
   private void verifyClientRegionSize(int expectedNum) {
     client1.invoke(() -> verifyRegionSize(true, expectedNum));
     // TODO: notify register clients
@@ -331,6 +359,44 @@ public class PartitionedRegionClearDUnitTest implements Serializable {
   }
 
   @Test
+  public void normalClearFromDataStoreUpdatesStats() {
+    configureServers(false, true);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    // Verify no clears have been recorded in stats
+    dataStore1.invoke(() -> {
+      PartitionedRegion region = (PartitionedRegion) getRegion(false);
+
+      for (BucketRegion bucket : region.getDataStore().getAllLocalBucketRegions()) {
+        long clearCount = bucket.getCachePerfStats().getRegionClearCount();
+        assertThat(clearCount).isEqualTo(0);
+      }
+    });
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    dataStore1.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+
+    // Verify the stats were properly updated for the bucket regions
+    verifyDatastoreStats(dataStore1, true);
+    verifyDatastoreStats(dataStore2, false);
+    verifyDatastoreStats(dataStore3, false);
+
+
+    // The accessor shouldn't increment the region clear count
+    accessor.invoke(() -> {
+      PartitionedRegion region = (PartitionedRegion) getRegion(false);
+
+      assertThat(region.getRegionCachePerfStats()).isNull();
+      assertThat(region.getCachePerfStats().getRegionClearCount()).isEqualTo(0);
+      assertThat(region.getCachePerfStats().getPartitionedRegionClearLocalDuration()).isEqualTo(0);
+      assertThat(region.getCachePerfStats().getPartitionedRegionClearTotalDuration()).isEqualTo(0);
+    });
+  }
+
+  @Test
   public void normalClearFromClient() {
     configureServers(true, false);
     client1.invoke(this::initClientCache);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
index f1f765e..dc26126 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
@@ -344,7 +344,12 @@ public abstract class AbstractRegionMap extends BaseRegionMap
     if (lr != null && !(lr instanceof HARegion)) {
       CachePerfStats stats = lr.getCachePerfStats();
       if (stats != null) {
-        stats.incClearCount();
+        if (lr.isUsedForPartitionedRegionBucket()) {
+          stats.incBucketClearCount();
+        } else {
+          stats.incRegionClearCount();
+        }
+
       }
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
index 5bbca52..25fdca2 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
@@ -118,7 +118,11 @@ public class CachePerfStats {
   static final int indexUpdateInProgressId;
   static final int indexUpdateCompletedId;
   static final int indexUpdateTimeId;
-  static final int clearsId;
+  static final int bucketClearsId;
+  static final int regionClearsId;
+  static final int partitionedRegionClearLocalDurationId;
+  static final int partitionedRegionClearTotalDurationId;
+
   private static final int indexInitializationInProgressId;
   private static final int indexInitializationCompletedId;
   private static final int indexInitializationTimeId;
@@ -286,7 +290,14 @@ public class CachePerfStats {
         "Current number of regions configured for reliablity that are missing required roles with Limited access";
     final String reliableRegionsMissingNoAccessDesc =
         "Current number of regions configured for reliablity that are missing required roles with No access";
-    final String clearsDesc = "The total number of times a clear has been done on this cache.";
+    final String regionClearsDesc =
+        "The total number of times a clear has been done on this cache.";
+    final String bucketClearsDesc =
+        "The total number of times a clear has been done on this region and its bucket regions.";
+    final String partitionedRegionClearLocalDurationDesc =
+        "The time in nanoseconds partitioned region clear has been running for the region on this member";
+    final String partitionedRegionClearTotalDurationDesc =
+        "The time in nanoseconds partitioned region clear has been running for the region with this member as coordinator.";
     final String metaDataRefreshCountDesc =
         "Total number of times the meta data is refreshed due to hopping observed.";
     final String conflatedEventsDesc =
@@ -465,7 +476,12 @@ public class CachePerfStats {
             f.createIntCounter("retries",
                 "Number of times a concurrent destroy followed by a create has caused an entry operation to need to retry.",
                 "operations"),
-            f.createLongCounter("clears", clearsDesc, "operations"),
+            f.createLongCounter("regionClears", regionClearsDesc, "operations"),
+            f.createLongCounter("bucketClears", bucketClearsDesc, "operations"),
+            f.createLongCounter("partitionedRegionClearLocalDuration",
+                partitionedRegionClearLocalDurationDesc, "nanoseconds"),
+            f.createLongCounter("partitionedRegionClearTotalDuration",
+                partitionedRegionClearTotalDurationDesc, "nanoseconds"),
             f.createIntGauge("diskTasksWaiting",
                 "Current number of disk tasks (oplog compactions, asynchronous recoveries, etc) that are waiting for a thread to run the operation",
                 "operations"),
@@ -608,7 +624,10 @@ public class CachePerfStats {
     eventsQueuedId = type.nameToId("eventsQueued");
 
     retriesId = type.nameToId("retries");
-    clearsId = type.nameToId("clears");
+    regionClearsId = type.nameToId("regionClears");
+    bucketClearsId = type.nameToId("bucketClears");
+    partitionedRegionClearLocalDurationId = type.nameToId("partitionedRegionClearLocalDuration");
+    partitionedRegionClearTotalDurationId = type.nameToId("partitionedRegionClearTotalDuration");
 
     diskTasksWaitingId = type.nameToId("diskTasksWaiting");
     evictorJobsStartedId = type.nameToId("evictorJobsStarted");
@@ -1394,12 +1413,36 @@ public class CachePerfStats {
     };
   }
 
-  public long getClearCount() {
-    return stats.getLong(clearsId);
+  public long getRegionClearCount() {
+    return stats.getLong(regionClearsId);
+  }
+
+  public long getBucketClearCount() {
+    return stats.getLong(bucketClearsId);
+  }
+
+  public long getPartitionedRegionClearLocalDuration() {
+    return stats.getLong(partitionedRegionClearLocalDurationId);
+  }
+
+  public long getPartitionedRegionClearTotalDuration() {
+    return stats.getLong(partitionedRegionClearTotalDurationId);
+  }
+
+  public void incRegionClearCount() {
+    stats.incLong(regionClearsId, 1L);
+  }
+
+  public void incBucketClearCount() {
+    stats.incLong(bucketClearsId, 1L);
+  }
+
+  public void incPartitionedRegionClearLocalDuration(long durationNanos) {
+    stats.incLong(partitionedRegionClearLocalDurationId, durationNanos);
   }
 
-  public void incClearCount() {
-    stats.incLong(clearsId, 1L);
+  public void incPartitionedRegionClearTotalDuration(long durationNanos) {
+    stats.incLong(partitionedRegionClearTotalDurationId, durationNanos);
   }
 
   public long getConflatedEventsCount() {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index 25481ae..6bfd0cf 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -574,6 +574,14 @@ public class PartitionedRegion extends LocalRegion
     return this.partitionListeners;
   }
 
+  public CachePerfStats getRegionCachePerfStats() {
+    if (dataStore != null && dataStore.getAllLocalBucketRegions().size() > 0) {
+      BucketRegion bucket = dataStore.getAllLocalBucketRegions().iterator().next();
+      return bucket.getCachePerfStats();
+    }
+    return null;
+  }
+
   /**
    * Return canonical representation for a bucket (for logging)
    *
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
index 1c9d5b2..4796a17 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
@@ -123,6 +123,7 @@ public class PartitionedRegionClear {
 
   public Set<Integer> clearRegionLocal(RegionEventImpl regionEvent) {
     Set<Integer> clearedBuckets = new HashSet<>();
+    long clearStartTime = System.nanoTime();
     setMembershipChange(false);
     // Synchronized to handle the requester departure.
     synchronized (lockForListenerAndClientNotification) {
@@ -156,6 +157,11 @@ public class PartitionedRegionClear {
           doAfterClear(regionEvent);
         } finally {
           partitionedRegion.getDataStore().unlockBucketCreationForRegionClear();
+          if (clearedBuckets.size() != 0 && partitionedRegion.getCachePerfStats() != null) {
+            partitionedRegion.getRegionCachePerfStats().incRegionClearCount();
+            partitionedRegion.getRegionCachePerfStats()
+                .incPartitionedRegionClearLocalDuration(System.nanoTime() - clearStartTime);
+          }
         }
       } else {
         // Non data-store with client queue and listener
@@ -327,10 +333,12 @@ public class PartitionedRegionClear {
 
   void doClear(RegionEventImpl regionEvent, boolean cacheWrite) {
     String lockName = CLEAR_OPERATION + partitionedRegion.getName();
+    long clearStartTime = 0;
 
     try {
       // distributed lock to make sure only one clear op is in progress in the cluster.
       acquireDistributedClearLock(lockName);
+      clearStartTime = System.nanoTime();
 
       // Force all primary buckets to be created before clear.
       assignAllPrimaryBuckets();
@@ -367,9 +375,13 @@ public class PartitionedRegionClear {
           releaseLockForClear(regionEvent);
         }
       }
-
     } finally {
       releaseDistributedClearLock(lockName);
+      CachePerfStats stats = partitionedRegion.getRegionCachePerfStats();
+      if (stats != null) {
+        partitionedRegion.getRegionCachePerfStats()
+            .incPartitionedRegionClearTotalDuration(System.nanoTime() - clearStartTime);
+      }
     }
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionPerfStats.java b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionPerfStats.java
index d3c9891..30d60bf 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionPerfStats.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionPerfStats.java
@@ -520,9 +520,15 @@ class RegionPerfStats extends CachePerfStats implements RegionStats {
   }
 
   @Override
-  public void incClearCount() {
-    stats.incLong(clearsId, 1L);
-    cachePerfStats.incClearCount();
+  public void incRegionClearCount() {
+    stats.incLong(regionClearsId, 1L);
+    cachePerfStats.incRegionClearCount();
+  }
+
+  @Override
+  public void incBucketClearCount() {
+    stats.incLong(bucketClearsId, 1L);
+    cachePerfStats.incBucketClearCount();
   }
 
   @Override
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionStats.java b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionStats.java
index 2fe6cc1..4c0e446 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionStats.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionStats.java
@@ -135,7 +135,9 @@ public interface RegionStats {
 
   void incEvictWorkTime(long delta);
 
-  void incClearCount();
+  void incBucketClearCount();
+
+  void incRegionClearCount();
 
   void incPRQueryRetries();
 
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/CachePerfStatsTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/CachePerfStatsTest.java
index 7a81fdd..f1f303c 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/CachePerfStatsTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/CachePerfStatsTest.java
@@ -14,9 +14,9 @@
  */
 package org.apache.geode.internal.cache;
 
+import static org.apache.geode.internal.cache.CachePerfStats.bucketClearsId;
 import static org.apache.geode.internal.cache.CachePerfStats.cacheListenerCallsCompletedId;
 import static org.apache.geode.internal.cache.CachePerfStats.cacheWriterCallsCompletedId;
-import static org.apache.geode.internal.cache.CachePerfStats.clearsId;
 import static org.apache.geode.internal.cache.CachePerfStats.createsId;
 import static org.apache.geode.internal.cache.CachePerfStats.deltaFailedUpdatesId;
 import static org.apache.geode.internal.cache.CachePerfStats.deltaFullValuesRequestedId;
@@ -43,10 +43,13 @@ import static org.apache.geode.internal.cache.CachePerfStats.loadsCompletedId;
 import static org.apache.geode.internal.cache.CachePerfStats.missesId;
 import static org.apache.geode.internal.cache.CachePerfStats.netloadsCompletedId;
 import static org.apache.geode.internal.cache.CachePerfStats.netsearchesCompletedId;
+import static org.apache.geode.internal.cache.CachePerfStats.partitionedRegionClearLocalDurationId;
+import static org.apache.geode.internal.cache.CachePerfStats.partitionedRegionClearTotalDurationId;
 import static org.apache.geode.internal.cache.CachePerfStats.putAllsId;
 import static org.apache.geode.internal.cache.CachePerfStats.putTimeId;
 import static org.apache.geode.internal.cache.CachePerfStats.putsId;
 import static org.apache.geode.internal.cache.CachePerfStats.queryExecutionsId;
+import static org.apache.geode.internal.cache.CachePerfStats.regionClearsId;
 import static org.apache.geode.internal.cache.CachePerfStats.removeAllsId;
 import static org.apache.geode.internal.cache.CachePerfStats.retriesId;
 import static org.apache.geode.internal.cache.CachePerfStats.txCommitChangesId;
@@ -428,28 +431,58 @@ public class CachePerfStatsTest {
 
   @Test
   public void getClearsDelegatesToStatistics() {
-    statistics.incLong(clearsId, Long.MAX_VALUE);
+    statistics.incLong(regionClearsId, Long.MAX_VALUE);
 
-    assertThat(cachePerfStats.getClearCount()).isEqualTo(Long.MAX_VALUE);
+    assertThat(cachePerfStats.getRegionClearCount()).isEqualTo(Long.MAX_VALUE);
   }
 
   @Test
-  public void incClearCountIncrementsClears() {
-    cachePerfStats.incClearCount();
+  public void incRegionClearCountIncrementsClears() {
+    cachePerfStats.incRegionClearCount();
 
-    assertThat(statistics.getLong(clearsId)).isEqualTo(1L);
+    assertThat(statistics.getLong(regionClearsId)).isEqualTo(1L);
+  }
+
+  @Test
+  public void incBucketClearCountIncrementsClears() {
+    cachePerfStats.incBucketClearCount();
+
+    assertThat(statistics.getLong(bucketClearsId)).isEqualTo(1L);
+  }
+
+  @Test
+  public void incPartitionedRegionClearLocalDurationIncrementsPartitionedRegionClearLocalDuration() {
+    cachePerfStats.incPartitionedRegionClearLocalDuration(100L);
+
+    assertThat(statistics.getLong(partitionedRegionClearLocalDurationId)).isEqualTo(100L);
+  }
+
+  @Test
+  public void incPartitionedRegionClearTotalDurationIncrementsPartitionedRegionClearTotalDuration() {
+    cachePerfStats.incPartitionedRegionClearTotalDuration(100L);
+
+    assertThat(statistics.getLong(partitionedRegionClearTotalDurationId)).isEqualTo(100L);
   }
 
   /**
    * Characterization test: {@code clears} currently wraps to negative from max long value.
    */
   @Test
-  public void clearsWrapsFromMaxLongToNegativeValue() {
-    statistics.incLong(clearsId, Long.MAX_VALUE);
+  public void regionClearsWrapsFromMaxLongToNegativeValue() {
+    statistics.incLong(regionClearsId, Long.MAX_VALUE);
+
+    cachePerfStats.incRegionClearCount();
+
+    assertThat(cachePerfStats.getRegionClearCount()).isNegative();
+  }
+
+  @Test
+  public void bucketClearsWrapsFromMaxLongToNegativeValue() {
+    statistics.incLong(bucketClearsId, Long.MAX_VALUE);
 
-    cachePerfStats.incClearCount();
+    cachePerfStats.incBucketClearCount();
 
-    assertThat(cachePerfStats.getClearCount()).isNegative();
+    assertThat(cachePerfStats.getBucketClearCount()).isNegative();
   }
 
   @Test


[geode] 06/22: GEODE-7676: Add PR clear with expiration tests (#4970)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 06d77ba46f1249acdf4e9ed39a33bb5132648fac
Author: Juan José Ramos <ju...@users.noreply.github.com>
AuthorDate: Wed Apr 22 14:18:07 2020 +0100

    GEODE-7676: Add PR clear with expiration tests (#4970)
    
    Added distributed tests to verify the clear operation on Partitioned
    Regions works as expected when expiration is configured.
    
    - Added unit and distributed tests.
    - Fixed LocalRegion class to clear the entryExpiryTasks Map whenever
      the cancelAllEntryExpiryTasks method is invoked.
---
 ...titionedRegionClearWithExpirationDUnitTest.java | 516 +++++++++++++++++++++
 .../apache/geode/internal/cache/LocalRegion.java   |  11 +-
 .../geode/internal/cache/LocalRegionTest.java      |  22 +
 3 files changed, 548 insertions(+), 1 deletion(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
new file mode 100644
index 0000000..33301f4
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
@@ -0,0 +1,516 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.cache.ExpirationAction.DESTROY;
+import static org.apache.geode.cache.RegionShortcut.PARTITION;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_OVERFLOW;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_PERSISTENT;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_PERSISTENT_OVERFLOW;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_OVERFLOW;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_PERSISTENT;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_PERSISTENT_OVERFLOW;
+import static org.apache.geode.internal.util.ArrayUtils.asList;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.IntStream;
+
+import junitparams.JUnitParamsRunner;
+import junitparams.Parameters;
+import junitparams.naming.TestCaseName;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.apache.geode.ForcedDisconnectException;
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheWriter;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.ExpirationAttributes;
+import org.apache.geode.cache.PartitionAttributes;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.util.CacheWriterAdapter;
+import org.apache.geode.distributed.DistributedSystemDisconnectedException;
+import org.apache.geode.distributed.internal.DMStats;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.membership.api.MembershipManagerHelper;
+import org.apache.geode.test.awaitility.GeodeAwaitility;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedDiskDirRule;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+
+/**
+ * Tests to verify that {@link PartitionedRegion#clear()} cancels all remaining expiration tasks
+ * on the {@link PartitionedRegion} once the operation is executed.
+ */
+@RunWith(JUnitParamsRunner.class)
+public class PartitionedRegionClearWithExpirationDUnitTest implements Serializable {
+  private static final Integer BUCKETS = 13;
+  private static final Integer EXPIRATION_TIME = 30;
+  private static final String REGION_NAME = "PartitionedRegion";
+
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule(3);
+
+  @Rule
+  public CacheRule cacheRule = CacheRule.builder().createCacheInAll().build();
+
+  @Rule
+  public DistributedDiskDirRule distributedDiskDirRule = new DistributedDiskDirRule();
+
+  private VM accessor, server1, server2;
+
+  private enum TestVM {
+    ACCESSOR(0), SERVER1(1), SERVER2(2);
+
+    final int vmNumber;
+
+    TestVM(int vmNumber) {
+      this.vmNumber = vmNumber;
+    }
+  }
+
+  @SuppressWarnings("unused")
+  static RegionShortcut[] regionTypes() {
+    return new RegionShortcut[] {
+        PARTITION,
+        PARTITION_OVERFLOW,
+        PARTITION_REDUNDANT,
+        PARTITION_REDUNDANT_OVERFLOW,
+
+        PARTITION_PERSISTENT,
+        PARTITION_PERSISTENT_OVERFLOW,
+        PARTITION_REDUNDANT_PERSISTENT,
+        PARTITION_REDUNDANT_PERSISTENT_OVERFLOW
+    };
+  }
+
+  @SuppressWarnings("unused")
+  static Object[] vmsAndRegionTypes() {
+    ArrayList<Object[]> parameters = new ArrayList<>();
+    RegionShortcut[] regionShortcuts = regionTypes();
+
+    Arrays.stream(regionShortcuts).forEach(regionShortcut -> {
+      parameters.add(new Object[] {TestVM.SERVER1, regionShortcut});
+      parameters.add(new Object[] {TestVM.ACCESSOR, regionShortcut});
+    });
+
+    return parameters.toArray();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    server1 = getVM(TestVM.SERVER1.vmNumber);
+    server2 = getVM(TestVM.SERVER2.vmNumber);
+    accessor = getVM(TestVM.ACCESSOR.vmNumber);
+  }
+
+  private RegionShortcut getRegionAccessorShortcut(RegionShortcut dataStoreRegionShortcut) {
+    if (dataStoreRegionShortcut.isPersistent()) {
+      switch (dataStoreRegionShortcut) {
+        case PARTITION_PERSISTENT:
+          return PARTITION;
+        case PARTITION_PERSISTENT_OVERFLOW:
+          return PARTITION_OVERFLOW;
+        case PARTITION_REDUNDANT_PERSISTENT:
+          return PARTITION_REDUNDANT;
+        case PARTITION_REDUNDANT_PERSISTENT_OVERFLOW:
+          return PARTITION_REDUNDANT_OVERFLOW;
+      }
+    }
+
+    return dataStoreRegionShortcut;
+  }
+
+  private void initAccessor(RegionShortcut regionShortcut,
+      ExpirationAttributes expirationAttributes) {
+    RegionShortcut accessorShortcut = getRegionAccessorShortcut(regionShortcut);
+    PartitionAttributes<String, String> attributes =
+        new PartitionAttributesFactory<String, String>()
+            .setTotalNumBuckets(BUCKETS)
+            .setLocalMaxMemory(0)
+            .create();
+
+    cacheRule.getCache()
+        .<String, String>createRegionFactory(accessorShortcut)
+        .setPartitionAttributes(attributes)
+        .setEntryTimeToLive(expirationAttributes)
+        .setEntryIdleTimeout(expirationAttributes)
+        .create(REGION_NAME);
+  }
+
+  private void initDataStore(RegionShortcut regionShortcut,
+      ExpirationAttributes expirationAttributes) {
+    PartitionAttributes<String, String> attributes =
+        new PartitionAttributesFactory<String, String>()
+            .setTotalNumBuckets(BUCKETS)
+            .create();
+
+    cacheRule.getCache()
+        .<String, String>createRegionFactory(regionShortcut)
+        .setPartitionAttributes(attributes)
+        .setEntryTimeToLive(expirationAttributes)
+        .setEntryIdleTimeout(expirationAttributes)
+        .create(REGION_NAME);
+
+    ExpiryTask.expiryTaskListener = new ExpirationListener();
+  }
+
+  private void parametrizedSetup(RegionShortcut regionShortcut,
+      ExpirationAttributes expirationAttributes) {
+    server1.invoke(() -> initDataStore(regionShortcut, expirationAttributes));
+    server2.invoke(() -> initDataStore(regionShortcut, expirationAttributes));
+    accessor.invoke(() -> initAccessor(regionShortcut, expirationAttributes));
+  }
+
+  private void waitForSilence() {
+    DMStats dmStats = cacheRule.getSystem().getDistributionManager().getStats();
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    PartitionedRegionStats partitionedRegionStats = region.getPrStats();
+
+    await().untilAsserted(() -> {
+      assertThat(dmStats.getReplyWaitsInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getVolunteeringInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getPrimaryTransfersInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalanceBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalancePrimaryTransfersInProgress()).isEqualTo(0);
+    });
+  }
+
+  /**
+   * Populates the region and verifies the data on the selected VMs.
+   */
+  private void populateRegion(VM feeder, int entryCount, List<VM> vms) {
+    feeder.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, entryCount).forEach(i -> region.put(String.valueOf(i), "Value_" + i));
+    });
+
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilence();
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+
+      IntStream.range(0, entryCount)
+          .forEach(i -> assertThat(region.get(String.valueOf(i))).isEqualTo("Value_" + i));
+    }));
+  }
+
+  /**
+   * Asserts that the region is empty on requested VMs.
+   */
+  private void assertRegionIsEmpty(List<VM> vms) {
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilence();
+      PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+
+      assertThat(region.getLocalSize()).isEqualTo(0);
+    }));
+  }
+
+  /**
+   * Asserts that the region data is consistent across buckets.
+   */
+  private void assertRegionBucketsConsistency() throws ForceReattemptException {
+    waitForSilence();
+    List<BucketDump> bucketDumps;
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    // Redundant copies + 1 primary.
+    int expectedCopies = region.getRedundantCopies() + 1;
+
+    for (int bucketId = 0; bucketId < BUCKETS; bucketId++) {
+      bucketDumps = region.getAllBucketEntries(bucketId);
+      assertThat(bucketDumps.size()).as("Bucket " + bucketId + " should have " + expectedCopies
+          + " copies, but has " + bucketDumps.size()).isEqualTo(expectedCopies);
+
+      // Check that all copies of the bucket have the same data.
+      if (bucketDumps.size() > 1) {
+        BucketDump firstDump = bucketDumps.get(0);
+
+        for (int j = 1; j < bucketDumps.size(); j++) {
+          BucketDump otherDump = bucketDumps.get(j);
+          assertThat(otherDump.getValues())
+              .as("Values for bucket " + bucketId + " on member " + otherDump.getMember()
+                  + " are not consistent with member " + firstDump.getMember())
+              .isEqualTo(firstDump.getValues());
+          assertThat(otherDump.getVersions())
+              .as("Versions for bucket " + bucketId + " on member " + otherDump.getMember()
+                  + " are not consistent with member " + firstDump.getMember())
+              .isEqualTo(firstDump.getVersions());
+        }
+      }
+    }
+  }
+
+  /**
+   * Register the MemberKiller CacheWriter on the given vms.
+   */
+  private void registerVMKillerAsCacheWriter(List<VM> vmsToBounce) {
+    vmsToBounce.forEach(vm -> vm.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      region.getAttributesMutator().setCacheWriter(new MemberKiller());
+    }));
+  }
+
+  /**
+   * The test does the following (clear coordinator and region type are parametrized):
+   * - Populates the Partition Region (entries have expiration).
+   * - Verifies that the entries are synchronized on all members.
+   * - Clears the Partition Region once.
+   * - Asserts that, after the clear is finished:
+   * . No expiration tasks were executed.
+   * . All expiration tasks were cancelled.
+   * . Map of expiry tasks per bucket is empty.
+   * . The Partition Region is empty on all members.
+   */
+  @Test
+  @Parameters(method = "vmsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
+  public void clearShouldRemoveRegisteredExpirationTasks(TestVM coordinatorVM,
+      RegionShortcut regionShortcut) {
+    final int entries = 500;
+    int expirationTime = (int) GeodeAwaitility.getTimeout().getValueInMS() / 1000;
+    parametrizedSetup(regionShortcut, new ExpirationAttributes(expirationTime, DESTROY));
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> {
+      Cache cache = cacheRule.getCache();
+      cache.getRegion(REGION_NAME).clear();
+    });
+
+    // Assert all expiration tasks were cancelled and none were executed.
+    asList(server1, server2).forEach(vm -> vm.invoke(() -> {
+      ExpirationListener listener = (ExpirationListener) EntryExpiryTask.expiryTaskListener;
+      assertThat(listener.tasksRan.get()).isEqualTo(0);
+      assertThat(listener.tasksCanceled.get()).isEqualTo(listener.tasksScheduled.get());
+
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+      bucketRegions
+          .forEach(bucketRegion -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue());
+    }));
+
+    // Assert Region Buckets are consistent and region is empty.
+    accessor.invoke(this::assertRegionBucketsConsistency);
+    assertRegionIsEmpty(asList(accessor, server1, server1));
+  }
+
+  /**
+   * The test does the following (region type is parametrized):
+   * - Populates the Partition Region (entries have expiration).
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link CacheWriter} to stop the coordinator VM while the
+   * clear is in progress.
+   * - Clears the Partition Region (at this point the coordinator is restarted).
+   * - Asserts that, after the clear is finished and the expiration time is reached:
+   * . No expiration tasks were cancelled.
+   * . All entries were removed due to the expiration.
+   * . The Partition Region Buckets are consistent on all members.
+   */
+  @Test
+  @Parameters(method = "regionTypes")
+  @TestCaseName("[{index}] {method}(RegionType:{0})")
+  public void clearShouldFailWhenCoordinatorMemberIsBouncedAndExpirationTasksShouldSurvive(
+      RegionShortcut regionShortcut) {
+    final int entries = 1000;
+    ExpirationAttributes expirationAttributes = new ExpirationAttributes(EXPIRATION_TIME, DESTROY);
+    parametrizedSetup(regionShortcut, expirationAttributes);
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+    registerVMKillerAsCacheWriter(Collections.singletonList(server1));
+
+    // Clear the region (it should fail).
+    server1.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      assertThatThrownBy(region::clear)
+          .isInstanceOf(DistributedSystemDisconnectedException.class)
+          .hasCauseInstanceOf(ForcedDisconnectException.class);
+    });
+
+    // Wait for member to get back online and assign all buckets.
+    server1.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(regionShortcut, expirationAttributes);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+      PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
+    });
+
+    // Wait until all expiration tasks are executed.
+    asList(server1, server2).forEach(vm -> vm.invoke(() -> {
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+      bucketRegions.forEach(bucketRegion -> await()
+          .untilAsserted(() -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue()));
+    }));
+
+    // At this point the entries should be either invalidated or destroyed (expiration tasks ran).
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, entries).forEach(i -> {
+        String key = String.valueOf(i);
+        assertThat(region.get(key)).isNull();
+      });
+    }));
+
+    // Assert Region Buckets are consistent.
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator and region type are parametrized):
+   * - Populates the Partition Region (entries have expiration).
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link CacheWriter} to stop a non-coordinator VM while the
+   * clear is in progress (the member has primary buckets, though, so participates on
+   * the clear operation).
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that, after the clear is finished:
+   * . No expiration tasks were executed on the non-restarted members.
+   * . All expiration tasks were cancelled on the non-restarted members.
+   * . Map of expiry tasks per bucket is empty on the non-restarted members.
+   * . All expiration tasks were executed and all expired on the restarted members.
+   * . The Partition Region is empty and buckets are consistent across all members.
+   */
+  @Test
+  @Parameters(method = "vmsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
+  public void clearShouldSucceedAndRemoveRegisteredExpirationTasksWhenNonCoordinatorMemberIsBounced(
+      TestVM coordinatorVM, RegionShortcut regionShortcut) {
+    final int entries = 1500;
+    ExpirationAttributes expirationAttributes = new ExpirationAttributes(EXPIRATION_TIME, DESTROY);
+    parametrizedSetup(regionShortcut, expirationAttributes);
+    registerVMKillerAsCacheWriter(Collections.singletonList(server2));
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> {
+      Cache cache = cacheRule.getCache();
+      cache.getRegion(REGION_NAME).clear();
+    });
+
+    // Wait for member to get back online and assign buckets.
+    server2.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(regionShortcut, expirationAttributes);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+      PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
+    });
+
+    // Assert all expiration tasks were cancelled and none were executed (surviving members).
+    server1.invoke(() -> {
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+      bucketRegions
+          .forEach(bucketRegion -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue());
+
+      ExpirationListener listener = (ExpirationListener) EntryExpiryTask.expiryTaskListener;
+      assertThat(listener.tasksRan.get()).isEqualTo(0);
+      assertThat(listener.tasksCanceled.get()).isEqualTo(listener.tasksScheduled.get());
+    });
+
+    // Assert all expiration tasks were expired as the region is empty (restarted member).
+    server2.invoke(() -> {
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+
+      // During restart, the member loads the region from disk and automatically registers
+      // expiration tasks for each entry. After GII, however, the region is empty due to the
+      // clear operation and the tasks will just expire as there are no entries.
+      bucketRegions.forEach(bucketRegion -> await()
+          .untilAsserted(() -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue()));
+
+      ExpirationListener listener = (ExpirationListener) EntryExpiryTask.expiryTaskListener;
+      assertThat(listener.tasksExpired.get()).isEqualTo(listener.tasksRan.get());
+    });
+
+    // Assert Region Buckets are consistent and region is empty.
+    accessor.invoke(this::assertRegionBucketsConsistency);
+    assertRegionIsEmpty(asList(accessor, server1, server1));
+  }
+
+  /**
+   * Tracks expiration tasks lifecycle.
+   */
+  public static class ExpirationListener implements ExpiryTask.ExpiryTaskListener {
+    final AtomicInteger tasksRan = new AtomicInteger(0);
+    final AtomicInteger tasksExpired = new AtomicInteger(0);
+    final AtomicInteger tasksCanceled = new AtomicInteger(0);
+    final AtomicInteger tasksScheduled = new AtomicInteger(0);
+
+    @Override
+    public void afterSchedule(ExpiryTask et) {
+      tasksScheduled.incrementAndGet();
+    }
+
+    @Override
+    public void afterTaskRan(ExpiryTask et) {
+      tasksRan.incrementAndGet();
+    }
+
+    @Override
+    public void afterReschedule(ExpiryTask et) {}
+
+    @Override
+    public void afterExpire(ExpiryTask et) {
+      tasksExpired.incrementAndGet();
+    }
+
+    @Override
+    public void afterCancel(ExpiryTask et) {
+      tasksCanceled.incrementAndGet();
+    }
+  }
+
+  /**
+   * Shuts down a member while the clear operation is in progress.
+   * The writer is only installed on the member the test wants to shutdown, doesn't matter whether
+   * it's the clear coordinator or another member holding primary buckets.
+   */
+  public static class MemberKiller extends CacheWriterAdapter<String, String> {
+
+    @Override
+    public synchronized void beforeRegionClear(RegionEvent<String, String> event)
+        throws CacheWriterException {
+      InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
+      MembershipManagerHelper.crashDistributedSystem(
+          InternalDistributedSystem.getConnectedInstance());
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNull());
+    }
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index d96c6f8..954e3a4 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -325,6 +325,10 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
    */
   private int txRefCount;
 
+  @VisibleForTesting
+  final ConcurrentHashMap<RegionEntry, EntryExpiryTask> entryExpiryTasks =
+      new ConcurrentHashMap<>();
+
   private volatile boolean regionInvalid;
 
   /**
@@ -7977,7 +7981,8 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     }
   }
 
-  private void cancelAllEntryExpiryTasks() {
+  @VisibleForTesting
+  void cancelAllEntryExpiryTasks() {
     // This method gets called during LocalRegion construction
     // in which case the final entryExpiryTasks field can still be null
     if (entryExpiryTasks.isEmpty()) {
@@ -7992,6 +7997,10 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
       task.cancel();
       doPurge = true;
     }
+
+    // Clear the map after canceling each expiry task.
+    entryExpiryTasks.clear();
+
     if (doPurge) {
       // do a force to not leave any refs to this region
       cache.getExpirationScheduler().forcePurge();
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/LocalRegionTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/LocalRegionTest.java
index dd9cce1..25c63ae 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/LocalRegionTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/LocalRegionTest.java
@@ -23,6 +23,7 @@ import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -277,4 +278,25 @@ public class LocalRegionTest {
 
     assertThat(region.isGenerateLocalFilterRoutingNeeded(event)).isFalse();
   }
+
+  @Test
+  public void cancelAllEntryExpiryTasksShouldClearMapOfExpiryTasks() {
+    when(cache.getExpirationScheduler()).thenReturn(mock(ExpirationScheduler.class));
+    LocalRegion region =
+        spy(new LocalRegion("region", regionAttributes, null, cache, internalRegionArguments,
+            internalDataView, regionMapConstructor, serverRegionProxyConstructor, entryEventFactory,
+            poolFinder, regionPerfStatsFactory, disabledClock()));
+
+    RegionEntry regionEntry1 = mock(RegionEntry.class);
+    RegionEntry regionEntry2 = mock(RegionEntry.class);
+    EntryExpiryTask entryExpiryTask1 = spy(new EntryExpiryTask(region, regionEntry1));
+    EntryExpiryTask entryExpiryTask2 = spy(new EntryExpiryTask(region, regionEntry2));
+    region.entryExpiryTasks.put(regionEntry1, entryExpiryTask1);
+    region.entryExpiryTasks.put(regionEntry2, entryExpiryTask2);
+
+    region.cancelAllEntryExpiryTasks();
+    assertThat(region.entryExpiryTasks).isEmpty();
+    verify(entryExpiryTask1, times(1)).cancel();
+    verify(entryExpiryTask2, times(1)).cancel();
+  }
 }


[geode] 22/22: GEODE-7845: Adding a cleaner simpler test. (#5622)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 30a699df1d5084de06e9a29a00bf89095f1de53b
Author: mhansonp <ha...@vmware.com>
AuthorDate: Tue Oct 13 17:09:03 2020 -0700

    GEODE-7845: Adding a cleaner simpler test. (#5622)
    
    - Changed the test for ServerVersionMismatchException to be more readable.
---
 ...ionRegionClearMixedServerPartitionedRegion.java | 412 ---------------------
 ...ePartitionRegionClearServerVersionMismatch.java | 174 +++++++++
 2 files changed, 174 insertions(+), 412 deletions(-)

diff --git a/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgradePartitionRegionClearMixedServerPartitionedRegion.java b/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgradePartitionRegionClearMixedServerPartitionedRegion.java
deleted file mode 100644
index bfcd651..0000000
--- a/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgradePartitionRegionClearMixedServerPartitionedRegion.java
+++ /dev/null
@@ -1,412 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache.rollingupgrade;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.catchThrowable;
-
-import java.io.File;
-import java.lang.reflect.Constructor;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.logging.log4j.Logger;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameter;
-import org.junit.runners.Parameterized.Parameters;
-import org.junit.runners.Parameterized.UseParametersRunnerFactory;
-
-import org.apache.geode.cache.Cache;
-import org.apache.geode.cache.CacheFactory;
-import org.apache.geode.cache.GemFireCache;
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.RegionFactory;
-import org.apache.geode.cache.RegionShortcut;
-import org.apache.geode.cache.ServerVersionMismatchException;
-import org.apache.geode.cache.client.ClientCache;
-import org.apache.geode.cache.client.ClientCacheFactory;
-import org.apache.geode.cache.client.ClientRegionShortcut;
-import org.apache.geode.cache.client.ServerOperationException;
-import org.apache.geode.cache.server.CacheServer;
-import org.apache.geode.distributed.DistributedSystem;
-import org.apache.geode.distributed.internal.DistributionConfig;
-import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.AvailablePortHelper;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
-import org.apache.geode.internal.cache.PartitionedRegion;
-import org.apache.geode.logging.internal.log4j.api.LogService;
-import org.apache.geode.test.dunit.DistributedTestUtils;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.IgnoredException;
-import org.apache.geode.test.dunit.Invoke;
-import org.apache.geode.test.dunit.NetworkUtils;
-import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.internal.DUnitLauncher;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
-import org.apache.geode.test.version.VersionManager;
-
-@RunWith(Parameterized.class)
-@UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
-public class RollingUpgradePartitionRegionClearMixedServerPartitionedRegion
-    extends JUnit4DistributedTestCase {
-
-  protected static final Logger logger = LogService.getLogger();
-  protected static GemFireCache cache;
-  protected static ClientCache clientcache;
-
-  @Parameter
-  public String oldVersion;
-
-  @Parameters(name = "from_v{0}")
-  public static Collection<String> data() {
-    List<String> result = VersionManager.getInstance().getVersionsWithoutCurrent();
-    if (result.size() < 1) {
-      throw new RuntimeException("No older versions of Geode were found to test against");
-    } else {
-      System.out.println("running against these versions: " + result);
-    }
-    return result;
-  }
-
-  @Test
-  public void testPutAndGetMixedServerPartitionedRegion() throws Exception {
-    doTestPutAndGetMixedServers(oldVersion);
-  }
-
-  /**
-   * This test starts up multiple servers from the current code base and multiple servers from the
-   * old version and executes puts and gets on a new server and old server and verifies that the
-   * results are present. Note that the puts have overlapping region keys just to test new puts and
-   * replaces
-   */
-  void doTestPutAndGetMixedServers(String oldVersion)
-      throws Exception {
-    VM currentServer1 = VM.getVM(VersionManager.CURRENT_VERSION, 0);
-    VM oldServerAndLocator = VM.getVM(oldVersion, 1);
-    VM currentServer2 = VM.getVM(VersionManager.CURRENT_VERSION, 2);
-    VM oldServer2 = VM.getVM(oldVersion, 3);
-
-    String regionName = "aRegion";
-
-    final String serverHostName = NetworkUtils.getServerHostName();
-    final int port = AvailablePortHelper.getRandomAvailableTCPPort();
-    oldServerAndLocator.invoke(() -> DistributedTestUtils.deleteLocatorStateFile(port));
-    try {
-      final Properties props = getSystemProperties();
-      props.remove(DistributionConfig.LOCATORS_NAME);
-
-      // Fire up the locator and server
-      oldServerAndLocator.invoke(() -> {
-        props.put(DistributionConfig.START_LOCATOR_NAME,
-            "" + serverHostName + "[" + port + "]");
-        props.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
-        cache = createCache(props);
-        Thread.sleep(5000); // bug in 1.0 - cluster config service not immediately available
-      });
-
-      props.put(DistributionConfig.LOCATORS_NAME, serverHostName + "[" + port + "]");
-
-      // create the cache in all the server VMs.
-      for (VM vm : Arrays.asList(oldServer2, currentServer1, currentServer2)) {
-        vm.invoke(() -> {
-          cache = createCache(props);
-        });
-      }
-      // spin up current version servers
-      for (VM vm : Arrays.asList(currentServer1, currentServer2)) {
-        vm.invoke(
-            () -> assertVersion(cache, VersionManager.getInstance().getCurrentVersionOrdinal()));
-      }
-
-      // create region
-      for (VM vm : Arrays.asList(currentServer1, currentServer2, oldServerAndLocator, oldServer2)) {
-        vm.invoke(() -> createRegion(cache, regionName));
-      }
-
-      // put some data in the region to make sure there is something to clear.
-      putDataSerializableAndVerify(currentServer1, regionName, currentServer2, oldServerAndLocator,
-          oldServer2);
-
-      // invoke Partition Region Clear and verify we didn't touch the old servers.
-
-      currentServer1.invoke(() -> {
-        assertRegionExists(cache, regionName);
-        PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
-
-        Throwable thrown = catchThrowable(region::clear);
-        assertThat(thrown).isInstanceOf(ServerVersionMismatchException.class);
-
-      });
-    } finally {
-      for (VM vm : Arrays.asList(currentServer1, currentServer2, oldServerAndLocator, oldServer2)) {
-        vm.invoke(
-            () -> closeCache(RollingUpgradePartitionRegionClearMixedServerPartitionedRegion.cache));
-      }
-    }
-  }
-
-  @Test
-  public void TestClientServerGetsUnsupportedExceptionWhenPRClearInvoked() throws Exception {
-    doTestClientServerGetsUnsupportedExceptionWhenPRClearInvoked(oldVersion);
-  }
-
-  void doTestClientServerGetsUnsupportedExceptionWhenPRClearInvoked(String oldVersion)
-      throws Exception {
-
-    VM client = VM.getVM(VersionManager.CURRENT_VERSION, 0);
-    VM locator = VM.getVM(VersionManager.CURRENT_VERSION, 1);
-    VM currentServer = VM.getVM(VersionManager.CURRENT_VERSION, 2);
-    VM oldServer2 = VM.getVM(oldVersion, 3);
-
-    for (VM vm : Arrays.asList(locator, currentServer, client)) {
-      vm.invoke(() -> System.setProperty("gemfire.allow_old_members_to_join_for_testing", "true"));
-    }
-
-    String regionName = "aRegion";
-
-    final String serverHostName = NetworkUtils.getServerHostName();
-    final int port = AvailablePortHelper.getRandomAvailableTCPPort();
-    locator.invoke(() -> DistributedTestUtils.deleteLocatorStateFile(port));
-    try {
-      final Properties props = getSystemProperties();
-      props.remove(DistributionConfig.LOCATORS_NAME);
-
-      // Fire up the locator and server
-      locator.invoke(() -> {
-        props.put(DistributionConfig.START_LOCATOR_NAME,
-            "" + serverHostName + "[" + port + "]");
-        props.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
-        cache = createCache(props);
-      });
-
-      props.put(DistributionConfig.LOCATORS_NAME, serverHostName + "[" + port + "]");
-
-      // create the cache in all the server VMs.
-      for (VM vm : Arrays.asList(oldServer2, currentServer)) {
-        vm.invoke(() -> {
-          props.setProperty(DistributionConfig.NAME_NAME, "vm" + VM.getVMId());
-          cache = createCache(props);
-        });
-      }
-      int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-
-      oldServer2.invoke(() -> startCacheServer(cache, ports[0]));
-      currentServer.invoke(() -> startCacheServer(cache, ports[1]));
-
-      // create region
-      for (VM vm : Arrays.asList(currentServer, locator, oldServer2)) {
-        vm.invoke(() -> createRegion(cache, regionName));
-      }
-
-      // put some data in the region to make sure there is something to clear.
-      putDataSerializableAndVerify(currentServer, regionName, locator, oldServer2);
-
-      // invoke Partition Region Clear from the client and verify the exception.
-      client.invoke(() -> {
-        clientcache = new ClientCacheFactory().addPoolServer(serverHostName, ports[1]).create();
-        Region<Object, Object> clientRegion = clientcache.createClientRegionFactory(
-            ClientRegionShortcut.PROXY).create(regionName);
-
-        clientRegion.put("key", "value");
-
-        Throwable thrown = catchThrowable(clientRegion::clear);
-        assertThat(thrown).isInstanceOf(ServerOperationException.class);
-        assertThat(thrown).hasCauseInstanceOf(ServerVersionMismatchException.class);
-        ServerVersionMismatchException serverVersionMismatchException =
-            (ServerVersionMismatchException) thrown.getCause();
-        assertThat(serverVersionMismatchException.getMessage()).contains("vm3");
-      });
-
-    } finally {
-
-      for (VM vm : Arrays.asList(currentServer, locator, oldServer2)) {
-        vm.invoke(() -> closeCache(cache));
-      }
-
-      client.invoke(() -> {
-        if (cache != null && !clientcache.isClosed()) {
-          clientcache.close(false);
-        }
-      });
-    }
-  }
-
-  private String getLocatorString(int locatorPort) {
-    return getDUnitLocatorAddress() + "[" + locatorPort + "]";
-  }
-
-  public String getLocatorString(int[] locatorPorts) {
-    StringBuilder locatorString = new StringBuilder();
-    int numLocators = locatorPorts.length;
-    for (int i = 0; i < numLocators; i++) {
-      locatorString.append(getLocatorString(locatorPorts[i]));
-      if (i + 1 < numLocators) {
-        locatorString.append(",");
-      }
-    }
-    return locatorString.toString();
-  }
-
-  private Cache createCache(Properties systemProperties) {
-    systemProperties.setProperty(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "false");
-    if (VersionManager.getInstance().getCurrentVersionOrdinal() < 75) {
-      systemProperties.remove("validate-serializable-objects");
-      systemProperties.remove("serializable-object-filter");
-    }
-    CacheFactory cf = new CacheFactory(systemProperties);
-    return cf.create();
-  }
-
-  private void startCacheServer(GemFireCache cache, int port) throws Exception {
-    CacheServer cacheServer = ((GemFireCacheImpl) cache).addCacheServer();
-    cacheServer.setPort(port);
-    cacheServer.start();
-  }
-
-  protected void assertRegionExists(GemFireCache cache, String regionName) {
-    Region<Object, Object> region = cache.getRegion(regionName);
-    if (region == null) {
-      throw new Error("Region: " + regionName + " does not exist");
-    }
-  }
-
-  private void assertEntryExists(GemFireCache cache, String regionName) {
-    assertRegionExists(cache, regionName);
-    Region<Object, Object> region = cache.getRegion(regionName);
-    for (int i = 0; i < 10; i++) {
-      String key = "" + i;
-      Object regionValue = region.get(key);
-      assertThat(regionValue).describedAs("Entry for key:" + key + " does not exist").isNotNull();
-    }
-  }
-
-  public void put(GemFireCache cache, String regionName, Object key, Object value) {
-    Region<Object, Object> region = cache.getRegion(regionName);
-    System.out.println(regionName + ".put(" + key + "," + value + ")");
-    Object result = region.put(key, value);
-    System.out.println("returned " + result);
-  }
-
-  private void createRegion(GemFireCache cache, String regionName) {
-    RegionFactory<Object, Object> rf = ((GemFireCacheImpl) cache).createRegionFactory(
-        RegionShortcut.PARTITION);
-    System.out.println("created region " + rf.create(regionName));
-  }
-
-  void assertVersion(GemFireCache cache, short ordinal) {
-    DistributedSystem system = cache.getDistributedSystem();
-    int thisOrdinal =
-        ((InternalDistributedMember) system.getDistributedMember()).getVersion()
-            .ordinal();
-    if (ordinal != thisOrdinal) {
-      throw new Error(
-          "Version ordinal:" + thisOrdinal + " was not the expected ordinal of:" + ordinal);
-    }
-  }
-
-  private void closeCache(GemFireCache cache) {
-    if (cache == null) {
-      return;
-    }
-    boolean cacheClosed = cache.isClosed();
-    if (!cacheClosed) {
-      List<CacheServer> servers = ((Cache) cache).getCacheServers();
-      for (CacheServer server : servers) {
-        server.stop();
-      }
-      cache.close();
-    }
-  }
-
-  /**
-   * Get the port that the standard dunit locator is listening on.
-   *
-   */
-  private String getDUnitLocatorAddress() {
-    return Host.getHost(0).getHostName();
-  }
-
-  private void deleteVMFiles() {
-    System.out.println("deleting files in vm" + VM.getVMId());
-    File pwd = new File(".");
-    for (File entry : pwd.listFiles()) {
-      try {
-        if (entry.isDirectory()) {
-          FileUtils.deleteDirectory(entry);
-        } else {
-          if (!entry.delete()) {
-            System.out.println("Could not delete " + entry);
-          }
-        }
-      } catch (Exception e) {
-        System.out.println("Could not delete " + entry + ": " + e.getMessage());
-      }
-    }
-  }
-
-  @Override
-  public void postSetUp() {
-    Invoke.invokeInEveryVM("delete files", this::deleteVMFiles);
-    IgnoredException.addIgnoredException(
-        "cluster configuration service not available|ConflictingPersistentDataException");
-  }
-
-
-  void putDataSerializableAndVerify(VM putter, String regionName,
-      VM... vms) throws Exception {
-    for (int i = 0; i < 10; i++) {
-      Class aClass = Thread.currentThread().getContextClassLoader()
-          .loadClass("org.apache.geode.cache.ExpirationAttributes");
-      Constructor constructor = aClass.getConstructor(int.class);
-      Object testDataSerializable = constructor.newInstance(i);
-      int finalI = i;
-      putter.invoke(() -> put(cache, regionName, "" + finalI, testDataSerializable));
-    }
-
-    // verify present in others
-    for (VM vm : vms) {
-      vm.invoke(() -> assertEntryExists(cache, regionName));
-    }
-  }
-
-  public Properties getSystemProperties() {
-    Properties props = DistributedTestUtils.getAllDistributedSystemProperties(new Properties());
-    props.remove("disable-auto-reconnect");
-    props.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
-    props.put(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "false");
-    props.remove(DistributionConfig.LOAD_CLUSTER_CONFIG_FROM_DIR_NAME);
-    props.remove(DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME);
-    props.remove(DistributionConfig.LOCK_MEMORY_NAME);
-    return props;
-  }
-
-  public Properties getSystemProperties(int[] locatorPorts) {
-    Properties props = new Properties();
-    String locatorString = getLocatorString(locatorPorts);
-    props.setProperty("locators", locatorString);
-    props.setProperty("mcast-port", "0");
-    props.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
-    props.put(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "false");
-    props.remove(DistributionConfig.LOAD_CLUSTER_CONFIG_FROM_DIR_NAME);
-    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, DUnitLauncher.logLevel);
-    return props;
-  }
-}
diff --git a/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgradePartitionRegionClearServerVersionMismatch.java b/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgradePartitionRegionClearServerVersionMismatch.java
new file mode 100644
index 0000000..144ea38
--- /dev/null
+++ b/geode-core/src/upgradeTest/java/org/apache/geode/internal/cache/rollingupgrade/RollingUpgradePartitionRegionClearServerVersionMismatch.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.rollingupgrade;
+
+import static org.apache.geode.test.dunit.rules.ClusterStartupRule.getCache;
+import static org.apache.geode.test.dunit.rules.ClusterStartupRule.getClientCache;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.assertj.core.api.Assertions.catchThrowable;
+
+import java.util.Collection;
+import java.util.List;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.ServerVersionMismatchException;
+import org.apache.geode.cache.client.ClientCache;
+import org.apache.geode.cache.client.ClientRegionFactory;
+import org.apache.geode.cache.client.ClientRegionShortcut;
+import org.apache.geode.cache.client.ServerOperationException;
+import org.apache.geode.distributed.internal.DistributionConfig;
+import org.apache.geode.test.dunit.IgnoredException;
+import org.apache.geode.test.dunit.rules.ClientVM;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
+import org.apache.geode.test.version.VersionManager;
+
+/**
+ * This test class exists to test the ServerVersionMismatchException.
+ * A ServerVersionMismatchException is thrown when a cluster has a server that is previous to
+ * version 1.14.0 which doesn't support the Partitioned Region Clear feature.
+ *
+ * When the exception is thrown it is expected to contain the members that have the bad version,
+ * the version number necessary, and the feature that is not supported.
+ */
+
+
+@RunWith(Parameterized.class)
+@Parameterized.UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
+public class RollingUpgradePartitionRegionClearServerVersionMismatch {
+
+  @Rule
+  public ClusterStartupRule cluster = new ClusterStartupRule();
+
+  @Parameterized.Parameter
+  public String oldVersion;
+
+  @Parameterized.Parameters(name = "from_v{0}")
+  public static Collection<String> data() {
+    List<String> result = VersionManager.getInstance().getVersionsWithoutCurrent();
+    if (result.size() < 1) {
+      throw new RuntimeException("No older versions of Geode were found to test against");
+    } else {
+      System.out.println("running against these versions: " + result);
+    }
+    return result;
+  }
+
+  // This is the message that we expect to be in the exception in both tests below.
+  private static final String expectedMessage =
+      "A server's [server-2] version was too old (< GEODE 1.14.0) for : Partitioned Region Clear";
+
+  private MemberVM locator;
+  private MemberVM serverNew;
+
+  @Before
+  public void before() {
+    locator = cluster.startLocatorVM(0,
+        l -> l.withSystemProperty("gemfire.allow_old_members_to_join_for_testing", "true")
+            .withProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false"));
+    final int locatorPort = locator.getPort();
+
+    serverNew = cluster.startServerVM(1, locatorPort);
+    MemberVM serverOld =
+        cluster.startServerVM(2, oldVersion, s -> s.withConnectionToLocator(locatorPort));
+
+    MemberVM.invokeInEveryMember(() -> {
+      Cache cache = getCache();
+      assertThat(cache).isNotNull();
+      getCache().createRegionFactory(RegionShortcut.PARTITION).create("regionA");
+    }, serverNew, serverOld);
+
+    // Put in some boilerplate data for region clear
+    serverNew.invoke(() -> {
+      Cache cache = getCache();
+      assertThat(cache).isNotNull();
+
+      Region<String, String> region = cache.getRegion("regionA");
+      region.put("A", "ValueA");
+      region.put("B", "ValueB");
+    });
+
+  }
+
+  /**
+   * testClient_ServerVersionMismatchException - validates that when a client invokes a partitioned
+   * region clear on a cluster where one server is running an unsupported version for this feature,
+   * we return a ServerVersionMismatchException.
+   */
+  @Test
+  public void testClient_ServerVersionMismatchException() throws Exception {
+    IgnoredException.addIgnoredException(ServerOperationException.class);
+    final int locatorPort = locator.getPort();
+    // Get a client VM
+    ClientVM clientVM = cluster.startClientVM(3, c -> c.withLocatorConnection(locatorPort));
+
+    clientVM.invoke(() -> {
+      // Validate we have a cache and region
+      ClientCache clientCache = getClientCache();
+      assertThat(clientCache).isNotNull();
+
+      ClientRegionFactory<String, String> clientRegionFactory =
+          clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY);
+      Region<String, String> region = clientRegionFactory.create("regionA");
+      assertThat(region).isNotNull();
+
+      // Validate that we get a ServerVersionMismatchException wrapped in a ServerOperationException
+      Throwable thrown = catchThrowable(region::clear);
+      assertThat(thrown).isInstanceOf(ServerOperationException.class);
+      assertThat(thrown).hasCauseInstanceOf(ServerVersionMismatchException.class);
+
+      // Validate that the message is exactly as we expect it.
+      ServerVersionMismatchException serverVersionMismatchException =
+          (ServerVersionMismatchException) thrown.getCause();
+      assertThat(serverVersionMismatchException.getMessage()).isEqualTo(expectedMessage);
+    });
+  }
+
+  /**
+   * testServer_ServerVersionMismatchException - validates that when a partitioned region clear is
+   * invoked on a cluster where one server is running an unsupported version for this feature we
+   * return a ServerVersionMismatchException
+   */
+  @Test
+  public void testServer_ServerVersionMismatchException() {
+    IgnoredException.addIgnoredException(ServerOperationException.class);
+
+    serverNew.invoke(() -> {
+      // Validate we have a cache and region
+      Cache cache = getCache();
+      assertThat(cache).isNotNull();
+
+      Region<String, String> region = cache.getRegion("regionA");
+      assertThat(region).isNotNull();
+
+      // Validate that the message is exactly as we expect it.
+      assertThatThrownBy(region::clear).isInstanceOf(ServerVersionMismatchException.class)
+          .hasMessage(expectedMessage);
+
+      assertThat(region.get("A")).isEqualTo("ValueA");
+      assertThat(region.get("B")).isEqualTo("ValueB");
+    });
+  }
+}


[geode] 15/22: GEODE-8361: Use Set instead of List to track cleared buckets (#5379)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 26396332807e56fa7d4d45992967bf73ebc0bb7c
Author: Donal Evans <do...@pivotal.io>
AuthorDate: Fri Jul 17 12:55:51 2020 -0700

    GEODE-8361: Use Set instead of List to track cleared buckets (#5379)
    
    - Refactor PartitionRegionClear to use Set instead of List
    - Some other changes to remove warnings/alerts from PartitionedRegionClear and PartitionedRegionClearMessage
    
    Authored-by: Donal Evans <do...@vmware.com>
---
 .../codeAnalysis/sanctionedDataSerializables.txt   |  2 +-
 .../internal/cache/PartitionedRegionClear.java     | 36 +++++-------
 .../cache/PartitionedRegionClearMessage.java       | 28 +++++----
 .../internal/cache/PartitionedRegionClearTest.java | 68 ++++++++++++----------
 4 files changed, 67 insertions(+), 67 deletions(-)

diff --git a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
index e56247d..98ff773 100644
--- a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
@@ -1080,7 +1080,7 @@ fromData,40
 toData,36
 
 org/apache/geode/internal/cache/PartitionedRegionClearMessage$PartitionedRegionClearReplyMessage,2
-fromData,29
+fromData,32
 toData,28
 
 org/apache/geode/internal/cache/PoolFactoryImpl$PoolAttributes,2
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
index 030b36e..5a0621d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
@@ -14,11 +14,9 @@
  */
 package org.apache.geode.internal.cache;
 
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
+import java.util.Set;
 
 import org.apache.logging.log4j.Logger;
 
@@ -32,7 +30,6 @@ import org.apache.geode.distributed.internal.DistributionManager;
 import org.apache.geode.distributed.internal.MembershipListener;
 import org.apache.geode.distributed.internal.ReplyException;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.cache.versions.RegionVersionVector;
 import org.apache.geode.logging.internal.log4j.api.LogService;
 
 public class PartitionedRegionClear {
@@ -98,10 +95,8 @@ public class PartitionedRegionClear {
         PartitionedRegionClearMessage.OperationType.OP_UNLOCK_FOR_PR_CLEAR);
   }
 
-  List clearRegion(RegionEventImpl regionEvent, boolean cacheWrite,
-      RegionVersionVector vector) {
-    List allBucketsCleared = new ArrayList();
-    allBucketsCleared.addAll(clearRegionLocal(regionEvent));
+  Set<Integer> clearRegion(RegionEventImpl regionEvent) {
+    Set<Integer> allBucketsCleared = new HashSet<>(clearRegionLocal(regionEvent));
     allBucketsCleared.addAll(sendPartitionedRegionClearMessage(regionEvent,
         PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR));
     return allBucketsCleared;
@@ -116,8 +111,8 @@ public class PartitionedRegionClear {
         if (!bucketRegion.getBucketAdvisor().hasPrimary()) {
           if (retryTimer.overMaximum()) {
             throw new PartitionedRegionPartialClearException(
-                "Unable to find primary bucket region during clear operation for region: " +
-                    partitionedRegion.getName());
+                "Unable to find primary bucket region during clear operation on "
+                    + partitionedRegion.getName() + " region.");
           }
           retryTimer.waitForBucketsRecovery();
           retry = true;
@@ -126,8 +121,8 @@ public class PartitionedRegionClear {
     } while (retry);
   }
 
-  public ArrayList clearRegionLocal(RegionEventImpl regionEvent) {
-    ArrayList clearedBuckets = new ArrayList();
+  public Set<Integer> clearRegionLocal(RegionEventImpl regionEvent) {
+    Set<Integer> clearedBuckets = new HashSet<>();
     setMembershipChange(false);
     // Synchronized to handle the requester departure.
     synchronized (lockForListenerAndClientNotification) {
@@ -255,7 +250,7 @@ public class PartitionedRegionClear {
     }
   }
 
-  protected List sendPartitionedRegionClearMessage(RegionEventImpl event,
+  protected Set<Integer> sendPartitionedRegionClearMessage(RegionEventImpl event,
       PartitionedRegionClearMessage.OperationType op) {
     RegionEventImpl eventForLocalClear = (RegionEventImpl) event.clone();
     eventForLocalClear.setOperation(Operation.REGION_LOCAL_CLEAR);
@@ -269,10 +264,10 @@ public class PartitionedRegionClear {
     } while (true);
   }
 
-  protected List attemptToSendPartitionedRegionClearMessage(RegionEventImpl event,
+  protected Set<Integer> attemptToSendPartitionedRegionClearMessage(RegionEventImpl event,
       PartitionedRegionClearMessage.OperationType op)
       throws ForceReattemptException {
-    List bucketsOperated = null;
+    Set<Integer> bucketsOperated = null;
 
     if (partitionedRegion.getPRRoot() == null) {
       if (logger.isDebugEnabled()) {
@@ -284,17 +279,16 @@ public class PartitionedRegionClear {
       return bucketsOperated;
     }
 
-    final HashSet configRecipients =
-        new HashSet(partitionedRegion.getRegionAdvisor().adviseAllPRNodes());
+    final Set<InternalDistributedMember> configRecipients =
+        new HashSet<>(partitionedRegion.getRegionAdvisor().adviseAllPRNodes());
 
     try {
       final PartitionRegionConfig prConfig =
           partitionedRegion.getPRRoot().get(partitionedRegion.getRegionIdentifier());
 
       if (prConfig != null) {
-        Iterator itr = prConfig.getNodes().iterator();
-        while (itr.hasNext()) {
-          InternalDistributedMember idm = ((Node) itr.next()).getMemberId();
+        for (Node node : prConfig.getNodes()) {
+          InternalDistributedMember idm = node.getMemberId();
           if (!idm.equals(partitionedRegion.getMyId())) {
             configRecipients.add(idm);
           }
@@ -355,7 +349,7 @@ public class PartitionedRegionClear {
         obtainLockForClear(regionEvent);
       }
       try {
-        List bucketsCleared = clearRegion(regionEvent, cacheWrite, null);
+        Set<Integer> bucketsCleared = clearRegion(regionEvent);
 
         if (partitionedRegion.getTotalNumberOfBuckets() != bucketsCleared.size()) {
           String message = "Unable to clear all the buckets from the partitioned region "
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
index b66ab44..b48c9ee 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
@@ -18,10 +18,7 @@ package org.apache.geode.internal.cache;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.Set;
-import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.geode.DataSerializer;
 import org.apache.geode.cache.CacheException;
@@ -36,6 +33,7 @@ import org.apache.geode.distributed.internal.ReplyProcessor21;
 import org.apache.geode.distributed.internal.ReplySender;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.Assert;
+import org.apache.geode.internal.CopyOnWriteHashSet;
 import org.apache.geode.internal.NanoTimer;
 import org.apache.geode.internal.cache.partitioned.PartitionMessage;
 import org.apache.geode.internal.logging.log4j.LogMarker;
@@ -57,7 +55,7 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
 
   private PartitionedRegion partitionedRegion;
 
-  private ArrayList bucketsCleared;
+  private Set<Integer> bucketsCleared;
 
   @Override
   public EventID getEventID() {
@@ -66,7 +64,7 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
 
   public PartitionedRegionClearMessage() {}
 
-  PartitionedRegionClearMessage(Set recipients, PartitionedRegion region,
+  PartitionedRegionClearMessage(Set<InternalDistributedMember> recipients, PartitionedRegion region,
       ReplyProcessor21 processor, PartitionedRegionClearMessage.OperationType operationType,
       final RegionEventImpl event) {
     super(recipients, region.getPRId(), processor);
@@ -90,11 +88,10 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
   protected Throwable processCheckForPR(PartitionedRegion pr,
       DistributionManager distributionManager) {
     if (pr != null && !pr.getDistributionAdvisor().isInitialized()) {
-      Throwable thr = new ForceReattemptException(
+      return new ForceReattemptException(
           String.format("%s : could not find partitioned region with Id %s",
               distributionManager.getDistributionManagerId(),
               pr.getRegionIdentifier()));
-      return thr;
     }
     return null;
   }
@@ -160,16 +157,17 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
    * received from the "far side"
    */
   public static class PartitionedRegionClearResponse extends ReplyProcessor21 {
-    CopyOnWriteArrayList bucketsCleared = new CopyOnWriteArrayList();
+    CopyOnWriteHashSet<Integer> bucketsCleared = new CopyOnWriteHashSet<>();
 
-    public PartitionedRegionClearResponse(InternalDistributedSystem system, Set initMembers) {
+    public PartitionedRegionClearResponse(InternalDistributedSystem system,
+        Set<InternalDistributedMember> initMembers) {
       super(system, initMembers);
     }
 
     @Override
     public void process(DistributionMessage msg) {
       if (msg instanceof PartitionedRegionClearReplyMessage) {
-        List buckets = ((PartitionedRegionClearReplyMessage) msg).bucketsCleared;
+        Set<Integer> buckets = ((PartitionedRegionClearReplyMessage) msg).bucketsCleared;
         if (buckets != null) {
           bucketsCleared.addAll(buckets);
         }
@@ -194,7 +192,7 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
 
   public static class PartitionedRegionClearReplyMessage extends ReplyMessage {
 
-    private ArrayList bucketsCleared;
+    private Set<Integer> bucketsCleared;
 
     private OperationType op;
 
@@ -209,7 +207,7 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
     public PartitionedRegionClearReplyMessage() {}
 
     private PartitionedRegionClearReplyMessage(int processorId, OperationType op,
-        ArrayList bucketsCleared, ReplyException ex) {
+        Set<Integer> bucketsCleared, ReplyException ex) {
       super();
       this.bucketsCleared = bucketsCleared;
       this.op = op;
@@ -219,7 +217,7 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
 
     /** Send an ack */
     public static void send(InternalDistributedMember recipient, int processorId, ReplySender dm,
-        OperationType op, ArrayList bucketsCleared, ReplyException ex) {
+        OperationType op, Set<Integer> bucketsCleared, ReplyException ex) {
 
       Assert.assertTrue(recipient != null, "partitionedRegionClearReplyMessage NULL reply message");
 
@@ -262,7 +260,7 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
         DeserializationContext context) throws IOException, ClassNotFoundException {
       super.fromData(in, context);
       op = PartitionedRegionClearMessage.OperationType.values()[in.readByte()];
-      bucketsCleared = DataSerializer.readArrayList(in);
+      bucketsCleared = DataSerializer.readObject(in);
     }
 
     @Override
@@ -270,7 +268,7 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
         SerializationContext context) throws IOException {
       super.toData(out, context);
       out.writeByte(op.ordinal());
-      DataSerializer.writeArrayList(bucketsCleared, out);
+      DataSerializer.writeObject(bucketsCleared, out);
     }
 
     @Override
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
index d8c42af..bd37d9e 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
@@ -28,7 +28,6 @@ import static org.mockito.Mockito.when;
 
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Set;
 
 import org.junit.Before;
@@ -45,7 +44,6 @@ import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.MembershipListener;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
-import org.apache.geode.internal.cache.versions.RegionVersionVector;
 
 public class PartitionedRegionClearTest {
 
@@ -74,7 +72,7 @@ public class PartitionedRegionClearTest {
     for (int i = 0; i < numBuckets; i++) {
       BucketRegion bucketRegion = mock(BucketRegion.class);
       when(bucketRegion.getBucketAdvisor()).thenReturn(bucketAdvisor);
-      when(bucketRegion.size()).thenReturn(1);
+      when(bucketRegion.size()).thenReturn(1).thenReturn(0);
       when(bucketRegion.getId()).thenReturn(i);
       bucketRegions.add(bucketRegion);
     }
@@ -131,7 +129,7 @@ public class PartitionedRegionClearTest {
     Region<String, PartitionRegionConfig> region = mock(Region.class);
     when(partitionedRegion.getPRRoot()).thenReturn(region);
     PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
-    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear)
+    doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear)
         .attemptToSendPartitionedRegionClearMessage(regionEvent,
             PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR);
     InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
@@ -151,7 +149,7 @@ public class PartitionedRegionClearTest {
     Region<String, PartitionRegionConfig> region = mock(Region.class);
     when(partitionedRegion.getPRRoot()).thenReturn(region);
     PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
-    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear)
+    doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear)
         .attemptToSendPartitionedRegionClearMessage(regionEvent,
             PartitionedRegionClearMessage.OperationType.OP_UNLOCK_FOR_PR_CLEAR);
     InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
@@ -171,14 +169,13 @@ public class PartitionedRegionClearTest {
     Region<String, PartitionRegionConfig> region = mock(Region.class);
     when(partitionedRegion.getPRRoot()).thenReturn(region);
     PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
-    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear)
+    doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear)
         .attemptToSendPartitionedRegionClearMessage(regionEvent,
             PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR);
     InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
     when(distributionManager.getId()).thenReturn(internalDistributedMember);
-    RegionVersionVector regionVersionVector = mock(RegionVersionVector.class);
 
-    spyPartitionedRegionClear.clearRegion(regionEvent, false, regionVersionVector);
+    spyPartitionedRegionClear.clearRegion(regionEvent);
 
     verify(spyPartitionedRegionClear, times(1)).clearRegionLocal(regionEvent);
     verify(spyPartitionedRegionClear, times(1)).sendPartitionedRegionClearMessage(regionEvent,
@@ -227,7 +224,7 @@ public class PartitionedRegionClearTest {
     assertThat(thrown)
         .isInstanceOf(PartitionedRegionPartialClearException.class)
         .hasMessage(
-            "Unable to find primary bucket region during clear operation for region: prRegion");
+            "Unable to find primary bucket region during clear operation on prRegion region.");
     verify(retryTimer, times(0)).waitForBucketsRecovery();
   }
 
@@ -241,7 +238,7 @@ public class PartitionedRegionClearTest {
     Set<BucketRegion> buckets = setupBucketRegions(partitionedRegionDataStore, bucketAdvisor);
     when(partitionedRegion.getDataStore()).thenReturn(partitionedRegionDataStore);
 
-    List bucketsCleared = partitionedRegionClear.clearRegionLocal(regionEvent);
+    Set<Integer> bucketsCleared = partitionedRegionClear.clearRegionLocal(regionEvent);
 
     assertThat(bucketsCleared).hasSize(buckets.size());
 
@@ -254,25 +251,44 @@ public class PartitionedRegionClearTest {
   }
 
   @Test
-  public void clearRegionLocalRetriesClearOnLocalPrimaryBucketRegions() {
+  public void clearRegionLocalRetriesClearOnNonClearedLocalPrimaryBucketRegionsWhenMembershipChanges() {
     RegionEventImpl regionEvent = mock(RegionEventImpl.class);
     BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
     when(bucketAdvisor.hasPrimary()).thenReturn(true);
     PartitionedRegionDataStore partitionedRegionDataStore = mock(PartitionedRegionDataStore.class);
     doNothing().when(partitionedRegionDataStore).lockBucketCreationForRegionClear();
     Set<BucketRegion> buckets = setupBucketRegions(partitionedRegionDataStore, bucketAdvisor);
+
+    final int numExtraBuckets = 3;
+    Set<BucketRegion> extraBuckets = new HashSet<>();
+    for (int i = 0; i < numExtraBuckets; i++) {
+      BucketRegion bucketRegion = mock(BucketRegion.class);
+      when(bucketRegion.getBucketAdvisor()).thenReturn(bucketAdvisor);
+      when(bucketRegion.size()).thenReturn(1);
+      when(bucketRegion.getId()).thenReturn(i + buckets.size());
+      extraBuckets.add(bucketRegion);
+    }
+    Set<BucketRegion> allBuckets = new HashSet<>(buckets);
+    allBuckets.addAll(extraBuckets);
+
+    // After the first try, add 3 extra buckets to the local bucket regions
+    when(partitionedRegionDataStore.getAllLocalBucketRegions()).thenReturn(buckets)
+        .thenReturn(allBuckets);
+    when(partitionedRegionDataStore.getAllLocalPrimaryBucketRegions()).thenReturn(buckets)
+        .thenReturn(allBuckets);
+
     when(partitionedRegion.getDataStore()).thenReturn(partitionedRegionDataStore);
     PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
     when(spyPartitionedRegionClear.getMembershipChange()).thenReturn(true).thenReturn(false);
 
-    List bucketsCleared = spyPartitionedRegionClear.clearRegionLocal(regionEvent);
+    Set<Integer> bucketsCleared = spyPartitionedRegionClear.clearRegionLocal(regionEvent);
 
-    int expectedClears = buckets.size() * 2; /* clear is called twice on each bucket */
+    int expectedClears = allBuckets.size();
     assertThat(bucketsCleared).hasSize(expectedClears);
 
     ArgumentCaptor<RegionEventImpl> argument = ArgumentCaptor.forClass(RegionEventImpl.class);
-    for (BucketRegion bucketRegion : buckets) {
-      verify(bucketRegion, times(2)).cmnClearRegion(argument.capture(), eq(false), eq(true));
+    for (BucketRegion bucketRegion : allBuckets) {
+      verify(bucketRegion, times(1)).cmnClearRegion(argument.capture(), eq(false), eq(true));
       RegionEventImpl bucketRegionEvent = argument.getValue();
       assertThat(bucketRegionEvent.getRegion()).isEqualTo(bucketRegion);
     }
@@ -372,7 +388,6 @@ public class PartitionedRegionClearTest {
     PartitionedRegionDataStore partitionedRegionDataStore = mock(PartitionedRegionDataStore.class);
     Set<BucketRegion> buckets = setupBucketRegions(partitionedRegionDataStore, bucketAdvisor);
     when(partitionedRegion.getDataStore()).thenReturn(partitionedRegionDataStore);
-    InternalDistributedMember member = mock(InternalDistributedMember.class);
 
     partitionedRegionClear.releaseClearLockLocal();
 
@@ -424,8 +439,7 @@ public class PartitionedRegionClearTest {
     PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
     doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
     doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
-    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear).clearRegion(regionEvent, false,
-        null);
+    doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear).clearRegion(regionEvent);
 
     spyPartitionedRegionClear.doClear(regionEvent, false);
 
@@ -441,8 +455,7 @@ public class PartitionedRegionClearTest {
     PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
     doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
     doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
-    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear).clearRegion(regionEvent,
-        cacheWrite, null);
+    doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear).clearRegion(regionEvent);
 
     spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
 
@@ -456,8 +469,7 @@ public class PartitionedRegionClearTest {
     PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
     doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
     doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
-    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear).clearRegion(regionEvent,
-        cacheWrite, null);
+    doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear).clearRegion(regionEvent);
 
     spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
 
@@ -475,8 +487,7 @@ public class PartitionedRegionClearTest {
     doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
     doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
     doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
-    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear).clearRegion(regionEvent,
-        cacheWrite, null);
+    doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear).clearRegion(regionEvent);
 
     spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
 
@@ -495,8 +506,7 @@ public class PartitionedRegionClearTest {
     doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
     doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
     doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
-    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear).clearRegion(regionEvent,
-        cacheWrite, null);
+    doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear).clearRegion(regionEvent);
 
     spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
 
@@ -515,8 +525,7 @@ public class PartitionedRegionClearTest {
     doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
     doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
     doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
-    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear).clearRegion(regionEvent,
-        cacheWrite, null);
+    doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear).clearRegion(regionEvent);
 
     spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
 
@@ -537,8 +546,7 @@ public class PartitionedRegionClearTest {
     doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
     doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
     doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
-    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear).clearRegion(regionEvent,
-        cacheWrite, null);
+    doReturn(Collections.EMPTY_SET).when(spyPartitionedRegionClear).clearRegion(regionEvent);
 
     Throwable thrown =
         catchThrowable(() -> spyPartitionedRegionClear.doClear(regionEvent, cacheWrite));


[geode] 13/22: GEODE-8173: Add unit test (coverage) for PartitionedRegionClear class. (#5208)

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 65a90168defa3e11352cc8143c621bafc3f31296
Author: agingade <ag...@pivotal.io>
AuthorDate: Mon Jun 8 10:23:50 2020 -0700

    GEODE-8173: Add unit test (coverage) for PartitionedRegionClear class. (#5208)
    
    * GEODE-8173: Add unit test (coverage) for PartitionedRegionClear class.
    Co-authored-by: anilkumar gingade <an...@anilg.local>
---
 .../cache/PRCacheListenerDistributedTest.java      | 337 +++++++++++-
 .../ReplicateCacheListenerDistributedTest.java     |   4 +-
 .../geode/internal/cache/PartitionedRegion.java    |   2 +-
 .../internal/cache/PartitionedRegionClear.java     |  83 ++-
 .../internal/cache/PartitionedRegionClearTest.java | 611 +++++++++++++++++++++
 5 files changed, 999 insertions(+), 38 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
index f4a9ac9..7d95473 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
@@ -17,10 +17,18 @@ package org.apache.geode.cache;
 import static org.apache.geode.test.dunit.VM.getVM;
 import static org.apache.geode.test.dunit.VM.getVMCount;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.nullValue;
 
+import java.io.Serializable;
 import java.util.Arrays;
 import java.util.Collection;
 
+import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -28,7 +36,13 @@ import org.junit.runners.Parameterized.Parameter;
 import org.junit.runners.Parameterized.Parameters;
 import org.junit.runners.Parameterized.UseParametersRunnerFactory;
 
+import org.apache.geode.cache.util.CacheListenerAdapter;
 import org.apache.geode.logging.internal.log4j.api.LogService;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+import org.apache.geode.test.dunit.rules.SharedCountersRule;
+import org.apache.geode.test.dunit.rules.SharedErrorCollector;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
 import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
 
 /**
@@ -43,7 +57,28 @@ import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactor
 @RunWith(Parameterized.class)
 @UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
 @SuppressWarnings("serial")
-public class PRCacheListenerDistributedTest extends ReplicateCacheListenerDistributedTest {
+public class PRCacheListenerDistributedTest implements Serializable {
+
+  protected static final String CLEAR = "CLEAR";
+  protected static final String REGION_DESTROY = "REGION_DESTROY";
+  private static final String CREATES = "CREATES";
+  private static final String UPDATES = "UPDATES";
+  private static final String INVALIDATES = "INVALIDATES";
+  private static final String DESTROYS = "DESTROYS";
+  private static final int ENTRY_VALUE = 0;
+  private static final int UPDATED_ENTRY_VALUE = 1;
+  private static final String KEY = "key-1";
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule();
+  @Rule
+  public CacheRule cacheRule = CacheRule.builder().createCacheInAll().build();
+  @Rule
+  public SerializableTestName testName = new SerializableTestName();
+  @Rule
+  public SharedCountersRule sharedCountersRule = new SharedCountersRule();
+  @Rule
+  public SharedErrorCollector errorCollector = new SharedErrorCollector();
+  protected String regionName;
 
   @Parameters
   public static Collection<Object[]> data() {
@@ -59,7 +94,6 @@ public class PRCacheListenerDistributedTest extends ReplicateCacheListenerDistri
   @Parameter(1)
   public Boolean withData;
 
-  @Override
   protected Region<String, Integer> createRegion(final String name,
       final CacheListener<String, Integer> listener) {
     return createPartitionedRegion(name, listener, false);
@@ -99,22 +133,18 @@ public class PRCacheListenerDistributedTest extends ReplicateCacheListenerDistri
     }
   }
 
-  @Override
   protected int expectedCreates() {
     return 1;
   }
 
-  @Override
   protected int expectedUpdates() {
     return 1;
   }
 
-  @Override
   protected int expectedInvalidates() {
     return 1;
   }
 
-  @Override
   protected int expectedDestroys() {
     return 1;
   }
@@ -132,7 +162,8 @@ public class PRCacheListenerDistributedTest extends ReplicateCacheListenerDistri
 
     region.destroyRegion();
 
-    assertThat(sharedCountersRule.getTotal(REGION_DESTROY)).isEqualTo(expectedRegionDestroys());
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY))
+        .isGreaterThanOrEqualTo(expectedRegionDestroys());
   }
 
   @Test
@@ -321,4 +352,296 @@ public class PRCacheListenerDistributedTest extends ReplicateCacheListenerDistri
     assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(1);
   }
 
+  @Before
+  public void setUp() {
+    regionName = getClass().getSimpleName();
+
+    sharedCountersRule.initialize(CREATES);
+    sharedCountersRule.initialize(DESTROYS);
+    sharedCountersRule.initialize(INVALIDATES);
+    sharedCountersRule.initialize(UPDATES);
+    sharedCountersRule.initialize(CLEAR);
+    sharedCountersRule.initialize(REGION_DESTROY);
+  }
+
+  @Test
+  public void afterCreateIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new CreateCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.put(KEY, ENTRY_VALUE, cacheRule.getSystem().getDistributedMember());
+
+    assertThat(sharedCountersRule.getTotal(CREATES)).isEqualTo(expectedCreates());
+  }
+
+  @Test
+  public void afterUpdateIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new UpdateCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.put(KEY, ENTRY_VALUE, cacheRule.getSystem().getDistributedMember());
+    region.put(KEY, UPDATED_ENTRY_VALUE, cacheRule.getSystem().getDistributedMember());
+
+    assertThat(sharedCountersRule.getTotal(UPDATES)).isEqualTo(expectedUpdates());
+  }
+
+  @Test
+  public void afterInvalidateIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new InvalidateCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.put(KEY, 0, cacheRule.getSystem().getDistributedMember());
+    region.invalidate(KEY);
+
+    assertThat(sharedCountersRule.getTotal(INVALIDATES)).isEqualTo(expectedInvalidates());
+    assertThat(region.get(KEY)).isNull();
+  }
+
+  @Test
+  public void afterDestroyIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new DestroyCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.put(KEY, 0, cacheRule.getSystem().getDistributedMember());
+    region.destroy(KEY);
+
+    assertThat(sharedCountersRule.getTotal(DESTROYS)).isEqualTo(expectedDestroys());
+  }
+
+  @Test
+  public void afterClearIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(expectedClears());
+  }
+
+  protected int expectedClears() {
+    return getVMCount() + 1;
+  }
+
+  protected int expectedRegionDestroys() {
+    return getVMCount() + 1;
+  }
+
+  /**
+   * Overridden within tests to increment shared counters.
+   */
+  private abstract static class BaseCacheListener extends CacheListenerAdapter<String, Integer>
+      implements Serializable {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      fail("Unexpected listener callback: afterCreate");
+    }
+
+    @Override
+    public void afterInvalidate(final EntryEvent<String, Integer> event) {
+      fail("Unexpected listener callback: afterInvalidate");
+    }
+
+    @Override
+    public void afterDestroy(final EntryEvent<String, Integer> event) {
+      fail("Unexpected listener callback: afterDestroy");
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      fail("Unexpected listener callback: afterUpdate");
+    }
+
+    @Override
+    public void afterRegionInvalidate(final RegionEvent<String, Integer> event) {
+      fail("Unexpected listener callback: afterRegionInvalidate");
+    }
+  }
+
+  private class CreateCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(CREATES);
+
+      errorCollector.checkThat(event.getDistributedMember(), equalTo(event.getCallbackArgument()));
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.CREATE));
+      errorCollector.checkThat(event.getOldValue(), nullValue());
+      errorCollector.checkThat(event.getNewValue(), equalTo(ENTRY_VALUE));
+
+      if (event.getSerializedOldValue() != null) {
+        errorCollector.checkThat(event.getSerializedOldValue().getDeserializedValue(),
+            equalTo(event.getOldValue()));
+      }
+      if (event.getSerializedNewValue() != null) {
+        errorCollector.checkThat(event.getSerializedNewValue().getDeserializedValue(),
+            equalTo(event.getNewValue()));
+      }
+    }
+  }
+
+  private class UpdateCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      // nothing
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(UPDATES);
+
+      errorCollector.checkThat(event.getDistributedMember(), equalTo(event.getCallbackArgument()));
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.UPDATE));
+      errorCollector.checkThat(event.getOldValue(), anyOf(equalTo(ENTRY_VALUE), nullValue()));
+      errorCollector.checkThat(event.getNewValue(), equalTo(UPDATED_ENTRY_VALUE));
+
+      if (event.getSerializedOldValue() != null) {
+        errorCollector.checkThat(event.getSerializedOldValue().getDeserializedValue(),
+            equalTo(event.getOldValue()));
+      }
+      if (event.getSerializedNewValue() != null) {
+        errorCollector.checkThat(event.getSerializedNewValue().getDeserializedValue(),
+            equalTo(event.getNewValue()));
+      }
+    }
+  }
+
+  private class InvalidateCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      // ignore
+    }
+
+    @Override
+    public void afterInvalidate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(INVALIDATES);
+
+      if (event.isOriginRemote()) {
+        errorCollector.checkThat(event.getDistributedMember(),
+            not(cacheRule.getSystem().getDistributedMember()));
+      } else {
+        errorCollector.checkThat(event.getDistributedMember(),
+            equalTo(cacheRule.getSystem().getDistributedMember()));
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.INVALIDATE));
+      errorCollector.checkThat(event.getOldValue(), anyOf(equalTo(ENTRY_VALUE), nullValue()));
+      errorCollector.checkThat(event.getNewValue(), nullValue());
+    }
+  }
+
+  private class DestroyCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(UPDATES);
+    }
+
+    @Override
+    public void afterDestroy(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(DESTROYS);
+
+      if (event.isOriginRemote()) {
+        errorCollector.checkThat(event.getDistributedMember(),
+            not(cacheRule.getSystem().getDistributedMember()));
+      } else {
+        errorCollector.checkThat(event.getDistributedMember(),
+            equalTo(cacheRule.getSystem().getDistributedMember()));
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.DESTROY));
+      errorCollector.checkThat(event.getOldValue(), anyOf(equalTo(ENTRY_VALUE), nullValue()));
+      errorCollector.checkThat(event.getNewValue(), nullValue());
+    }
+  }
+
+  protected class ClearCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(UPDATES);
+    }
+
+    @Override
+    public void afterRegionClear(RegionEvent<String, Integer> event) {
+
+      sharedCountersRule.increment(CLEAR);
+      if (!event.getRegion().getAttributes().getDataPolicy().withPartitioning()) {
+        if (event.isOriginRemote()) {
+          errorCollector.checkThat(event.getDistributedMember(),
+              not(cacheRule.getSystem().getDistributedMember()));
+        } else {
+          errorCollector.checkThat(event.getDistributedMember(),
+              equalTo(cacheRule.getSystem().getDistributedMember()));
+        }
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.REGION_CLEAR));
+      errorCollector.checkThat(event.getRegion().getName(), equalTo(regionName));
+    }
+  }
+
+  protected class RegionDestroyCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(UPDATES);
+    }
+
+    @Override
+    public void afterRegionDestroy(final RegionEvent<String, Integer> event) {
+      sharedCountersRule.increment(REGION_DESTROY);
+
+      if (!event.getRegion().getAttributes().getDataPolicy().withPartitioning()) {
+        if (event.isOriginRemote()) {
+          errorCollector.checkThat(event.getDistributedMember(),
+              not(cacheRule.getSystem().getDistributedMember()));
+        } else {
+          errorCollector.checkThat(event.getDistributedMember(),
+              equalTo(cacheRule.getSystem().getDistributedMember()));
+        }
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.REGION_DESTROY));
+      errorCollector.checkThat(event.getRegion().getName(), equalTo(regionName));
+    }
+  }
 }
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java
index 6612833..dd229de 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java
@@ -51,8 +51,8 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
   private static final String UPDATES = "UPDATES";
   private static final String INVALIDATES = "INVALIDATES";
   private static final String DESTROYS = "DESTROYS";
-  protected static final String CLEAR = "CLEAR";
-  protected static final String REGION_DESTROY = "REGION_DESTROY";
+  private static final String CLEAR = "CLEAR";
+  private static final String REGION_DESTROY = "REGION_DESTROY";
 
   private static final int ENTRY_VALUE = 0;
   private static final int UPDATED_ENTRY_VALUE = 1;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index 671d27b..25481ae 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -10256,7 +10256,7 @@ public class PartitionedRegion extends LocalRegion
   void cmnClearRegion(RegionEventImpl regionEvent, boolean cacheWrite, boolean useRVV) {
     // Synchronized to avoid other threads invoking clear on this vm/node.
     synchronized (clearLock) {
-      partitionedRegionClear.doClear(regionEvent, cacheWrite, this);
+      partitionedRegionClear.doClear(regionEvent, cacheWrite);
     }
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
index 69277ef..030b36e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
@@ -39,21 +39,24 @@ public class PartitionedRegionClear {
 
   private static final Logger logger = LogService.getLogger();
 
-  private static final String CLEAR_OPERATION = "_clearOperation";
+  protected static final String CLEAR_OPERATION = "_clearOperation";
 
   private final int retryTime = 2 * 60 * 1000;
 
   private final PartitionedRegion partitionedRegion;
 
-  private final LockForListenerAndClientNotification lockForListenerAndClientNotification =
+  protected final LockForListenerAndClientNotification lockForListenerAndClientNotification =
       new LockForListenerAndClientNotification();
 
   private volatile boolean membershipChange = false;
 
+  protected final PartitionedRegionClearListener partitionedRegionClearListener =
+      new PartitionedRegionClearListener();
+
   public PartitionedRegionClear(PartitionedRegion partitionedRegion) {
     this.partitionedRegion = partitionedRegion;
     partitionedRegion.getDistributionManager()
-        .addMembershipListener(new PartitionedRegionClearListener());
+        .addMembershipListener(partitionedRegionClearListener);
   }
 
   public boolean isLockedForListenerAndClientNotification() {
@@ -79,6 +82,10 @@ public class PartitionedRegionClear {
     }
   }
 
+  protected PartitionedRegionClearListener getPartitionedRegionClearListener() {
+    return partitionedRegionClearListener;
+  }
+
   void obtainLockForClear(RegionEventImpl event) {
     obtainClearLockLocal(partitionedRegion.getDistributionManager().getId());
     sendPartitionedRegionClearMessage(event,
@@ -100,9 +107,8 @@ public class PartitionedRegionClear {
     return allBucketsCleared;
   }
 
-  private void waitForPrimary() {
+  protected void waitForPrimary(PartitionedRegion.RetryTimeKeeper retryTimer) {
     boolean retry;
-    PartitionedRegion.RetryTimeKeeper retryTimer = new PartitionedRegion.RetryTimeKeeper(retryTime);
     do {
       retry = false;
       for (BucketRegion bucketRegion : partitionedRegion.getDataStore()
@@ -122,7 +128,7 @@ public class PartitionedRegionClear {
 
   public ArrayList clearRegionLocal(RegionEventImpl regionEvent) {
     ArrayList clearedBuckets = new ArrayList();
-    membershipChange = false;
+    setMembershipChange(false);
     // Synchronized to handle the requester departure.
     synchronized (lockForListenerAndClientNotification) {
       if (partitionedRegion.getDataStore() != null) {
@@ -130,18 +136,22 @@ public class PartitionedRegionClear {
         try {
           boolean retry;
           do {
-            waitForPrimary();
-
+            waitForPrimary(new PartitionedRegion.RetryTimeKeeper(retryTime));
+            RegionEventImpl bucketRegionEvent;
             for (BucketRegion localPrimaryBucketRegion : partitionedRegion.getDataStore()
                 .getAllLocalPrimaryBucketRegions()) {
               if (localPrimaryBucketRegion.size() > 0) {
-                localPrimaryBucketRegion.clear();
+                bucketRegionEvent =
+                    new RegionEventImpl(localPrimaryBucketRegion, Operation.REGION_CLEAR, null,
+                        false, partitionedRegion.getMyId(), regionEvent.getEventId());
+                localPrimaryBucketRegion.cmnClearRegion(bucketRegionEvent, false, true);
               }
               clearedBuckets.add(localPrimaryBucketRegion.getId());
             }
 
-            if (membershipChange) {
-              membershipChange = false;
+            if (getMembershipChange()) {
+              // Retry and reset the membership change status.
+              setMembershipChange(false);
               retry = true;
             } else {
               retry = false;
@@ -160,7 +170,7 @@ public class PartitionedRegionClear {
     return clearedBuckets;
   }
 
-  private void doAfterClear(RegionEventImpl regionEvent) {
+  protected void doAfterClear(RegionEventImpl regionEvent) {
     if (partitionedRegion.hasAnyClientsInterested()) {
       notifyClients(regionEvent);
     }
@@ -245,7 +255,7 @@ public class PartitionedRegionClear {
     }
   }
 
-  private List sendPartitionedRegionClearMessage(RegionEventImpl event,
+  protected List sendPartitionedRegionClearMessage(RegionEventImpl event,
       PartitionedRegionClearMessage.OperationType op) {
     RegionEventImpl eventForLocalClear = (RegionEventImpl) event.clone();
     eventForLocalClear.setOperation(Operation.REGION_LOCAL_CLEAR);
@@ -259,7 +269,7 @@ public class PartitionedRegionClear {
     } while (true);
   }
 
-  private List attemptToSendPartitionedRegionClearMessage(RegionEventImpl event,
+  protected List attemptToSendPartitionedRegionClearMessage(RegionEventImpl event,
       PartitionedRegionClearMessage.OperationType op)
       throws ForceReattemptException {
     List bucketsOperated = null;
@@ -321,30 +331,27 @@ public class PartitionedRegionClear {
     return bucketsOperated;
   }
 
-  void doClear(RegionEventImpl regionEvent, boolean cacheWrite,
-      PartitionedRegion partitionedRegion) {
-    String lockName = CLEAR_OPERATION + partitionedRegion.getDisplayName();
+  void doClear(RegionEventImpl regionEvent, boolean cacheWrite) {
+    String lockName = CLEAR_OPERATION + partitionedRegion.getName();
 
     try {
       // distributed lock to make sure only one clear op is in progress in the cluster.
       acquireDistributedClearLock(lockName);
 
       // Force all primary buckets to be created before clear.
-      PartitionRegionHelper.assignBucketsToPartitions(partitionedRegion);
+      assignAllPrimaryBuckets();
 
       // do cacheWrite
-      try {
-        partitionedRegion.cacheWriteBeforeRegionClear(regionEvent);
-      } catch (OperationAbortedException operationAbortedException) {
-        throw new CacheWriterException(operationAbortedException);
+      if (cacheWrite) {
+        invokeCacheWriter(regionEvent);
       }
 
       // Check if there are any listeners or clients interested. If so, then clear write
       // locks needs to be taken on all local and remote primary buckets in order to
       // preserve the ordering of client events (for concurrent operations on the region).
-      boolean acquireClearLockForClientNotification =
-          (partitionedRegion.hasAnyClientsInterested() && partitionedRegion.hasListener());
-      if (acquireClearLockForClientNotification) {
+      boolean acquireClearLockForNotification =
+          (partitionedRegion.hasAnyClientsInterested() || partitionedRegion.hasListener());
+      if (acquireClearLockForNotification) {
         obtainLockForClear(regionEvent);
       }
       try {
@@ -362,7 +369,7 @@ public class PartitionedRegionClear {
           throw new PartitionedRegionPartialClearException(message);
         }
       } finally {
-        if (acquireClearLockForClientNotification) {
+        if (acquireClearLockForNotification) {
           releaseLockForClear(regionEvent);
         }
       }
@@ -372,7 +379,19 @@ public class PartitionedRegionClear {
     }
   }
 
-  void handleClearFromDepartedMember(InternalDistributedMember departedMember) {
+  protected void invokeCacheWriter(RegionEventImpl regionEvent) {
+    try {
+      partitionedRegion.cacheWriteBeforeRegionClear(regionEvent);
+    } catch (OperationAbortedException operationAbortedException) {
+      throw new CacheWriterException(operationAbortedException);
+    }
+  }
+
+  protected void assignAllPrimaryBuckets() {
+    PartitionRegionHelper.assignBucketsToPartitions(partitionedRegion);
+  }
+
+  protected void handleClearFromDepartedMember(InternalDistributedMember departedMember) {
     if (departedMember.equals(lockForListenerAndClientNotification.getLockRequester())) {
       synchronized (lockForListenerAndClientNotification) {
         if (lockForListenerAndClientNotification.getLockRequester() != null) {
@@ -407,12 +426,20 @@ public class PartitionedRegionClear {
     }
   }
 
+  protected void setMembershipChange(boolean membershipChange) {
+    this.membershipChange = membershipChange;
+  }
+
+  protected boolean getMembershipChange() {
+    return membershipChange;
+  }
+
   protected class PartitionedRegionClearListener implements MembershipListener {
 
     @Override
     public synchronized void memberDeparted(DistributionManager distributionManager,
         InternalDistributedMember id, boolean crashed) {
-      membershipChange = true;
+      setMembershipChange(true);
       handleClearFromDepartedMember(id);
     }
   }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
new file mode 100644
index 0000000..d8c42af
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
@@ -0,0 +1,611 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.catchThrowable;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+import org.apache.geode.CancelCriterion;
+import org.apache.geode.cache.PartitionedRegionPartialClearException;
+import org.apache.geode.cache.Region;
+import org.apache.geode.distributed.DistributedLockService;
+import org.apache.geode.distributed.internal.DMStats;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.MembershipListener;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
+import org.apache.geode.internal.cache.versions.RegionVersionVector;
+
+public class PartitionedRegionClearTest {
+
+
+  private PartitionedRegionClear partitionedRegionClear;
+  private DistributionManager distributionManager;
+  private PartitionedRegion partitionedRegion;
+
+  @Before
+  public void setUp() {
+
+    partitionedRegion = mock(PartitionedRegion.class);
+    distributionManager = mock(DistributionManager.class);
+
+    when(partitionedRegion.getDistributionManager()).thenReturn(distributionManager);
+    when(partitionedRegion.getName()).thenReturn("prRegion");
+
+    partitionedRegionClear = new PartitionedRegionClear(partitionedRegion);
+  }
+
+  private Set<BucketRegion> setupBucketRegions(
+      PartitionedRegionDataStore partitionedRegionDataStore,
+      BucketAdvisor bucketAdvisor) {
+    final int numBuckets = 2;
+    Set<BucketRegion> bucketRegions = new HashSet<>();
+    for (int i = 0; i < numBuckets; i++) {
+      BucketRegion bucketRegion = mock(BucketRegion.class);
+      when(bucketRegion.getBucketAdvisor()).thenReturn(bucketAdvisor);
+      when(bucketRegion.size()).thenReturn(1);
+      when(bucketRegion.getId()).thenReturn(i);
+      bucketRegions.add(bucketRegion);
+    }
+
+    when(partitionedRegionDataStore.getAllLocalBucketRegions()).thenReturn(bucketRegions);
+    when(partitionedRegionDataStore.getAllLocalPrimaryBucketRegions()).thenReturn(bucketRegions);
+
+    return bucketRegions;
+  }
+
+  @Test
+  public void isLockedForListenerAndClientNotificationReturnsTrueWhenLocked() {
+    InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
+    when(distributionManager.isCurrentMember(internalDistributedMember)).thenReturn(true);
+    partitionedRegionClear.obtainClearLockLocal(internalDistributedMember);
+
+    assertThat(partitionedRegionClear.isLockedForListenerAndClientNotification()).isTrue();
+  }
+
+  @Test
+  public void isLockedForListenerAndClientNotificationReturnsFalseWhenMemberNotInTheSystemRequestsLock() {
+    InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
+    when(distributionManager.isCurrentMember(internalDistributedMember)).thenReturn(false);
+
+    assertThat(partitionedRegionClear.isLockedForListenerAndClientNotification()).isFalse();
+  }
+
+  @Test
+  public void acquireDistributedClearLockGetsDistributedLock() {
+    DistributedLockService distributedLockService = mock(DistributedLockService.class);
+    String lockName = PartitionedRegionClear.CLEAR_OPERATION + partitionedRegion.getName();
+    when(partitionedRegion.getPartitionedRegionLockService()).thenReturn(distributedLockService);
+
+    partitionedRegionClear.acquireDistributedClearLock(lockName);
+
+    verify(distributedLockService, times(1)).lock(lockName, -1, -1);
+  }
+
+  @Test
+  public void releaseDistributedClearLockReleasesDistributedLock() {
+    DistributedLockService distributedLockService = mock(DistributedLockService.class);
+    String lockName = PartitionedRegionClear.CLEAR_OPERATION + partitionedRegion.getName();
+    when(partitionedRegion.getPartitionedRegionLockService()).thenReturn(distributedLockService);
+
+    partitionedRegionClear.releaseDistributedClearLock(lockName);
+
+    verify(distributedLockService, times(1)).unlock(lockName);
+  }
+
+  @Test
+  public void obtainLockForClearGetsLocalLockAndSendsMessageForRemote() throws Exception {
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(regionEvent.clone()).thenReturn(mock(RegionEventImpl.class));
+    Region<String, PartitionRegionConfig> region = mock(Region.class);
+    when(partitionedRegion.getPRRoot()).thenReturn(region);
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear)
+        .attemptToSendPartitionedRegionClearMessage(regionEvent,
+            PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR);
+    InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
+    when(distributionManager.getId()).thenReturn(internalDistributedMember);
+
+    spyPartitionedRegionClear.obtainLockForClear(regionEvent);
+
+    verify(spyPartitionedRegionClear, times(1)).obtainClearLockLocal(internalDistributedMember);
+    verify(spyPartitionedRegionClear, times(1)).sendPartitionedRegionClearMessage(regionEvent,
+        PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR);
+  }
+
+  @Test
+  public void releaseLockForClearReleasesLocalLockAndSendsMessageForRemote() throws Exception {
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(regionEvent.clone()).thenReturn(mock(RegionEventImpl.class));
+    Region<String, PartitionRegionConfig> region = mock(Region.class);
+    when(partitionedRegion.getPRRoot()).thenReturn(region);
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear)
+        .attemptToSendPartitionedRegionClearMessage(regionEvent,
+            PartitionedRegionClearMessage.OperationType.OP_UNLOCK_FOR_PR_CLEAR);
+    InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
+    when(distributionManager.getId()).thenReturn(internalDistributedMember);
+
+    spyPartitionedRegionClear.releaseLockForClear(regionEvent);
+
+    verify(spyPartitionedRegionClear, times(1)).releaseClearLockLocal();
+    verify(spyPartitionedRegionClear, times(1)).sendPartitionedRegionClearMessage(regionEvent,
+        PartitionedRegionClearMessage.OperationType.OP_UNLOCK_FOR_PR_CLEAR);
+  }
+
+  @Test
+  public void clearRegionClearsLocalAndSendsMessageForRemote() throws Exception {
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(regionEvent.clone()).thenReturn(mock(RegionEventImpl.class));
+    Region<String, PartitionRegionConfig> region = mock(Region.class);
+    when(partitionedRegion.getPRRoot()).thenReturn(region);
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doReturn(Collections.EMPTY_LIST).when(spyPartitionedRegionClear)
+        .attemptToSendPartitionedRegionClearMessage(regionEvent,
+            PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR);
+    InternalDistributedMember internalDistributedMember = mock(InternalDistributedMember.class);
+    when(distributionManager.getId()).thenReturn(internalDistributedMember);
+    RegionVersionVector regionVersionVector = mock(RegionVersionVector.class);
+
+    spyPartitionedRegionClear.clearRegion(regionEvent, false, regionVersionVector);
+
+    verify(spyPartitionedRegionClear, times(1)).clearRegionLocal(regionEvent);
+    verify(spyPartitionedRegionClear, times(1)).sendPartitionedRegionClearMessage(regionEvent,
+        PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR);
+  }
+
+  @Test
+  public void waitForPrimaryReturnsAfterFindingAllPrimary() {
+    PartitionedRegionDataStore partitionedRegionDataStore = mock(PartitionedRegionDataStore.class);
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    when(bucketAdvisor.hasPrimary()).thenReturn(true);
+    setupBucketRegions(partitionedRegionDataStore, bucketAdvisor);
+    when(partitionedRegion.getDataStore()).thenReturn(partitionedRegionDataStore);
+    PartitionedRegion.RetryTimeKeeper retryTimer = mock(PartitionedRegion.RetryTimeKeeper.class);
+
+    partitionedRegionClear.waitForPrimary(retryTimer);
+
+    verify(retryTimer, times(0)).waitForBucketsRecovery();
+  }
+
+  @Test
+  public void waitForPrimaryReturnsAfterRetryForPrimary() {
+    // The first primary check fails and the second succeeds, so exactly one
+    // bucket-recovery wait should happen before the method returns.
+    BucketAdvisor advisor = mock(BucketAdvisor.class);
+    when(advisor.hasPrimary()).thenReturn(false).thenReturn(true);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+    setupBucketRegions(dataStore, advisor);
+    when(partitionedRegion.getDataStore()).thenReturn(dataStore);
+    PartitionedRegion.RetryTimeKeeper timeKeeper = mock(PartitionedRegion.RetryTimeKeeper.class);
+
+    partitionedRegionClear.waitForPrimary(timeKeeper);
+
+    verify(timeKeeper, times(1)).waitForBucketsRecovery();
+  }
+
+  @Test
+  public void waitForPrimaryThrowsPartitionedRegionPartialClearException() {
+    // No bucket ever reports a primary (mock default is false) and the retry
+    // timer is over its maximum, so waiting must fail without ever retrying.
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+    BucketAdvisor advisor = mock(BucketAdvisor.class);
+    setupBucketRegions(dataStore, advisor);
+    when(partitionedRegion.getDataStore()).thenReturn(dataStore);
+    PartitionedRegion.RetryTimeKeeper timeKeeper = mock(PartitionedRegion.RetryTimeKeeper.class);
+    when(timeKeeper.overMaximum()).thenReturn(true);
+
+    Throwable thrown = catchThrowable(() -> partitionedRegionClear.waitForPrimary(timeKeeper));
+
+    assertThat(thrown)
+        .isInstanceOf(PartitionedRegionPartialClearException.class)
+        .hasMessage(
+            "Unable to find primary bucket region during clear operation for region: prRegion");
+    verify(timeKeeper, times(0)).waitForBucketsRecovery();
+  }
+
+  @Test
+  public void clearRegionLocalCallsClearOnLocalPrimaryBucketRegions() {
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    when(bucketAdvisor.hasPrimary()).thenReturn(true);
+    PartitionedRegionDataStore partitionedRegionDataStore = mock(PartitionedRegionDataStore.class);
+    doNothing().when(partitionedRegionDataStore).lockBucketCreationForRegionClear();
+    Set<BucketRegion> buckets = setupBucketRegions(partitionedRegionDataStore, bucketAdvisor);
+    when(partitionedRegion.getDataStore()).thenReturn(partitionedRegionDataStore);
+
+    List bucketsCleared = partitionedRegionClear.clearRegionLocal(regionEvent);
+
+    assertThat(bucketsCleared).hasSize(buckets.size());
+
+    ArgumentCaptor<RegionEventImpl> argument = ArgumentCaptor.forClass(RegionEventImpl.class);
+    for (BucketRegion bucketRegion : buckets) {
+      verify(bucketRegion, times(1)).cmnClearRegion(argument.capture(), eq(false), eq(true));
+      RegionEventImpl bucketRegionEvent = argument.getValue();
+      assertThat(bucketRegionEvent.getRegion()).isEqualTo(bucketRegion);
+    }
+  }
+
+  @Test
+  public void clearRegionLocalRetriesClearOnLocalPrimaryBucketRegions() {
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    when(bucketAdvisor.hasPrimary()).thenReturn(true);
+    PartitionedRegionDataStore partitionedRegionDataStore = mock(PartitionedRegionDataStore.class);
+    doNothing().when(partitionedRegionDataStore).lockBucketCreationForRegionClear();
+    Set<BucketRegion> buckets = setupBucketRegions(partitionedRegionDataStore, bucketAdvisor);
+    when(partitionedRegion.getDataStore()).thenReturn(partitionedRegionDataStore);
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    when(spyPartitionedRegionClear.getMembershipChange()).thenReturn(true).thenReturn(false);
+
+    List bucketsCleared = spyPartitionedRegionClear.clearRegionLocal(regionEvent);
+
+    int expectedClears = buckets.size() * 2; /* clear is called twice on each bucket */
+    assertThat(bucketsCleared).hasSize(expectedClears);
+
+    ArgumentCaptor<RegionEventImpl> argument = ArgumentCaptor.forClass(RegionEventImpl.class);
+    for (BucketRegion bucketRegion : buckets) {
+      verify(bucketRegion, times(2)).cmnClearRegion(argument.capture(), eq(false), eq(true));
+      RegionEventImpl bucketRegionEvent = argument.getValue();
+      assertThat(bucketRegionEvent.getRegion()).isEqualTo(bucketRegion);
+    }
+  }
+
+  @Test
+  public void doAfterClearCallsNotifyClientsWhenClientHaveInterests() {
+    // The region reports interested clients, so the clear must be routed to them.
+    RegionEventImpl clearEvent = mock(RegionEventImpl.class);
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(true);
+    FilterRoutingInfo routingInfo = mock(FilterRoutingInfo.class);
+    FilterProfile profile = mock(FilterProfile.class);
+    when(profile.getFilterRoutingInfoPart1(clearEvent, FilterProfile.NO_PROFILES,
+        Collections.emptySet())).thenReturn(routingInfo);
+    when(profile.getFilterRoutingInfoPart2(routingInfo, clearEvent)).thenReturn(routingInfo);
+    when(partitionedRegion.getFilterProfile()).thenReturn(profile);
+
+    partitionedRegionClear.doAfterClear(clearEvent);
+
+    // Routing info is attached to the event and bridge clients are notified.
+    verify(clearEvent, times(1)).setLocalFilterInfo(any());
+    verify(partitionedRegion, times(1)).notifyBridgeClients(clearEvent);
+  }
+
+  @Test
+  public void doAfterClearDispatchesListenerEvents() {
+    // With a cache listener installed, an AFTER_REGION_CLEAR event must be dispatched.
+    RegionEventImpl clearEvent = mock(RegionEventImpl.class);
+    when(partitionedRegion.hasListener()).thenReturn(true);
+
+    partitionedRegionClear.doAfterClear(clearEvent);
+
+    verify(partitionedRegion, times(1)).dispatchListenerEvent(
+        EnumListenerEvent.AFTER_REGION_CLEAR, clearEvent);
+  }
+
+  @Test
+  public void obtainClearLockLocalGetsLockOnPrimaryBuckets() {
+    // All local buckets are primaries and the requesting member is current.
+    BucketAdvisor advisor = mock(BucketAdvisor.class);
+    when(advisor.hasPrimary()).thenReturn(true);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+    Set<BucketRegion> bucketRegions = setupBucketRegions(dataStore, advisor);
+    when(partitionedRegion.getDataStore()).thenReturn(dataStore);
+    InternalDistributedMember requester = mock(InternalDistributedMember.class);
+    when(distributionManager.isCurrentMember(requester)).thenReturn(true);
+
+    partitionedRegionClear.obtainClearLockLocal(requester);
+
+    // The requester is recorded and every primary bucket is locally locked.
+    assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
+        .isSameAs(requester);
+    for (BucketRegion bucketRegion : bucketRegions) {
+      verify(bucketRegion, times(1)).lockLocallyForClear(partitionedRegion.getDistributionManager(),
+          partitionedRegion.getMyId(), null);
+    }
+  }
+
+  @Test
+  public void obtainClearLockLocalDoesNotGetLocksOnPrimaryBucketsWhenMemberIsNotCurrent() {
+    // The requesting member is not a current member, so no lock may be taken.
+    BucketAdvisor advisor = mock(BucketAdvisor.class);
+    when(advisor.hasPrimary()).thenReturn(true);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+    Set<BucketRegion> bucketRegions = setupBucketRegions(dataStore, advisor);
+    when(partitionedRegion.getDataStore()).thenReturn(dataStore);
+    InternalDistributedMember requester = mock(InternalDistributedMember.class);
+    when(distributionManager.isCurrentMember(requester)).thenReturn(false);
+
+    partitionedRegionClear.obtainClearLockLocal(requester);
+
+    // No requester is recorded and no bucket is locked.
+    assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
+        .isNull();
+    for (BucketRegion bucketRegion : bucketRegions) {
+      verify(bucketRegion, times(0)).lockLocallyForClear(partitionedRegion.getDistributionManager(),
+          partitionedRegion.getMyId(), null);
+    }
+  }
+
+  @Test
+  public void releaseClearLockLocalReleasesLockOnPrimaryBuckets() {
+    BucketAdvisor advisor = mock(BucketAdvisor.class);
+    when(advisor.hasPrimary()).thenReturn(true);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+    Set<BucketRegion> bucketRegions = setupBucketRegions(dataStore, advisor);
+    when(partitionedRegion.getDataStore()).thenReturn(dataStore);
+    InternalDistributedMember requester = mock(InternalDistributedMember.class);
+    when(distributionManager.isCurrentMember(requester)).thenReturn(true);
+    // Simulate a previously obtained clear lock held by the requester.
+    partitionedRegionClear.lockForListenerAndClientNotification.setLocked(requester);
+
+    partitionedRegionClear.releaseClearLockLocal();
+
+    // Every primary bucket must have its local clear lock released.
+    for (BucketRegion bucketRegion : bucketRegions) {
+      verify(bucketRegion, times(1)).releaseLockLocallyForClear(null);
+    }
+  }
+
+  @Test
+  public void releaseClearLockLocalDoesNotReleaseLocksOnPrimaryBucketsWhenMemberIsNotCurrent() {
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    when(bucketAdvisor.hasPrimary()).thenReturn(true);
+    PartitionedRegionDataStore partitionedRegionDataStore = mock(PartitionedRegionDataStore.class);
+    Set<BucketRegion> buckets = setupBucketRegions(partitionedRegionDataStore, bucketAdvisor);
+    when(partitionedRegion.getDataStore()).thenReturn(partitionedRegionDataStore);
+    // No lock requester is recorded (the prior holder is no longer a current
+    // member), so releasing must be a no-op on every bucket.
+
+    partitionedRegionClear.releaseClearLockLocal();
+
+    assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
+        .isNull();
+    for (BucketRegion bucketRegion : buckets) {
+      verify(bucketRegion, times(0)).releaseLockLocallyForClear(null);
+    }
+  }
+
+  @Test
+  public void sendPartitionedRegionClearMessageSendsClearMessageToPRNodes() {
+    // The event must be cloneable: the message path clones it per recipient.
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(regionEvent.clone()).thenReturn(mock(RegionEventImpl.class));
+    Region<String, PartitionRegionConfig> prRoot = mock(Region.class);
+    when(partitionedRegion.getPRRoot()).thenReturn(prRoot);
+    // One remote PR node that is also listed in the region configuration,
+    // making it a valid recipient for the clear message.
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
+    RegionAdvisor regionAdvisor = mock(RegionAdvisor.class);
+    Set<InternalDistributedMember> prNodes = Collections.singleton(member);
+    Node node = mock(Node.class);
+    when(node.getMemberId()).thenReturn(member);
+    Set<Node> configNodes = Collections.singleton(node);
+    when(regionAdvisor.adviseAllPRNodes()).thenReturn(prNodes);
+    when(partitionedRegion.getRegionAdvisor()).thenReturn(regionAdvisor);
+    PartitionRegionConfig partitionRegionConfig = mock(PartitionRegionConfig.class);
+    when(partitionRegionConfig.getNodes()).thenReturn(configNodes);
+    when(prRoot.get(any())).thenReturn(partitionRegionConfig);
+    // Wire up the distributed system / cache plumbing the send path touches;
+    // a non-distributed transaction manager keeps the message path simple.
+    InternalDistributedSystem internalDistributedSystem = mock(InternalDistributedSystem.class);
+    when(internalDistributedSystem.getDistributionManager()).thenReturn(distributionManager);
+    when(partitionedRegion.getSystem()).thenReturn(internalDistributedSystem);
+    InternalCache internalCache = mock(InternalCache.class);
+    TXManagerImpl txManager = mock(TXManagerImpl.class);
+    when(txManager.isDistributed()).thenReturn(false);
+    when(internalCache.getTxManager()).thenReturn(txManager);
+    when(partitionedRegion.getCache()).thenReturn(internalCache);
+
+    when(distributionManager.getCancelCriterion()).thenReturn(mock(CancelCriterion.class));
+    when(distributionManager.getStats()).thenReturn(mock(DMStats.class));
+
+    partitionedRegionClear.sendPartitionedRegionClearMessage(regionEvent,
+        PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR);
+
+    // The clear message must actually go out to the qualifying PR node.
+    verify(distributionManager, times(1)).putOutgoing(any());
+  }
+
+  @Test
+  public void doClearAcquiresAndReleasesDistributedClearLockAndCreatesAllPrimaryBuckets() {
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    // Type-safe emptyList() instead of the raw Collections.EMPTY_LIST constant.
+    doReturn(Collections.emptyList()).when(spyPartitionedRegionClear).clearRegion(regionEvent,
+        false, null);
+
+    spyPartitionedRegionClear.doClear(regionEvent, false);
+
+    // The distributed lock is both acquired and released, and primaries assigned.
+    verify(spyPartitionedRegionClear, times(1)).acquireDistributedClearLock(any());
+    verify(spyPartitionedRegionClear, times(1)).releaseDistributedClearLock(any());
+    verify(spyPartitionedRegionClear, times(1)).assignAllPrimaryBuckets();
+  }
+
+  @Test
+  public void doClearInvokesCacheWriterWhenCacheWriteIsSet() {
+    boolean cacheWrite = true;
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    // Type-safe emptyList() instead of the raw Collections.EMPTY_LIST constant.
+    doReturn(Collections.emptyList()).when(spyPartitionedRegionClear).clearRegion(regionEvent,
+        cacheWrite, null);
+
+    spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
+
+    verify(spyPartitionedRegionClear, times(1)).invokeCacheWriter(regionEvent);
+  }
+
+  @Test
+  public void doClearDoesNotInvokesCacheWriterWhenCacheWriteIsNotSet() {
+    boolean cacheWrite = false;
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    // Type-safe emptyList() instead of the raw Collections.EMPTY_LIST constant.
+    doReturn(Collections.emptyList()).when(spyPartitionedRegionClear).clearRegion(regionEvent,
+        cacheWrite, null);
+
+    spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
+
+    verify(spyPartitionedRegionClear, times(0)).invokeCacheWriter(regionEvent);
+  }
+
+  @Test
+  public void doClearObtainsAndReleasesLockForClearWhenRegionHasListener() {
+    boolean cacheWrite = false;
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    // A listener alone (no client interest) is enough to require the clear lock.
+    when(partitionedRegion.hasListener()).thenReturn(true);
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(false);
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+    // Type-safe emptyList() instead of the raw Collections.EMPTY_LIST constant.
+    doReturn(Collections.emptyList()).when(spyPartitionedRegionClear).clearRegion(regionEvent,
+        cacheWrite, null);
+
+    spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
+
+    verify(spyPartitionedRegionClear, times(1)).obtainLockForClear(regionEvent);
+    verify(spyPartitionedRegionClear, times(1)).releaseLockForClear(regionEvent);
+  }
+
+  @Test
+  public void doClearObtainsAndReleasesLockForClearWhenRegionHasClientInterest() {
+    boolean cacheWrite = false;
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    // Client interest alone (no listener) is enough to require the clear lock.
+    when(partitionedRegion.hasListener()).thenReturn(false);
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(true);
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+    // Type-safe emptyList() instead of the raw Collections.EMPTY_LIST constant.
+    doReturn(Collections.emptyList()).when(spyPartitionedRegionClear).clearRegion(regionEvent,
+        cacheWrite, null);
+
+    spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
+
+    verify(spyPartitionedRegionClear, times(1)).obtainLockForClear(regionEvent);
+    verify(spyPartitionedRegionClear, times(1)).releaseLockForClear(regionEvent);
+  }
+
+  @Test
+  public void doClearDoesNotObtainLockForClearWhenRegionHasNoListenerAndNoClientInterest() {
+    boolean cacheWrite = false;
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    // Neither a listener nor client interest: the clear lock must be skipped.
+    when(partitionedRegion.hasListener()).thenReturn(false);
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(false);
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+    // Type-safe emptyList() instead of the raw Collections.EMPTY_LIST constant.
+    doReturn(Collections.emptyList()).when(spyPartitionedRegionClear).clearRegion(regionEvent,
+        cacheWrite, null);
+
+    spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
+
+    verify(spyPartitionedRegionClear, times(0)).obtainLockForClear(regionEvent);
+    verify(spyPartitionedRegionClear, times(0)).releaseLockForClear(regionEvent);
+  }
+
+  @Test
+  public void doClearThrowsPartitionedRegionPartialClearException() {
+    boolean cacheWrite = false;
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(partitionedRegion.hasListener()).thenReturn(false);
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(false);
+    // One bucket expected, but clearRegion reports none cleared -> partial clear.
+    when(partitionedRegion.getTotalNumberOfBuckets()).thenReturn(1);
+    when(partitionedRegion.getName()).thenReturn("prRegion");
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+    // Type-safe emptyList() instead of the raw Collections.EMPTY_LIST constant.
+    doReturn(Collections.emptyList()).when(spyPartitionedRegionClear).clearRegion(regionEvent,
+        cacheWrite, null);
+
+    Throwable thrown =
+        catchThrowable(() -> spyPartitionedRegionClear.doClear(regionEvent, cacheWrite));
+
+    assertThat(thrown)
+        .isInstanceOf(PartitionedRegionPartialClearException.class)
+        .hasMessage(
+            "Unable to clear all the buckets from the partitioned region prRegion, either data (buckets) moved or member departed.");
+  }
+
+  @Test
+  public void handleClearFromDepartedMemberReleasesTheLockForRequesterDeparture() {
+    // Record a lock holder first so the spy copies that state.
+    InternalDistributedMember lockHolder = mock(InternalDistributedMember.class);
+    partitionedRegionClear.lockForListenerAndClientNotification.setLocked(lockHolder);
+    PartitionedRegionClear spyClear = spy(partitionedRegionClear);
+
+    // Departure of the member holding the clear lock must release it locally.
+    spyClear.handleClearFromDepartedMember(lockHolder);
+
+    verify(spyClear, times(1)).releaseClearLockLocal();
+  }
+
+  @Test
+  public void handleClearFromDepartedMemberDoesNotReleasesTheLockForNonRequesterDeparture() {
+    // The lock is held by one member while a different member departs.
+    InternalDistributedMember lockHolder = mock(InternalDistributedMember.class);
+    InternalDistributedMember departedMember = mock(InternalDistributedMember.class);
+    partitionedRegionClear.lockForListenerAndClientNotification.setLocked(lockHolder);
+    PartitionedRegionClear spyClear = spy(partitionedRegionClear);
+
+    spyClear.handleClearFromDepartedMember(departedMember);
+
+    // A non-requester departure must leave the lock alone.
+    verify(spyClear, times(0)).releaseClearLockLocal();
+  }
+
+  @Test
+  public void partitionedRegionClearRegistersMembershipListener() {
+    // The listener handed out by the clear instance must already have been
+    // registered with the distribution manager.
+    MembershipListener listener = partitionedRegionClear.getPartitionedRegionClearListener();
+
+    verify(distributionManager, times(1)).addMembershipListener(listener);
+  }
+
+  @Test
+  public void lockRequesterDepartureReleasesTheLock() {
+    InternalDistributedMember lockHolder = mock(InternalDistributedMember.class);
+    partitionedRegionClear.lockForListenerAndClientNotification.setLocked(lockHolder);
+    PartitionedRegionClear.PartitionedRegionClearListener clearListener =
+        partitionedRegionClear.getPartitionedRegionClearListener();
+
+    clearListener.memberDeparted(distributionManager, lockHolder, true);
+
+    // Departure of the lock holder flags the membership change and drops the requester.
+    assertThat(partitionedRegionClear.getMembershipChange()).isTrue();
+    assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
+        .isNull();
+  }
+
+  @Test
+  public void nonLockRequesterDepartureDoesNotReleasesTheLock() {
+    // The lock is held by one member while a different member departs.
+    InternalDistributedMember lockHolder = mock(InternalDistributedMember.class);
+    InternalDistributedMember departedMember = mock(InternalDistributedMember.class);
+    partitionedRegionClear.lockForListenerAndClientNotification.setLocked(lockHolder);
+    PartitionedRegionClear.PartitionedRegionClearListener clearListener =
+        partitionedRegionClear.getPartitionedRegionClearListener();
+
+    clearListener.memberDeparted(distributionManager, departedMember, true);
+
+    // The membership change is still flagged, but the lock requester remains set.
+    assertThat(partitionedRegionClear.getMembershipChange()).isTrue();
+    assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
+        .isNotNull();
+  }
+}