Posted to commits@geode.apache.org by nn...@apache.org on 2021/04/30 01:16:38 UTC

[geode] branch feature/GEODE-7665 updated (a8d9e52 -> 477b3fe)

This is an automated email from the ASF dual-hosted git repository.

nnag pushed a change to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git.


    omit a8d9e52  GEODE-9132: Fix locking in PRClearCreateIndexDUnitTest
    omit 145bf22  GEODE-9132: Use factory method to avoid escaped reference
    omit cc292e5  GEODE-9132: Always acquire write lock for PR clear
    omit e39b748  GEODE-9132: Fix assertion in ResourceUtils
    omit 33b3293  GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 3
    omit d10aad6  GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 2
    omit 871a591  GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 1
    omit 0e7bca0  Fixup AnalyzeCoreSerializablesJUnitTest for PartitionedRegionPartialClearException
    omit 549e9ad  GEODE-9132: Remove ClearPRMessage from sanctionedDataSerializables.txt
    omit 5f55886  GEODE-9132: Cleanup PartitionedRegionPartialClearException
    omit ccc5e67  GEODE-9132: Undelete unnecessary uses of final
    omit 62edccb  GEODE-9132: Remove unused DSFID constants
    omit fb90ad4  GEODE-9132: Cleanup PartitionedRegionClearMessage
    omit 274e191  GEODE-9132: Fixup PartitionResponse constructors
    omit 902984f  GEODE-9132: Delete ClearPRMessage
    omit a4852fe  GEODE-7674: Clear on PR with lucene index should throw exception (#6317)
    omit 248a56a  GEODE-7683: introduce BR.cmnClearRegion
     add ac4ebe9  GEODE-9113: Extend wait period for hscanSnaphots_shouldExpireAfterExpiryPeriod (#6303)
     add dd956a9  GEODE-9065 - Introduce ClasspathService abstraction and create LegacyClasspathServiceImpl. (#6223)
     add 8e1c7cd  GEODE-9128: Remove host name look-up from JGAddress (#6298)
     add 3b4d4ac  GEODE-9153: Fix alpine-tools docker image (#6321)
     add 5567fe2  GEODE-9145: update CODEOWNERS (#6316)
     add 3254cc1  Revert "GEODE-9055: drop patch version in docs if 0" (#6327)
     add 4636f29  Revert "GEODE-9064: Configure serial filter during ManagementAgent start (#6196)" (#6324)
     add e65228f  GEODE-8899: Temporarily restore dunitParallelForks (#6335)
     add 5f7abc9  GEODE-9161: Fix simple Gradle 7 warnings (#6315)
     add 15ab919  add CODEWATCHERS for jar-deployment (#6345)
     add 1ceca81  GEODE-9172: Add Junit rule to start docker-based redis cluster (#6346)
     add f309ffe  fix a missing flag that broke the script (#6357)
     add 4c1e7a0  GEODE-9181: update Dockerfile to use closer.lua (#6358)
     add d902cef  add 1.12.2 to old versions on develop (#6356)
     add e933b51  GEODE-8980: bump deps (#6342)
     add 78598cf  fix CODEOWNERS rule that was intended to match dependencies only, not tests (#6340)
     add 8b2ee6d  GEODE-9064: Configure serial filter during ManagementAgent start (#6196) (#6336)
     add f366940  Revert "GEODE-8977: include syncs in thread monitor stack (#6248)" (#6368)
     add 1693168  GEODE-9095: Resolve package splitting http-service (#6220)
     add 8665c1d  GEODE-9041: add mkevo to several sections of CODEWATCHERS (#6371)
     add ddd7bcf  Revert "GEODE-9172: Add Junit rule to start docker-based redis cluster (#6346)" (#6375)
     add 2e2c8f5  GEODE-9197: don't stress deleted tests (#6376)
     add 9d577cf  GEODE-5337: End-port is not exclusive when creating a gw receiver (#6370)
     add 3d12601  GEODE-8990: Add await logic to prevent CargoTestBase tests from running before members (#6355)
     add 3daa950  GEODE-9172: Add Junit rule to start docker-based redis cluster (#6379)
     add 0b3eb04  GEODE-9192: test-container fixes (#6381)
     add 1ce8170  GEODE-9161: Fix some more gradle 7 warnings (#6369)
     add 8518348  GEODE-9155: change frequency of passive expiration (#6325)
     add 55921a4  GEODE-9139 SSLException in starting up a Locator (#6308)
     add 02d3f88  GEODE-4709: Add list gateways output sample in UG (#6372)
     add 465173c  GEODE-9174: fix displaying the result of a gfsh query containing a UUID (#6365)
     add b18836c  GEODE-9163: Mark docs for apis compatible with Redis as experimental (#6374)
     add f8b07a0  GEODE-9180: warn when heartbeat thread oversleeps (#6360)
     add 38a3540  GEODE-9141: (1 of 2) rename ByteBufferSharingImpl to ByteBufferVendor
     add 9d0d4d1  GEODE-9141: (2 of 2) Handle in-buffer concurrency * Connection uses a ByteBufferVendor to mediate access to inputBuffer * Prevent return to pool before socket closer is finished
     add a533575  GEODE-9156: Replace docker-compose-rule with testcontainers in geode-connectors (#6378)
     add d7cf967  GEODE-9158: Add CLUSTER NODES, SLOTS and INFO commands (#6359)
     add 97700d3  GEODE-8862: Send grant message to remote node using waiting thread pool. (#6361)
     add 3c2035c  GEODE-9079: downsize non-intensive CI instances (#6206)
     add 8d99d08  GEODE-8977: improve thread monitor to log lock information (#6377)
     add f6fee95  GEODE-9161: Use non-deprecated property in Gradle build (#6388)
     add 94104d2  GEODE-9093 - Resolve package splitting for geode-connectors (#6217)
     add f8793f8  GEODE-9092: Resolve package splitting geode-common (#6216)
     add f260ddd  GEODE-9208: Allow StressNewTest to pick up tests from all directories (#6391)
     add c1a8e17  GEODE-9164: User Guide - relocate Redis section to "Developing" from "Tools and Modules" (#6387)
     add 92d545a  GEODE-9216 - User Guide: "gemfire.enableTcpKeepAlive" should be "gemfire.setTcpKeepAlive" (#6400)
     add ebdf8ed  GEODE-9079: increase timeouts (#6404)
     add fa88b95  Revert "GEODE-9155: change frequency of passive expiration (#6325)" (#6398)
     new a64d533  GEODE-7683: introduce BR.cmnClearRegion
     new a75e7f1  GEODE-7674: Clear on PR with lucene index should throw exception (#6317)
     new a6b7732  GEODE-9132: Delete ClearPRMessage
     new f0d2f11  GEODE-9132: Fixup PartitionResponse constructors
     new 6f0bbb1  GEODE-9132: Cleanup PartitionedRegionClearMessage
     new 4a423ae  GEODE-9132: Remove unused DSFID constants
     new 6d755163 GEODE-9132: Undelete unnecessary uses of final
     new fca9c02  GEODE-9132: Cleanup PartitionedRegionPartialClearException
     new 1399c29  GEODE-9132: Remove ClearPRMessage from sanctionedDataSerializables.txt
     new 8dacfe6  Fixup AnalyzeCoreSerializablesJUnitTest for PartitionedRegionPartialClearException
     new 41eba46  GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 1
     new 5e007a6  GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 2
     new 1466a44  GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 3
     new 38631fc  GEODE-9132: Fix assertion in ResourceUtils
     new 30f5e90  GEODE-9132: Always acquire write lock for PR clear
     new 76ed734  GEODE-9132: Use factory method to avoid escaped reference
     new 477b3fe  GEODE-9132: Fix locking in PRClearCreateIndexDUnitTest

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (a8d9e52)
            \
             N -- N -- N   refs/heads/feature/GEODE-7665 (477b3fe)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 17 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
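
As a rough illustration of that reachability rule only -- a toy model in
plain Java, not git's actual object store or any real git API -- a commit
"survives" exactly while some ref can still reach it through parent links:

    import java.util.HashMap;
    import java.util.Map;

    // Toy commit graph mirroring the diagram above, with the O and N
    // chains each collapsed to a single hop back to the base commit B.
    class ReachabilitySketch {
      private static final Map<String, String> PARENT = new HashMap<>();

      static boolean reachable(String tip, String target) {
        for (String commit = tip; commit != null; commit = PARENT.get(commit)) {
          if (commit.equals(target)) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) {
        PARENT.put("a8d9e52", "B"); // old tip (the omitted O commits)
        PARENT.put("477b3fe", "B"); // new tip (the N commits)

        String branchTip = "477b3fe"; // refs/heads/feature/GEODE-7665 now
        // The omitted tip is unreachable from this branch after the push...
        System.out.println(reachable(branchTip, "a8d9e52")); // false
        // ...but "not gone" while any other ref still points into its chain.
        System.out.println(reachable("a8d9e52", "B")); // true from such a ref
      }
    }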


Summary of changes:
 CODEOWNERS                                         |  59 ++--
 CODEWATCHERS                                       |  35 +-
 .../src/test/resources/expected-pom.xml            |  86 ++---
 buildSrc/build.gradle                              |   5 +-
 .../gradle/plugins/DependencyConstraints.groovy    |  42 +--
 ci/images/alpine-tools/Dockerfile                  |   3 +-
 ci/images/test-container/Dockerfile                |   9 +-
 ci/pipelines/geode-build/jinja.template.yml        |   2 +-
 ci/pipelines/images/jinja.template.yml             |   8 +-
 ci/pipelines/meta/deploy_meta.sh                   |   2 +-
 ci/pipelines/shared/jinja.variables.yml            |  36 +-
 ci/scripts/execute_build.sh                        |   3 +-
 ci/scripts/execute_tests.sh                        |   2 +-
 ci/scripts/repeat-new-tests.sh                     |  12 +-
 ci/scripts/rsync_code_down.sh                      |   9 +-
 dev-tools/dependencies/bump.sh                     |   4 +-
 dev-tools/release/create_support_branches.sh       |   4 +-
 dev-tools/release/promote_rc.sh                    |   2 +-
 dev-tools/release/set_versions.sh                  |   4 +-
 docker/Dockerfile                                  |   3 +-
 extensions/geode-modules-assembly/build.gradle     |  10 +-
 .../release/session/bin/modify_war                 |   2 +
 extensions/geode-modules-session/build.gradle      |   8 +-
 extensions/session-testing-war/build.gradle        |   4 +-
 geode-apis-compatible-with-redis/README.md         |   3 +-
 geode-apis-compatible-with-redis/build.gradle      |  11 +-
 .../apache/geode/redis/NativeRedisClusterTest.java |  80 +++++
 .../cluster/ClusterNativeRedisAcceptanceTest.java  |  17 +-
 .../java/org/apache/geode/redis/ClusterNode.java   |  67 ++++
 .../java/org/apache/geode/redis/ClusterNodes.java  | 126 +++++++
 .../geode/redis/NativeRedisClusterTestRule.java    | 128 +++++++
 .../proxy/ClusterNodesResponseProcessor.java       |  65 ++++
 .../proxy/ClusterSlotsResponseProcessor.java       | 113 +++++++
 .../geode/redis/internal/proxy/HostPort.java       |  46 ++-
 .../redis/internal/proxy/MovedResponseHandler.java |  58 ++++
 .../internal/proxy/NoopRedisResponseProcessor.java |  15 +-
 .../geode/redis/internal/proxy/RedisProxy.java     |  87 +++++
 .../internal/proxy/RedisProxyInboundHandler.java   | 169 ++++++++++
 .../internal/proxy/RedisProxyOutboundHandler.java  |  79 +++++
 .../internal/proxy/RedisResponseProcessor.java     |  14 +-
 .../commonTest/resources/redis-cluster-compose.yml |  65 ++++
 .../cluster/ClusterSlotsAndNodesDUnitTest.java     | 163 +++++++++
 .../cluster/JedisAndLettuceClusterDUnitTest.java   |  89 +++++
 .../cluster/AbstractClusterIntegrationTest.java    |  56 ++++
 .../executor/cluster/ClusterIntegrationTest.java   |  18 +-
 .../geode/redis/internal/GeodeRedisServer.java     |   8 +-
 .../ClusterParameterRequirements.java              |  36 ++
 .../geode/redis/internal/RedisCommandType.java     |   5 +
 .../geode/redis/internal/RedisConstants.java       |   2 +
 .../cluster/BucketInfoRetrievalFunction.java       | 118 +++++++
 .../internal/executor/cluster/ClusterExecutor.java | 203 +++++++++++
 .../internal/netty/ExecutionHandlerContext.java    |  10 +-
 .../redis/internal/netty/NettyRedisServer.java     |   7 +-
 ...de-apis-compatible-with-redis-serializables.txt |   2 +
 .../redis/internal/SupportedCommandsJUnitTest.java |   1 +
 .../geode/redis/internal/data/RedisHashTest.java   |   2 +-
 .../src/test/resources/expected-pom.xml            |  16 +
 geode-assembly/build.gradle                        |  21 +-
 geode-assembly/geode-assembly-test/build.gradle    |   2 +-
 .../apache/geode/session/tests/TomcatInstall.java  |   4 +-
 .../client/sni/ClientSNICQAcceptanceTest.java      |  18 +-
 .../client/sni/DualServerSNIAcceptanceTest.java    |  28 +-
 .../client/sni/GenerateSNIKeyAndTrustStores.java   |   7 +-
 .../client/sni/dual-server-docker-compose.yml      |  70 ++++
 .../geode/client/sni/dual-server-haproxy.cfg       |  36 +-
 .../sni/geode-config/locator-maeve-keystore.jks    | Bin 3529 -> 3514 bytes
 .../geode-config/server-clementine-keystore.jks    | Bin 3537 -> 3520 bytes
 .../sni/geode-config/server-dolores-keystore.jks   | Bin 3533 -> 3515 bytes
 .../geode/client/sni/geode-config/truststore.jks   | Bin 1129 -> 1129 bytes
 .../geode/client/sni/scripts/create-regions.gfsh   |  14 +-
 .../geode/client/sni/scripts/geode-starter-2.gfsh  |  23 --
 .../geode/client/sni/scripts/geode-starter.gfsh    |   2 +-
 .../geode/client/sni/scripts/locator-maeve.gfsh    |  12 +-
 .../client/sni/scripts/server-clementine.gfsh      |  12 +-
 .../geode/client/sni/scripts/server-dolores.gfsh   |  12 +-
 .../rest/DeploymentManagementRedployDUnitTest.java |   3 +-
 .../DeploymentSemanticVersionJarDUnitTest.java     |   3 +-
 .../apache/geode/session/tests/CargoTestBase.java  |  55 ++-
 .../session/tests/GenericAppServerInstall.java     |   2 +-
 .../tests/Jetty9CachingClientServerTest.java       |  13 +-
 .../StartLocatorCommandIntegrationTest.java        |   8 +-
 .../integrationTest/resources/assembly_content.txt |  48 +--
 .../resources/dependency_classpath.txt             |  42 +--
 .../integrationTest/resources/expected_jars.txt    |   2 +
 geode-assembly/src/main/dist/LICENSE               |   4 +-
 .../source/subnavs/geode-subnav.erb                |  47 +--
 .../internal/lang/ComputeIfAbsentBenchmark.java    |   2 +
 .../apache/geode/internal/inet/LocalHostUtil.java  |   2 +-
 .../internal/lang/{ => utils}/JavaWorkarounds.java |   2 +-
 .../apache/geode/internal/{ => utils}/Retry.java   |   2 +-
 .../geode/internal/{ => utils}/RetryTest.java      |   3 +-
 geode-concurrency-test/build.gradle                |   2 +-
 geode-connectors/build.gradle                      |  12 +-
 .../jdbc/JdbcAsyncWriterIntegrationTest.java       |   6 +-
 .../geode/connectors/jdbc/JdbcDistributedTest.java |  91 +++--
 .../connectors/jdbc/JdbcLoaderIntegrationTest.java |   6 +-
 .../connectors/jdbc/JdbcWriterIntegrationTest.java |   6 +-
 .../jdbc/MySqlJdbcAsyncWriterIntegrationTest.java  |  11 +-
 .../connectors/jdbc/MySqlJdbcDistributedTest.java  |  40 ++-
 .../jdbc/MySqlJdbcLoaderIntegrationTest.java       |  11 +-
 .../jdbc/MySqlJdbcWriterIntegrationTest.java       |  11 +-
 .../PostgresJdbcAsyncWriterIntegrationTest.java    |  11 +-
 .../jdbc/PostgresJdbcDistributedTest.java          |  40 ++-
 .../jdbc/PostgresJdbcLoaderIntegrationTest.java    |  22 +-
 .../jdbc/PostgresJdbcWriterIntegrationTest.java    |  29 +-
 .../connectors/jdbc/TestDataSourceFactory.java     |   8 +-
 .../MySqlTableMetaDataManagerIntegrationTest.java  |   6 +-
 ...ostgresTableMetaDataManagerIntegrationTest.java |   6 +-
 .../jdbc/test/junit/rules/MySqlConnectionRule.java |  23 +-
 .../test/junit/rules/PostgresConnectionRule.java   |  18 +-
 .../junit/rules/SqlDatabaseConnectionRule.java     |  61 ++--
 .../apache/geode/connectors/jdbc => }/mysql.yml    |   6 +-
 .../apache/geode/connectors/jdbc => }/postgres.yml |   4 +-
 .../AnalyzeConnectorsSerializablesJUnitTest.java   |   6 +
 .../ConnectorsDistributedSystemService.java        |   3 +-
 .../jdbc/internal/TableMetaDataManager.java        |   2 +-
 .../cli/converters/PoolPropertyConverter.java      |   2 +-
 ...e.distributed.internal.DistributedSystemService |   2 +-
 .../sanctioned-geode-connectors-serializables.txt  |   0
 geode-core/build.gradle                            |  20 +-
 ...ReconnectWithClusterConfigurationDUnitTest.java |   3 +-
 .../FunctionToFileTrackerIntegrationTest.java      |  12 +-
 .../internal/InternalLocatorIntegrationTest.java   |   8 +-
 .../internal/membership/MembershipJUnitTest.java   |   9 +-
 .../backup/IncrementalBackupIntegrationTest.java   |   8 +-
 .../classloader/ClassPathLoaderDeployTest.java     |   8 +-
 .../ClassPathLoaderIntegrationTest.java            |  29 +-
 .../classloader/ClassPathLoaderJUnitTest.java      |  92 +++--
 .../ThreadsMonitoringIntegrationTest.java          |  24 +-
 ...LSocketHostNameVerificationIntegrationTest.java |   6 +-
 .../internal/net/SSLSocketIntegrationTest.java     |   3 +-
 .../apache/geode/codeAnalysis/excludedClasses.txt  |   2 +-
 .../internal/execute/FunctionToFileTracker.java    |  60 +---
 .../cache/query/internal/AttributeDescriptor.java  |   2 +-
 .../java/org/apache/geode/distributed/Locator.java |  39 ++-
 .../apache/geode/distributed/LocatorLauncher.java  |  78 +++--
 .../internal/InternalDistributedSystem.java        |   6 +-
 .../distributed/internal/InternalLocator.java      |  66 ++--
 .../geode/distributed/internal/ServerLocator.java  |   7 +-
 .../internal/locks/DLockRequestProcessor.java      |  32 +-
 .../internal/membership/adapter/ServiceConfig.java |   5 +-
 .../apache/geode/internal/DistributionLocator.java |  30 +-
 .../admin/remote/DistributionLocatorId.java        |   7 +-
 .../internal/cache/ClusterConfigurationLoader.java |   6 +-
 .../geode/internal/cache/GemFireCacheImpl.java     |   5 +-
 .../internal/cache/backup/BackupDefinition.java    |   2 +-
 .../internal/cache/backup/BackupFileCopier.java    |   2 +-
 .../geode/internal/cache/backup/BackupTask.java    |   4 +-
 .../execute/metrics/FunctionStatsManager.java      |   2 +-
 .../cache/tier/sockets/ClientHealthMonitor.java    |   2 +-
 .../internal/classloader/ClassPathLoader.java      | 333 ++++--------------
 .../internal/classloader/ClasspathService.java     |  58 ++++
 .../DeployJarChildFirstClassLoader.java            |   2 +-
 .../deployment/DeploymentServiceFactory.java}      |  53 ++-
 .../deployment}/JarDeploymentService.java          |   2 +-
 .../monitoring/ThreadsMonitoringProcess.java       | 122 ++++++-
 .../monitoring/executor/AbstractExecutor.java      |  36 +-
 .../monitoring/executor/SuspendableExecutor.java   |   4 +
 .../org/apache/geode/internal/net/BufferPool.java  |   4 +-
 .../geode/internal/net/ByteBufferSharing.java      |  15 +
 .../geode/internal/net/ByteBufferSharingNoOp.java  |   7 +-
 ...ufferSharingImpl.java => ByteBufferVendor.java} | 146 +++++---
 .../apache/geode/internal/net/NioSslEngine.java    |  56 ++--
 .../apache/geode/internal/net/SocketCreator.java   |  17 +-
 .../org/apache/geode/internal/tcp/Connection.java  | 258 +++++++-------
 .../apache/geode/internal/tcp/ConnectionTable.java |   2 +-
 .../realizers/DeploymentRealizer.java              |   6 +-
 .../distributed/internal/InternalLocatorTest.java  |   3 +-
 .../internal/locks/DLockRequestProcessorTest.java  | 101 ++++++
 .../internal/classloader/ClassPathLoaderTest.java  |   5 +-
 .../executor/AbstractExecutorGroupJUnitTest.java   |  17 +-
 .../internal/net/ByteBufferConcurrencyTest.java    |  16 +-
 ...ringImplTest.java => ByteBufferVendorTest.java} |  40 ++-
 .../geode/internal/net/NioSslEngineTest.java       |  41 +--
 .../apache/geode/internal/tcp/ConnectionTest.java  |   1 +
 geode-core/src/test/resources/expected-pom.xml     |   0
 .../cache/query/cq/internal/CqServiceImpl.java     |   2 +-
 geode-deployment/build.gradle                      |   2 +-
 .../backup/BackupFileCopierIntegrationTest.java    |   8 +-
 .../internal/LegacyClasspathServiceImpl.java       | 372 +++++++--------------
 .../geode/deployment/internal/JarDeployer.java     |   6 +-
 .../{ => legacy}/LegacyJarDeploymentService.java   |  29 +-
 ...che.geode.internal.classloader.ClasspathService |   1 +
 ....geode.internal.deployment.JarDeploymentService |   1 +
 .../internal/JarDeployerDeadlockTest.java          |  12 +-
 geode-docs/developing/book_intro.html.md.erb       |   8 +
 .../geode_apis_compatible_with_redis.html.md.erb   |   6 +-
 .../logging/setting_up_logging.html.md.erb         |   2 +-
 .../monitor_tune/socket_tcp_keepalive.html.md.erb  |   2 +-
 geode-docs/tools_modules/book_intro.html.md.erb    |   4 -
 .../gfsh/command-pages/create.html.md.erb          |   4 +-
 .../gfsh/command-pages/list.html.md.erb            |  89 +++--
 .../setting_up_a_multisite_system.html.md.erb      | 105 +++---
 geode-dunit/build.gradle                           |   4 +-
 .../cli/commands/QueryCommandDUnitTestBase.java    |  23 ++
 .../internal/configuration/ClusterConfig.java      |   6 +-
 .../commands/DeployCommandRedeployDUnitTest.java   |   3 +-
 .../DeploySemanticVersionJarDUnitTest.java         |   3 +-
 .../cli/commands/DeployWithGroupsDUnitTest.java    |   5 +-
 .../management/internal/cli/CommandManager.java    |   4 +-
 .../internal/cli/commands/StartLocatorCommand.java |   9 +-
 .../internal/cli/domain/DataCommandResult.java     |   3 +
 .../cli/functions/ListDeployedFunction.java        |   6 +-
 .../internal/cli/functions/UndeployFunction.java   |  10 +-
 .../internal/cli/shell/JmxOperationInvoker.java    |   3 +-
 geode-http-service/build.gradle                    |   2 +-
 .../{ => http/service}/InternalHttpService.java    |   4 +-
 .../org.apache.geode.internal.cache.CacheService   |   2 +-
 .../internal/InternalHttpServiceJunitTest.java     |   2 +-
 geode-junit/build.gradle                           |   4 +-
 .../geode/test/util/StressNewTestHelper.java       |   6 +-
 geode-logging/build.gradle                         |   4 +-
 geode-lucene/geode-lucene-test/build.gradle        |   4 +-
 .../geode/cache/lucene/FlatFormatSerializer.java   |   2 +-
 geode-management/build.gradle                      |   2 +-
 geode-membership/build.gradle                      |   4 +-
 .../gms/fd/GMSHealthMonitorJUnitTest.java          |  47 +++
 .../locator/GMSLocatorRecoveryIntegrationTest.java |   5 +-
 .../gms/membership/GMSJoinLeaveJUnitTest.java      |   2 +-
 .../membership/api/MembershipLocatorBuilder.java   |   4 +-
 .../internal/membership/gms/GMSMemberData.java     |   2 -
 .../gms/MembershipLocatorBuilderImpl.java          |   6 +-
 .../membership/gms/fd/GMSHealthMonitor.java        | 192 ++++++-----
 .../membership/gms/locator/GMSLocator.java         |   7 +-
 .../gms/locator/MembershipLocatorImpl.java         |   8 +-
 .../membership/gms/messenger/GMSEncrypt.java       |   2 +-
 .../membership/gms/messenger/JGAddress.java        |   4 +-
 .../membership/gms/messenger/JGroupsMessenger.java |  60 +++-
 geode-pulse/build.gradle                           |   2 +-
 geode-pulse/geode-pulse-test/build.gradle          |  10 +-
 .../geode/tools/pulse/tests/rules/ServerRule.java  |   2 +-
 geode-rebalancer/build.gradle                      |   4 +-
 geode-serialization/build.gradle                   |   2 +-
 .../internal/tcpserver/HostAddress.java            |  56 ++++
 .../internal/tcpserver/HostAndPort.java            |  81 +----
 .../internal/tcpserver/InetSocketWrapper.java      | 100 ++++++
 geode-wan/build.gradle                             |   2 +-
 .../geode/internal/cache/wan/WANTestBase.java      |   3 +-
 geode-web-api/build.gradle                         |   2 +-
 geode-web-management/build.gradle                  |   2 +-
 geode-web/build.gradle                             |   4 +-
 gradle/java.gradle                                 |   4 +-
 gradle/test.gradle                                 |   2 +-
 settings.gradle                                    |   1 +
 244 files changed, 4385 insertions(+), 2100 deletions(-)
 create mode 100644 geode-apis-compatible-with-redis/src/acceptanceTest/java/org/apache/geode/redis/NativeRedisClusterTest.java
 copy geode-connectors/src/integrationTest/java/org/apache/geode/codeAnalysis/AnalyzeConnectorsSerializablesJUnitTest.java => geode-apis-compatible-with-redis/src/acceptanceTest/java/org/apache/geode/redis/internal/executor/cluster/ClusterNativeRedisAcceptanceTest.java (66%)
 create mode 100644 geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/ClusterNode.java
 create mode 100644 geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/ClusterNodes.java
 create mode 100644 geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/NativeRedisClusterTestRule.java
 create mode 100644 geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/internal/proxy/ClusterNodesResponseProcessor.java
 create mode 100644 geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/internal/proxy/ClusterSlotsResponseProcessor.java
 copy geode-core/src/main/java/org/apache/geode/internal/monitoring/executor/SuspendableExecutor.java => geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/internal/proxy/HostPort.java (53%)
 create mode 100644 geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/internal/proxy/MovedResponseHandler.java
 copy geode-connectors/src/integrationTest/java/org/apache/geode/codeAnalysis/AnalyzeConnectorsSerializablesJUnitTest.java => geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/internal/proxy/NoopRedisResponseProcessor.java (68%)
 create mode 100644 geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/internal/proxy/RedisProxy.java
 create mode 100644 geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/internal/proxy/RedisProxyInboundHandler.java
 create mode 100644 geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/internal/proxy/RedisProxyOutboundHandler.java
 copy geode-connectors/src/integrationTest/java/org/apache/geode/codeAnalysis/AnalyzeConnectorsSerializablesJUnitTest.java => geode-apis-compatible-with-redis/src/commonTest/java/org/apache/geode/redis/internal/proxy/RedisResponseProcessor.java (68%)
 create mode 100644 geode-apis-compatible-with-redis/src/commonTest/resources/redis-cluster-compose.yml
 create mode 100644 geode-apis-compatible-with-redis/src/distributedTest/java/org/apache/geode/redis/internal/executor/cluster/ClusterSlotsAndNodesDUnitTest.java
 create mode 100644 geode-apis-compatible-with-redis/src/distributedTest/java/org/apache/geode/redis/internal/executor/cluster/JedisAndLettuceClusterDUnitTest.java
 create mode 100644 geode-apis-compatible-with-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/cluster/AbstractClusterIntegrationTest.java
 copy geode-connectors/src/integrationTest/java/org/apache/geode/codeAnalysis/AnalyzeConnectorsSerializablesJUnitTest.java => geode-apis-compatible-with-redis/src/integrationTest/java/org/apache/geode/redis/internal/executor/cluster/ClusterIntegrationTest.java (69%)
 create mode 100644 geode-apis-compatible-with-redis/src/main/java/org/apache/geode/redis/internal/ParameterRequirements/ClusterParameterRequirements.java
 create mode 100644 geode-apis-compatible-with-redis/src/main/java/org/apache/geode/redis/internal/cluster/BucketInfoRetrievalFunction.java
 create mode 100644 geode-apis-compatible-with-redis/src/main/java/org/apache/geode/redis/internal/executor/cluster/ClusterExecutor.java
 create mode 100644 geode-assembly/src/acceptanceTest/resources/org/apache/geode/client/sni/dual-server-docker-compose.yml
 copy geode-connectors/src/acceptanceTest/resources/org/apache/geode/connectors/jdbc/internal/mysql.yml => geode-assembly/src/acceptanceTest/resources/org/apache/geode/client/sni/dual-server-haproxy.cfg (52%)
 rename geode-connectors/src/acceptanceTest/resources/org/apache/geode/connectors/jdbc/internal/postgres.yml => geode-assembly/src/acceptanceTest/resources/org/apache/geode/client/sni/scripts/create-regions.gfsh (74%)
 delete mode 100644 geode-assembly/src/acceptanceTest/resources/org/apache/geode/client/sni/scripts/geode-starter-2.gfsh
 copy geode-connectors/src/acceptanceTest/resources/org/apache/geode/connectors/jdbc/internal/mysql.yml => geode-assembly/src/acceptanceTest/resources/org/apache/geode/client/sni/scripts/locator-maeve.gfsh (62%)
 copy geode-connectors/src/acceptanceTest/resources/org/apache/geode/connectors/jdbc/internal/mysql.yml => geode-assembly/src/acceptanceTest/resources/org/apache/geode/client/sni/scripts/server-clementine.gfsh (61%)
 rename geode-connectors/src/acceptanceTest/resources/org/apache/geode/connectors/jdbc/internal/mysql.yml => geode-assembly/src/acceptanceTest/resources/org/apache/geode/client/sni/scripts/server-dolores.gfsh (62%)
 rename geode-common/src/main/java/org/apache/geode/internal/lang/{ => utils}/JavaWorkarounds.java (96%)
 rename geode-common/src/main/java/org/apache/geode/internal/{ => utils}/Retry.java (98%)
 rename geode-common/src/test/java/org/apache/geode/internal/{ => utils}/RetryTest.java (98%)
 rename geode-connectors/src/acceptanceTest/resources/{org/apache/geode/connectors/jdbc => }/mysql.yml (92%)
 rename geode-connectors/src/acceptanceTest/resources/{org/apache/geode/connectors/jdbc => }/postgres.yml (95%)
 rename geode-connectors/src/main/java/org/apache/geode/{ => connectors/jdbc}/internal/ConnectorsDistributedSystemService.java (93%)
 rename geode-connectors/src/main/java/org/apache/geode/{ => connectors/jdbc}/management/internal/cli/converters/PoolPropertyConverter.java (96%)
 rename geode-connectors/src/main/resources/org/apache/geode/{ => connectors/jdbc}/internal/sanctioned-geode-connectors-serializables.txt (100%)
 create mode 100644 geode-core/src/main/java/org/apache/geode/internal/classloader/ClasspathService.java
 rename geode-core/src/main/java/org/apache/geode/{deployment/internal/JarDeploymentServiceFactory.java => internal/deployment/DeploymentServiceFactory.java} (60%)
 rename geode-core/src/main/java/org/apache/geode/{deployment/internal => internal/deployment}/JarDeploymentService.java (98%)
 rename geode-core/src/main/java/org/apache/geode/internal/net/{ByteBufferSharingImpl.java => ByteBufferVendor.java} (53%)
 create mode 100644 geode-core/src/test/java/org/apache/geode/distributed/internal/locks/DLockRequestProcessorTest.java
 rename geode-core/src/test/java/org/apache/geode/internal/net/{ByteBufferSharingImplTest.java => ByteBufferVendorTest.java} (84%)
 mode change 100644 => 100755 geode-core/src/test/resources/expected-pom.xml
 copy geode-core/src/main/java/org/apache/geode/internal/classloader/ClassPathLoader.java => geode-deployment/geode-deployment-legacy/src/main/java/org/apache/geode/classloader/internal/LegacyClasspathServiceImpl.java (50%)
 mode change 100755 => 100644
 rename geode-deployment/geode-deployment-legacy/src/main/java/org/apache/geode/deployment/internal/{ => legacy}/LegacyJarDeploymentService.java (90%)
 create mode 100644 geode-deployment/geode-deployment-legacy/src/main/resources/META-INF/services/org.apache.geode.internal.classloader.ClasspathService
 create mode 100644 geode-deployment/geode-deployment-legacy/src/main/resources/META-INF/services/org.apache.geode.internal.deployment.JarDeploymentService
 rename geode-docs/{tools_modules => developing}/geode_apis_compatible_with_redis.html.md.erb (98%)
 rename geode-http-service/src/main/java/org/apache/geode/internal/cache/{ => http/service}/InternalHttpService.java (98%)
 create mode 100644 geode-tcp-server/src/main/java/org/apache/geode/distributed/internal/tcpserver/HostAddress.java
 create mode 100644 geode-tcp-server/src/main/java/org/apache/geode/distributed/internal/tcpserver/InetSocketWrapper.java

[geode] 15/17: GEODE-9132: Always acquire write lock for PR clear

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 30f5e90c8e116e90adeec6e2ad847c5427b50942
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Wed Apr 21 15:24:06 2021 -0700

    GEODE-9132: Always acquire write lock for PR clear
---
 ...gionClearWithConcurrentOperationsDUnitTest.java | 110 ++++++++---------
 .../internal/cache/PartitionedRegionClear.java     | 115 ++++++++++++------
 .../internal/cache/PartitionedRegionClearTest.java | 131 +++++++++++++++++----
 3 files changed, 246 insertions(+), 110 deletions(-)
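
Before reading the diff, a plain-Java sketch of the behavioral change this
commit makes (method names follow the code below, but the bodies here are
empty stubs for illustration, not the real implementation). Previously the
bucket write locks were taken only when listener or client notification
required ordered events; this commit acquires them unconditionally, so a
clear can no longer interleave with concurrent operations:

    class ClearLockingSketch {
      // Old shape: lock only when listeners or interested clients exist.
      void clearOld(boolean hasListenersOrClients) {
        if (hasListenersOrClients) {
          obtainLockForClear();
        }
        try {
          clearRegion();
        } finally {
          if (hasListenersOrClients) {
            releaseLockForClear();
          }
        }
      }

      // New shape: always hold the write lock for the duration of the clear.
      void clearNew() {
        obtainLockForClear();
        try {
          clearRegion();
        } finally {
          releaseLockForClear();
        }
      }

      void obtainLockForClear() {}
      void releaseLockForClear() {}
      void clearRegion() {}
    }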

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
index 7ef187f..710ae6f 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
@@ -14,6 +14,9 @@
  */
 package org.apache.geode.internal.cache;
 
+import static java.time.Duration.ofMillis;
+import static org.apache.geode.distributed.ConfigurationProperties.MAX_WAIT_TIME_RECONNECT;
+import static org.apache.geode.distributed.ConfigurationProperties.MEMBER_TIMEOUT;
 import static org.apache.geode.internal.util.ArrayUtils.asList;
 import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
 import static org.apache.geode.test.dunit.VM.getVM;
@@ -21,6 +24,7 @@ import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
 import java.io.Serializable;
+import java.time.Duration;
 import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -73,11 +77,16 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
 
   private static final int BUCKETS = 13;
   private static final String REGION_NAME = "PartitionedRegion";
+  private static final Duration WORK_DURATION = Duration.ofSeconds(15);
 
   @Rule
   public DistributedRule distributedRule = new DistributedRule(3);
   @Rule
-  public CacheRule cacheRule = CacheRule.builder().createCacheInAll().build();
+  public CacheRule cacheRule = CacheRule.builder()
+      .addSystemProperty(MAX_WAIT_TIME_RECONNECT, "1000")
+      .addSystemProperty(MEMBER_TIMEOUT, "2000")
+      .createCacheInAll()
+      .build();
 
   private VM server1;
   private VM server2;
@@ -106,15 +115,15 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     createRegions(regionShortcut);
 
     // Let all VMs continuously execute puts and gets for 60 seconds.
-    final int workMillis = 60000;
+    // final int workMillis = 60000;
     final int entries = 15000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executePuts(entries, workMillis)),
-        server2.invokeAsync(() -> executeGets(entries, workMillis)),
-        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
+        server1.invokeAsync(() -> executePuts(entries, WORK_DURATION)),
+        server2.invokeAsync(() -> executeGets(entries, WORK_DURATION)),
+        accessor.invokeAsync(() -> executeRemoves(entries, WORK_DURATION)));
 
     // Clear the region every second for 60 seconds.
-    getVM(coordinatorVM.getVmId()).invoke(() -> executeClears(workMillis, 1000));
+    getVM(coordinatorVM.getVmId()).invoke(() -> executeClears(WORK_DURATION, ofMillis(1000)));
 
     // Let asyncInvocations finish.
     for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
@@ -142,17 +151,17 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     createRegions(regionShortcut);
 
     // Let all VMs continuously execute putAll and removeAll for 15 seconds.
-    final int workMillis = 15000;
+    // final int workMillis = 15000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executePutAlls(0, 2000, workMillis)),
-        server1.invokeAsync(() -> executeRemoveAlls(0, 2000, workMillis)),
-        server2.invokeAsync(() -> executePutAlls(2000, 4000, workMillis)),
-        server2.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)),
-        accessor.invokeAsync(() -> executePutAlls(4000, 6000, workMillis)),
-        accessor.invokeAsync(() -> executeRemoveAlls(4000, 6000, workMillis)));
+        server1.invokeAsync(() -> executePutAlls(0, 2000, WORK_DURATION)),
+        server1.invokeAsync(() -> executeRemoveAlls(0, 2000, WORK_DURATION)),
+        server2.invokeAsync(() -> executePutAlls(2000, 4000, WORK_DURATION)),
+        server2.invokeAsync(() -> executeRemoveAlls(2000, 4000, WORK_DURATION)),
+        accessor.invokeAsync(() -> executePutAlls(4000, 6000, WORK_DURATION)),
+        accessor.invokeAsync(() -> executeRemoveAlls(4000, 6000, WORK_DURATION)));
 
     // Clear the region every half second for 15 seconds.
-    getVM(coordinatorVM.getVmId()).invoke(() -> executeClears(workMillis, 500));
+    getVM(coordinatorVM.getVmId()).invoke(() -> executeClears(WORK_DURATION, ofMillis(500)));
 
     // Let asyncInvocations finish.
     for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
@@ -226,12 +235,12 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
     // Let all VMs (except the one to kill) continuously execute gets, put and removes for 30"
-    final int workMillis = 30000;
+    // final int workMillis = 30000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executeGets(entries, workMillis)),
-        server1.invokeAsync(() -> executePuts(entries, workMillis)),
-        accessor.invokeAsync(() -> executeGets(entries, workMillis)),
-        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
+        server1.invokeAsync(() -> executeGets(entries, WORK_DURATION)),
+        server1.invokeAsync(() -> executePuts(entries, WORK_DURATION)),
+        accessor.invokeAsync(() -> executeGets(entries, WORK_DURATION)),
+        accessor.invokeAsync(() -> executeRemoves(entries, WORK_DURATION)));
 
     // Retry the clear operation on the region until success (server2 will go down, but other
     // members will eventually become primary for those buckets previously hosted by server2).
@@ -278,12 +287,12 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
     // Let all VMs (except the one to kill) continuously execute gets, put and removes for 30"
-    final int workMillis = 30000;
+    // final int workMillis = 30000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executeGets(entries, workMillis)),
-        server1.invokeAsync(() -> executePuts(entries, workMillis)),
-        accessor.invokeAsync(() -> executeGets(entries, workMillis)),
-        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
+        server1.invokeAsync(() -> executeGets(entries, WORK_DURATION)),
+        server1.invokeAsync(() -> executePuts(entries, WORK_DURATION)),
+        accessor.invokeAsync(() -> executeGets(entries, WORK_DURATION)),
+        accessor.invokeAsync(() -> executeRemoves(entries, WORK_DURATION)));
 
     // Clear the region.
     getVM(coordinatorVM.getVmId()).invoke(() -> {
@@ -315,10 +324,10 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
     // Let all VMs continuously execute putAll/removeAll for 30 seconds.
-    final int workMillis = 30000;
+    // final int workMillis = 30000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
-        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
+        server1.invokeAsync(() -> executePutAlls(0, 6000, WORK_DURATION)),
+        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, WORK_DURATION)));
 
     // Retry the clear operation on the region until success (server2 will go down, but other
     // members will eventually become primary for those buckets previously hosted by server2).
@@ -360,10 +369,9 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     createRegions(RegionShortcut.PARTITION);
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
-    final int workMillis = 30000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
-        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
+        server1.invokeAsync(() -> executePutAlls(0, 6000, WORK_DURATION)),
+        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, WORK_DURATION)));
 
     // Clear the region.
     getVM(coordinatorVM.getVmId()).invoke(() -> {
@@ -520,12 +528,12 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   }
 
   /**
-   * Continuously execute get operations on the PartitionedRegion for the given durationInMillis.
+   * Continuously execute get operations on the PartitionedRegion for the given duration.
    */
-  private void executeGets(final int numEntries, final long durationInMillis) {
+  private void executeGets(final int numEntries, final Duration duration) {
     Cache cache = cacheRule.getCache();
     Region<String, String> region = cache.getRegion(REGION_NAME);
-    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+    Instant finishTime = Instant.now().plusMillis(duration.toMillis());
 
     while (Instant.now().isBefore(finishTime)) {
       // Region might have been cleared in between, that's why we check for null.
@@ -537,12 +545,12 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   }
 
   /**
-   * Continuously execute put operations on the PartitionedRegion for the given durationInMillis.
+   * Continuously execute put operations on the PartitionedRegion for the given duration.
    */
-  private void executePuts(final int numEntries, final long durationInMillis) {
+  private void executePuts(final int numEntries, final Duration duration) {
     Cache cache = cacheRule.getCache();
     Region<String, String> region = cache.getRegion(REGION_NAME);
-    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+    Instant finishTime = Instant.now().plusMillis(duration.toMillis());
 
     while (Instant.now().isBefore(finishTime)) {
       IntStream.range(0, numEntries).forEach(i -> region.put(String.valueOf(i), "Value_" + i));
@@ -550,16 +558,15 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   }
 
   /**
-   * Continuously execute putAll operations on the PartitionedRegion for the given
-   * durationInMillis.
+   * Continuously execute putAll operations on the PartitionedRegion for the given duration.
    */
-  private void executePutAlls(final int startKey, final int finalKey, final long durationInMillis) {
+  private void executePutAlls(final int startKey, final int finalKey, final Duration duration) {
     Cache cache = cacheRule.getCache();
     Map<String, String> valuesToInsert = new HashMap<>();
     Region<String, String> region = cache.getRegion(REGION_NAME);
     IntStream.range(startKey, finalKey)
         .forEach(i -> valuesToInsert.put(String.valueOf(i), "Value_" + i));
-    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+    Instant finishTime = Instant.now().plusMillis(duration.toMillis());
 
     while (Instant.now().isBefore(finishTime)) {
       region.putAll(valuesToInsert);
@@ -567,13 +574,12 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   }
 
   /**
-   * Continuously execute remove operations on the PartitionedRegion for the given
-   * durationInMillis.
+   * Continuously execute remove operations on the PartitionedRegion for the given duration.
    */
-  private void executeRemoves(final int numEntries, final long durationInMillis) {
+  private void executeRemoves(final int numEntries, final Duration duration) {
     Cache cache = cacheRule.getCache();
     Region<String, String> region = cache.getRegion(REGION_NAME);
-    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+    Instant finishTime = Instant.now().plusMillis(duration.toMillis());
 
     while (Instant.now().isBefore(finishTime)) {
       // Region might have been cleared in between, that's why we check for null.
@@ -585,16 +591,14 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   }
 
   /**
-   * Continuously execute removeAll operations on the PartitionedRegion for the given
-   * durationInMillis.
+   * Continuously execute removeAll operations on the PartitionedRegion for the given duration.
    */
-  private void executeRemoveAlls(final int startKey, final int finalKey,
-      final long durationInMillis) {
+  private void executeRemoveAlls(final int startKey, final int finalKey, final Duration duration) {
     Cache cache = cacheRule.getCache();
     List<String> keysToRemove = new ArrayList<>();
     Region<String, String> region = cache.getRegion(REGION_NAME);
     IntStream.range(startKey, finalKey).forEach(i -> keysToRemove.add(String.valueOf(i)));
-    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+    Instant finishTime = Instant.now().plusMillis(duration.toMillis());
 
     while (Instant.now().isBefore(finishTime)) {
       region.removeAll(keysToRemove);
@@ -622,13 +626,13 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   }
 
   /**
-   * Continuously execute clear operations on the PartitionedRegion every periodInMillis for the
-   * given durationInMillis.
+   * Continuously execute clear operations on the PartitionedRegion every period for the
+   * given duration.
    */
-  private void executeClears(final long durationInMillis, final long periodInMillis) {
+  private void executeClears(final Duration duration, final Duration period) {
     Cache cache = cacheRule.getCache();
     Region<String, String> region = cache.getRegion(REGION_NAME);
-    long minimumInvocationCount = durationInMillis / periodInMillis;
+    long minimumInvocationCount = duration.toMillis() / period.toMillis();
 
     for (int invocationCount = 0; invocationCount < minimumInvocationCount; invocationCount++) {
       region.clear();
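
The test changes above replace raw "long durationInMillis" parameters with
java.time.Duration. Every converted method shares one idiom, the deadline
loop; here is a minimal self-contained sketch of it (JDK only; the names
are ours, not taken from the test):

    import java.time.Duration;
    import java.time.Instant;

    class DeadlineLoopSketch {
      // Run an action repeatedly until the given duration has elapsed.
      static void runFor(Duration duration, Runnable action) {
        Instant finishTime = Instant.now().plus(duration);
        while (Instant.now().isBefore(finishTime)) {
          action.run();
        }
      }

      public static void main(String[] args) {
        runFor(Duration.ofMillis(100), () -> System.out.print("."));
        System.out.println();
      }
    }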
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
index 569f78c..b8597c1 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
@@ -23,6 +23,7 @@ import java.util.Set;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.CancelException;
+import org.apache.geode.annotations.VisibleForTesting;
 import org.apache.geode.cache.CacheWriterException;
 import org.apache.geode.cache.Operation;
 import org.apache.geode.cache.OperationAbortedException;
@@ -30,10 +31,12 @@ import org.apache.geode.cache.PartitionedRegionPartialClearException;
 import org.apache.geode.cache.asyncqueue.AsyncEventQueue;
 import org.apache.geode.cache.asyncqueue.internal.AsyncEventQueueImpl;
 import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.distributed.DistributedLockService;
 import org.apache.geode.distributed.internal.DistributionManager;
 import org.apache.geode.distributed.internal.MembershipListener;
 import org.apache.geode.distributed.internal.ReplyException;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.PartitionedRegionClearMessage.PartitionedRegionClearResponse;
 import org.apache.geode.internal.serialization.KnownVersion;
 import org.apache.geode.logging.internal.log4j.api.LogService;
 
@@ -47,6 +50,8 @@ public class PartitionedRegionClear {
 
   private final PartitionedRegion partitionedRegion;
 
+  private final DistributedLockService distributedLockService;
+
   protected final LockForListenerAndClientNotification lockForListenerAndClientNotification =
       new LockForListenerAndClientNotification();
 
@@ -55,8 +60,35 @@ public class PartitionedRegionClear {
   protected final PartitionedRegionClearListener partitionedRegionClearListener =
       new PartitionedRegionClearListener();
 
+  private final ColocationLeaderRegionProvider colocationLeaderRegionProvider;
+  private final AssignBucketsToPartitions assignBucketsToPartitions;
+  private final UpdateAttributesProcessorFactory updateAttributesProcessorFactory;
+
   public PartitionedRegionClear(PartitionedRegion partitionedRegion) {
+    this(partitionedRegion,
+        partitionedRegion.getPartitionedRegionLockService(),
+        ColocationHelper::getLeaderRegion,
+        PartitionRegionHelper::assignBucketsToPartitions,
+        pr -> new UpdateAttributesProcessor(pr, true));
+  }
+
+  @VisibleForTesting
+  PartitionedRegionClear(PartitionedRegion partitionedRegion,
+      DistributedLockService distributedLockService,
+      ColocationLeaderRegionProvider colocationLeaderRegionProvider,
+      AssignBucketsToPartitions assignBucketsToPartitions,
+      UpdateAttributesProcessorFactory updateAttributesProcessorFactory) {
     this.partitionedRegion = partitionedRegion;
+    this.distributedLockService = distributedLockService;
+    this.colocationLeaderRegionProvider = colocationLeaderRegionProvider;
+    this.assignBucketsToPartitions = assignBucketsToPartitions;
+    this.updateAttributesProcessorFactory = updateAttributesProcessorFactory;
+
+    // TODO: initialize needs to move out of constructor to prevent escape of reference to 'this'
+    initialize();
+  }
+
+  private void initialize() {
     partitionedRegion.getDistributionManager()
         .addMembershipListener(partitionedRegionClearListener);
   }
@@ -67,7 +99,7 @@ public class PartitionedRegionClear {
 
   void acquireDistributedClearLock(String clearLock) {
     try {
-      partitionedRegion.getPartitionedRegionLockService().lock(clearLock, -1, -1);
+      distributedLockService.lock(clearLock, -1, -1);
     } catch (IllegalStateException e) {
       partitionedRegion.lockCheckReadiness();
       throw e;
@@ -76,7 +108,7 @@ public class PartitionedRegionClear {
 
   void releaseDistributedClearLock(String clearLock) {
     try {
-      partitionedRegion.getPartitionedRegionLockService().unlock(clearLock);
+      distributedLockService.unlock(clearLock);
     } catch (IllegalStateException e) {
       partitionedRegion.lockCheckReadiness();
     } catch (Exception ex) {
@@ -303,7 +335,7 @@ public class PartitionedRegionClear {
   protected Set<Integer> attemptToSendPartitionedRegionClearMessage(RegionEventImpl event,
       PartitionedRegionClearMessage.OperationType op)
       throws ForceReattemptException {
-    Set<Integer> bucketsOperated = new HashSet<>();
+    Set<Integer> clearedBuckets = new HashSet<>();
 
     if (partitionedRegion.getPRRoot() == null) {
       if (logger.isDebugEnabled()) {
@@ -311,8 +343,10 @@ public class PartitionedRegionClear {
             "Partition region {} failed to initialize. Remove its profile from remote members.",
             this.partitionedRegion);
       }
-      new UpdateAttributesProcessor(partitionedRegion, true).distribute(false);
-      return bucketsOperated;
+      updateAttributesProcessorFactory
+          .create(partitionedRegion)
+          .distribute(false);
+      return clearedBuckets;
     }
 
     final Set<InternalDistributedMember> configRecipients =
@@ -325,9 +359,9 @@ public class PartitionedRegionClear {
 
       if (prConfig != null) {
         for (Node node : prConfig.getNodes()) {
-          InternalDistributedMember idm = node.getMemberId();
-          if (!idm.equals(partitionedRegion.getMyId())) {
-            configRecipients.add(idm);
+          InternalDistributedMember memberId = node.getMemberId();
+          if (!memberId.equals(partitionedRegion.getMyId())) {
+            configRecipients.add(memberId);
           }
         }
       }
@@ -336,29 +370,29 @@ public class PartitionedRegionClear {
     }
 
     try {
-      PartitionedRegionClearMessage.PartitionedRegionClearResponse resp =
-          new PartitionedRegionClearMessage.PartitionedRegionClearResponse(
-              partitionedRegion.getSystem(), configRecipients);
-      PartitionedRegionClearMessage partitionedRegionClearMessage =
-          new PartitionedRegionClearMessage(configRecipients, partitionedRegion, resp, op, event);
-      partitionedRegionClearMessage.send();
+      PartitionedRegionClearResponse clearResponse =
+          new PartitionedRegionClearResponse(partitionedRegion.getSystem(), configRecipients);
+      PartitionedRegionClearMessage clearMessage =
+          new PartitionedRegionClearMessage(configRecipients, partitionedRegion, clearResponse, op,
+              event);
+      clearMessage.send();
 
-      resp.waitForRepliesUninterruptibly();
-      bucketsOperated = resp.bucketsCleared;
+      clearResponse.waitForRepliesUninterruptibly();
+      clearedBuckets = clearResponse.bucketsCleared;
 
     } catch (ReplyException e) {
-      Throwable t = e.getCause();
-      if (t instanceof ForceReattemptException) {
-        throw (ForceReattemptException) t;
+      Throwable cause = e.getCause();
+      if (cause instanceof ForceReattemptException) {
+        throw (ForceReattemptException) cause;
       }
-      if (t instanceof PartitionedRegionPartialClearException) {
-        throw new PartitionedRegionPartialClearException(t.getMessage(), t);
+      if (cause instanceof PartitionedRegionPartialClearException) {
+        throw (PartitionedRegionPartialClearException) cause;
       }
       logger.warn(
           "PartitionedRegionClear#sendPartitionedRegionClearMessage: Caught exception during ClearRegionMessage send and waiting for response",
           e);
     }
-    return bucketsOperated;
+    return clearedBuckets;
   }
 
   /**
@@ -412,14 +446,9 @@ public class PartitionedRegionClear {
         invokeCacheWriter(regionEvent);
       }
 
-      // Check if there are any listeners or clients interested. If so, then clear write
-      // locks needs to be taken on all local and remote primary buckets in order to
-      // preserve the ordering of client events (for concurrent operations on the region).
-      boolean acquireClearLockForNotification =
-          (partitionedRegion.hasAnyClientsInterested() || partitionedRegion.hasListener());
-      if (acquireClearLockForNotification) {
-        obtainLockForClear(regionEvent);
-      }
+      // Clear write locks need to be taken on all local and remote primary buckets
+      // whether or not the partitioned region has any listeners or clients interested.
+      obtainLockForClear(regionEvent);
       try {
         Set<Integer> bucketsCleared = clearRegion(regionEvent);
 
@@ -435,9 +464,7 @@ public class PartitionedRegionClear {
           throw new PartitionedRegionPartialClearException(message);
         }
       } finally {
-        if (acquireClearLockForNotification) {
-          releaseLockForClear(regionEvent);
-        }
+        releaseLockForClear(regionEvent);
       }
     } finally {
       releaseDistributedClearLock(lockName);
@@ -458,8 +485,8 @@ public class PartitionedRegionClear {
   }
 
   protected void assignAllPrimaryBuckets() {
-    PartitionedRegion leader = ColocationHelper.getLeaderRegion(partitionedRegion);
-    PartitionRegionHelper.assignBucketsToPartitions(leader);
+    PartitionedRegion leader = colocationLeaderRegionProvider.getLeaderRegion(partitionedRegion);
+    assignBucketsToPartitions.assignBucketsToPartitions(leader);
   }
 
   protected void handleClearFromDepartedMember(InternalDistributedMember departedMember) {
@@ -505,6 +532,24 @@ public class PartitionedRegionClear {
     return membershipChange;
   }
 
+  @FunctionalInterface
+  @VisibleForTesting
+  interface ColocationLeaderRegionProvider {
+    PartitionedRegion getLeaderRegion(PartitionedRegion partitionedRegion);
+  }
+
+  @FunctionalInterface
+  @VisibleForTesting
+  interface AssignBucketsToPartitions {
+    void assignBucketsToPartitions(PartitionedRegion partitionedRegion);
+  }
+
+  @FunctionalInterface
+  @VisibleForTesting
+  interface UpdateAttributesProcessorFactory {
+    UpdateAttributesProcessor create(PartitionedRegion partitionedRegion);
+  }
+
   protected class PartitionedRegionClearListener implements MembershipListener {
 
     @Override
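
Two details in the diff above deserve a note before the test file. First,
the @VisibleForTesting constructor and the small @FunctionalInterface seams
let the unit test below swap in mocks for static helpers such as
ColocationHelper.getLeaderRegion. Second, the TODO on initialize() flags the
classic escaped-'this' hazard: the constructor registers a membership
listener before construction finishes. A generic sketch of the hazard and of
the factory-method shape that a later commit in this series (76ed734, "Use
factory method to avoid escaped reference") adopts -- the code here is
illustrative, not copied from that commit:

    class EscapedThisSketch {
      interface Listener {
        void onEvent();
      }

      interface Registry {
        void addListener(Listener listener);
      }

      static class Unsafe implements Listener {
        Unsafe(Registry registry) {
          // BUG: 'this' escapes to another component before the
          // constructor has finished initializing this object.
          registry.addListener(this);
        }

        @Override
        public void onEvent() {}
      }

      static class Safe implements Listener {
        private Safe() {}

        // Factory method: construct fully, then publish.
        static Safe create(Registry registry) {
          Safe instance = new Safe();
          registry.addListener(instance);
          return instance;
        }

        @Override
        public void onEvent() {}
      }
    }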
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
index 721d236..376fc8e 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
@@ -22,6 +22,7 @@ import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.catchThrowable;
 import static org.mockito.ArgumentCaptor.forClass;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doNothing;
@@ -42,7 +43,6 @@ import org.mockito.ArgumentCaptor;
 import org.apache.geode.CancelCriterion;
 import org.apache.geode.cache.PartitionedRegionPartialClearException;
 import org.apache.geode.cache.Region;
-import org.apache.geode.cache.asyncqueue.AsyncEventQueue;
 import org.apache.geode.distributed.DistributedLockService;
 import org.apache.geode.distributed.internal.DMStats;
 import org.apache.geode.distributed.internal.DistributionManager;
@@ -50,41 +50,74 @@ import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.MembershipListener;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.cache.PartitionedRegion.RetryTimeKeeper;
+import org.apache.geode.internal.cache.PartitionedRegionClear.AssignBucketsToPartitions;
+import org.apache.geode.internal.cache.PartitionedRegionClear.ColocationLeaderRegionProvider;
 import org.apache.geode.internal.cache.PartitionedRegionClear.PartitionedRegionClearListener;
+import org.apache.geode.internal.cache.PartitionedRegionClear.UpdateAttributesProcessorFactory;
 import org.apache.geode.internal.cache.PartitionedRegionClearMessage.OperationType;
 import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
 import org.apache.geode.internal.serialization.KnownVersion;
 
 public class PartitionedRegionClearTest {
 
-  private GemFireCacheImpl cache;
-  private HashSet<AsyncEventQueue> allAEQs = new HashSet<>();
   private PartitionedRegionClear partitionedRegionClear;
   private DistributionManager distributionManager;
   private PartitionedRegion partitionedRegion;
   private RegionAdvisor regionAdvisor;
   private InternalDistributedMember internalDistributedMember;
+  private DistributedLockService distributedLockService;
 
   @Before
   public void setUp() {
-
-    cache = mock(GemFireCacheImpl.class);
+    AssignBucketsToPartitions assignBucketsToPartitions = mock(AssignBucketsToPartitions.class);
+    GemFireCacheImpl cache = mock(GemFireCacheImpl.class);
+    ColocationLeaderRegionProvider colocationLeaderRegionProvider =
+        mock(ColocationLeaderRegionProvider.class);
+    distributedLockService = mock(DistributedLockService.class);
     distributionManager = mock(DistributionManager.class);
+    FilterProfile filterProfile = mock(FilterProfile.class);
     internalDistributedMember = mock(InternalDistributedMember.class);
     partitionedRegion = mock(PartitionedRegion.class);
     regionAdvisor = mock(RegionAdvisor.class);
+    UpdateAttributesProcessorFactory updateAttributesProcessorFactory =
+        mock(UpdateAttributesProcessorFactory.class);
+
+    when(cache.getAsyncEventQueues(false))
+        .thenReturn(emptySet());
+    when(colocationLeaderRegionProvider.getLeaderRegion(any()))
+        .thenReturn(partitionedRegion);
+    when(distributedLockService.lock(anyString(), anyInt(), anyInt()))
+        .thenReturn(true);
+    when(distributionManager.getDistributionManagerId())
+        .thenReturn(internalDistributedMember);
+    when(distributionManager.getId())
+        .thenReturn(internalDistributedMember);
+    when(internalDistributedMember.getVersion())
+        .thenReturn(KnownVersion.CURRENT);
+    when(partitionedRegion.getCache())
+        .thenReturn(cache);
+    when(partitionedRegion.getDistributionManager())
+        .thenReturn(distributionManager);
+    when(partitionedRegion.getName())
+        .thenReturn("prRegion");
+    when(partitionedRegion.getRegionAdvisor())
+        .thenReturn(regionAdvisor);
+    when(partitionedRegion.getFilterProfile())
+        .thenReturn(filterProfile);
+    when(filterProfile.getFilterRoutingInfoPart1(any(), any(), any()))
+        .thenReturn(mock(FilterRoutingInfo.class));
+    when(filterProfile.getFilterRoutingInfoPart2(any(), any()))
+        .thenReturn(mock(FilterRoutingInfo.class));
+    when(regionAdvisor.getDistributionManager())
+        .thenReturn(distributionManager);
+    when(updateAttributesProcessorFactory.create(any()))
+        .thenReturn(mock(UpdateAttributesProcessor.class));
 
-    when(distributionManager.getDistributionManagerId()).thenReturn(internalDistributedMember);
-    when(distributionManager.getId()).thenReturn(internalDistributedMember);
-    when(internalDistributedMember.getVersion()).thenReturn(KnownVersion.CURRENT);
-    when(partitionedRegion.getCache()).thenReturn(cache);
-    when(cache.getAsyncEventQueues(false)).thenReturn(allAEQs);
-    when(partitionedRegion.getDistributionManager()).thenReturn(distributionManager);
-    when(partitionedRegion.getName()).thenReturn("prRegion");
-    when(partitionedRegion.getRegionAdvisor()).thenReturn(regionAdvisor);
-    when(regionAdvisor.getDistributionManager()).thenReturn(distributionManager);
+    doNothing().when(distributedLockService).unlock(anyString());
 
-    partitionedRegionClear = new PartitionedRegionClear(partitionedRegion);
+    partitionedRegionClear = new PartitionedRegionClear(partitionedRegion, distributedLockService,
+        colocationLeaderRegionProvider, assignBucketsToPartitions,
+        updateAttributesProcessorFactory);
   }
 
   @Test
@@ -115,9 +148,7 @@ public class PartitionedRegionClearTest {
   @Test
   public void acquireDistributedClearLockGetsDistributedLock() {
     // arrange
-    DistributedLockService distributedLockService = mock(DistributedLockService.class);
     String lockName = PartitionedRegionClear.CLEAR_OPERATION + partitionedRegion.getName();
-    when(partitionedRegion.getPartitionedRegionLockService()).thenReturn(distributedLockService);
 
     // act
     partitionedRegionClear.acquireDistributedClearLock(lockName);
@@ -129,9 +160,7 @@ public class PartitionedRegionClearTest {
   @Test
   public void releaseDistributedClearLockReleasesDistributedLock() {
     // arrange
-    DistributedLockService distributedLockService = mock(DistributedLockService.class);
     String lockName = PartitionedRegionClear.CLEAR_OPERATION + partitionedRegion.getName();
-    when(partitionedRegion.getPartitionedRegionLockService()).thenReturn(distributedLockService);
 
     // act
     partitionedRegionClear.releaseDistributedClearLock(lockName);
@@ -567,6 +596,7 @@ public class PartitionedRegionClearTest {
   public void doClearAcquiresAndReleasesDistributedClearLockAndCreatesAllPrimaryBuckets() {
     // arrange
     RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(regionEvent.clone()).thenReturn(regionEvent);
 
     // partial mocking to stub some methods and verify
     PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
@@ -587,6 +617,7 @@ public class PartitionedRegionClearTest {
   public void doClearInvokesCacheWriterWhenCacheWriteIsSet() {
     // arrange
     RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(regionEvent.clone()).thenReturn(regionEvent);
 
     // partial mocking to stub some methods and verify
     PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
@@ -605,6 +636,7 @@ public class PartitionedRegionClearTest {
   public void doClearDoesNotInvokesCacheWriterWhenCacheWriteIsNotSet() {
     // arrange
     RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(regionEvent.clone()).thenReturn(regionEvent);
 
     // partial mocking to stub some methods and verify
     PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
@@ -669,12 +701,13 @@ public class PartitionedRegionClearTest {
   }
 
   @Test
-  public void doClearDoesNotObtainLockForClearWhenRegionHasNoListenerAndNoClientInterest() {
+  public void doClearObtainsLockForClearWhenRegionHasNoListenerAndNoClientInterest() {
     // arrange
     RegionEventImpl regionEvent = mock(RegionEventImpl.class);
 
     when(partitionedRegion.hasAnyClientsInterested()).thenReturn(false);
     when(partitionedRegion.hasListener()).thenReturn(false);
+    when(regionEvent.clone()).thenReturn(regionEvent);
 
     // partial mocking to stub some methods and verify
     PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
@@ -688,8 +721,8 @@ public class PartitionedRegionClearTest {
     spyPartitionedRegionClear.doClear(regionEvent, false);
 
     // assert
-    verify(spyPartitionedRegionClear, never()).obtainLockForClear(regionEvent);
-    verify(spyPartitionedRegionClear, never()).releaseLockForClear(regionEvent);
+    verify(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    verify(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
   }
 
   @Test
@@ -867,6 +900,60 @@ public class PartitionedRegionClearTest {
         .isNotNull();
   }
 
+  @Test
+  public void doClearAcquiresLockForClearWhenHasAnyClientsInterestedIsTrue() {
+    // arrange
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(true);
+    when(partitionedRegion.hasListener()).thenReturn(false);
+    when(regionEvent.clone()).thenReturn(regionEvent);
+
+    partitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(partitionedRegionClear).obtainLockForClear(regionEvent);
+
+    // act
+    partitionedRegionClear.doClear(regionEvent, false);
+
+    // assert
+    verify(partitionedRegionClear).obtainLockForClear(regionEvent);
+  }
+
+  @Test
+  public void doClearAcquiresLockForClearWhenHasListenerIsTrue() {
+    // arrange
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(false);
+    when(partitionedRegion.hasListener()).thenReturn(true);
+    when(regionEvent.clone()).thenReturn(regionEvent);
+
+    partitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(partitionedRegionClear).obtainLockForClear(regionEvent);
+
+    // act
+    partitionedRegionClear.doClear(regionEvent, false);
+
+    // assert
+    verify(partitionedRegionClear).obtainLockForClear(regionEvent);
+  }
+
+  @Test
+  public void doClearAcquiresLockForClearWhenHasAnyClientsInterestedAndHasListenerAreFalse() {
+    // arrange
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(false);
+    when(partitionedRegion.hasListener()).thenReturn(false);
+    when(regionEvent.clone()).thenReturn(regionEvent);
+
+    partitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(partitionedRegionClear).obtainLockForClear(regionEvent);
+
+    // act
+    partitionedRegionClear.doClear(regionEvent, false);
+
+    // assert
+    verify(partitionedRegionClear).obtainLockForClear(regionEvent);
+  }
+
   private Set<BucketRegion> setupBucketRegions(
       PartitionedRegionDataStore dataStore,
       BucketAdvisor bucketAdvisor) {

[geode] 01/17: GEODE-7683: introduce BR.cmnClearRegion

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit a64d533500ee7bc54f6a413b1970a66bf5eaf986
Author: zhouxh <gz...@pivotal.io>
AuthorDate: Mon Jan 27 17:02:48 2020 -0800

    GEODE-7683: introduce BR.cmnClearRegion
    
    Co-authored-by: Xiaojian Zhou <gz...@pivotal.io>
    
    GEODE-7684: Create messaging class for PR Clear (#4689)
    
    * Added new message class and test
    
    Co-authored-by: Benjamin Ross <br...@pivotal.io>
    Co-authored-by: Donal Evans <do...@pivotal.io>
    
    GEODE-7682: add PR.clear API (#4755)
    
    * GEODE-7683: introduce BR.cmnClearRegion
    
    Co-authored-by: Xiaojian Zhou <gz...@pivotal.io>
    
    PR.clear's event id should be created and used in BR (#4805)
    
    * GEODE-7857: PR.clear's event id should be created and used in BR
    
    GEODE-7912: cacheWriter should be triggered when PR.clear (#4882)
    
            Co-authored-by: Anil <ag...@pivotal.io>
            Co-authored-by: Xiaojian Zhou <gz...@pivotal.io>
    
    GEODE-7983: Clear region writer callbacks should not be invoked for bucket regions (#4954)
    
    GEODE-7676: Add PR clear with expiration tests (#4970)
    
    Added distributed tests to verify the clear operation on Partitioned
    Regions works as expected when expiration is configured.
    
    - Added unit and distributed tests.
    - Fixed LocalRegion class to clear the entryExpiryTasks Map whenever
      the cancelAllEntryExpiryTasks method is invoked.
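
    As a rough illustration of the scenario those tests cover, here is a minimal
    sketch of a partitioned region with entry expiration that is cleared while
    expiry tasks may be pending (the region name and timeout are illustrative,
    not taken from the commits):

        import org.apache.geode.cache.Cache;
        import org.apache.geode.cache.CacheFactory;
        import org.apache.geode.cache.ExpirationAction;
        import org.apache.geode.cache.ExpirationAttributes;
        import org.apache.geode.cache.Region;
        import org.apache.geode.cache.RegionShortcut;

        public class ClearWithExpirationSketch {
          public static void main(String[] args) {
            Cache cache = new CacheFactory().create();
            Region<String, String> region = cache
                .<String, String>createRegionFactory(RegionShortcut.PARTITION)
                .setStatisticsEnabled(true) // statistics are required for expiration
                .setEntryTimeToLive(new ExpirationAttributes(30, ExpirationAction.DESTROY))
                .create("example");
            region.put("k", "v");
            // Per the fix above, cancelAllEntryExpiryTasks now also clears the
            // entryExpiryTasks map tracked by LocalRegion, so cleared entries
            // leave no stale expiry tasks behind.
            region.clear();
            cache.close();
          }
        }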
    
    GEODE-7667: Add a 'clear' gfsh command for PR and RR clear (#4818)
    
    * Added clear command and modified remove functionality to clear PR
    
    Authored-by: Benjamin Ross <br...@pivotal.io>
    
    GEODE-7676: Conversion of duration to seconds.
    
    GEODE-7894: Moving expiry tasks to AbstractRegion.
    
    GEODE-7667: Fixing test to include PR clear help text.
    
    GEODE-7678 (2nd PR) - Support for cache-listener and client-notification for Partitioned Region Clear operation  (#5124)
    
    * GEODE-7678: Add support for cache listener and client notification for PR clear
    
    The changes are made to the PR clear messaging and locking mechanisms to
    preserve cache-listener and client-event ordering during concurrent cache
    operations while a clear is in progress.
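
    A minimal sketch of the listener callback this change makes reliable
    (CacheListenerAdapter, RegionEvent, and afterRegionClear are existing Geode
    APIs; the listener itself is illustrative):

        import org.apache.geode.cache.RegionEvent;
        import org.apache.geode.cache.util.CacheListenerAdapter;

        // Fires once on each member hosting the region when clear() completes;
        // the locking above keeps it ordered with concurrent cache operations.
        public class ClearLoggingListener extends CacheListenerAdapter<String, Integer> {
          @Override
          public void afterRegionClear(RegionEvent<String, Integer> event) {
            System.out.println("cleared: " + event.getRegion().getFullPath());
          }
        }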
    
    GEODE-7669 Test coverage for Partitioned Region clear with Overflow enabled (#5189)
    
    Authored-by: Jianxia Chen <jc...@apache.org>
    
    GEODE-8173: Add unit test (coverage) for PartitionedRegionClear class. (#5208)
    
    * GEODE-8173: Add unit test (coverage) for PartitionedRegionClear class.
    Co-authored-by: anilkumar gingade <an...@anilg.local>
    
    GEODE-8334: PR.clear should sync with putAll or removeAll on rvvLock (#5365)
    
        Co-authored-by: Xiaojian Zhou <zh...@vmware.com>
        Co-authored-by: Anil Gingade <ag...@vmware.com>
    
    GEODE-8361: Use Set instead of List to track cleared buckets (#5379)
    
    - Refactor PartitionedRegionClear to use Set instead of List
    - Some other changes to remove warnings/alerts from PartitionedRegionClear and PartitionedRegionClearMessage
    
    Authored-by: Donal Evans <do...@vmware.com>
    
    GEODE-7670: PR Clear with Concurrent Ops DUnitTests (#4848)
    
    Added distributed tests to verify that the clear operation on
    Partitioned Regions works as expected when there are other
    concurrent operations happening on the cache (put, putAll, get,
    remove, removeAll, members added and members removed).
    
    GEODE-7680: PR.clear must be successful when interacting with rebalance (#5095)
    
    - Added DUnit tests to confirm that clear does not interfere with
    rebalance or vice versa
    - Test when member departs during clear/rebalance
    - Test when member joins during clear/rebalance
    - Fixed typo in PartitionedRegionClearWithExpirationDUnitTest
    - Fixed typo in PartitionedRegion
    - Call assignBucketsToPartitions() on the leader colocated region during clear
    instead of on the target region
    
    Authored-by: Donal Evans <do...@pivotal.io>
    
    GEODE-7846: Adding Stats for Partitioned Region Clear (#5391)
    
    Added stats to CachePerfStats for PR Clear
    - Changed clears to 'regionClears' and 'bucketClears' to differentiate between the number of times the region was cleared and the number of times a bucket was cleared in a PartitionedRegion
    - Added Local and Total duration stats to record how long clear has been running for a specific region, as well as how long was spent clearing any specific member
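
    A hedged sketch of checking the renamed counter (getCachePerfStats() and
    getRegionClearCount() appear in the diffs below; the caller supplying an
    already-built InternalCache and Region is an assumption):

        import org.apache.geode.cache.Region;
        import org.apache.geode.internal.cache.InternalCache;

        final class ClearStatsProbe {
          // One clear() should bump the region-level clear counter by one.
          static void clearAndCheck(InternalCache cache, Region<?, ?> region) {
            long before = cache.getCachePerfStats().getRegionClearCount();
            region.clear();
            long after = cache.getCachePerfStats().getRegionClearCount();
            if (after != before + 1) {
              throw new AssertionError("expected regionClears to grow by 1");
            }
          }
        }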
    
    GEODE-7672: add dunit test to verify OQL index after PR clear. (#5436)
    
    * require rvv lock when creating index
    
    fix rebase compilation error
    
    GEODE-7845 blocking PR region clear if one or more server versions are too old (#5577)
    
    - if a server is running an old version when a PR clear is invoked
    by the client, the client will receive a ServerOperationException
    with a cause of ServerVersionMismatchException.
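
    A caller-side sketch of that failure mode (ServerOperationException is an
    existing client API; comparing the cause by simple class name avoids
    guessing the package of ServerVersionMismatchException, which this commit
    introduces):

        import org.apache.geode.cache.Region;
        import org.apache.geode.cache.client.ServerOperationException;

        final class SafeClear {
          static void clearIfSupported(Region<?, ?> region) {
            try {
              region.clear();
            } catch (ServerOperationException e) {
              Throwable cause = e.getCause();
              if (cause != null && cause.getClass().getSimpleName()
                  .equals("ServerVersionMismatchException")) {
                // At least one server is too old for PR clear; skip for now.
                return;
              }
              throw e;
            }
          }
        }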
    
    GEODE-7845: Adding a cleaner simpler test. (#5622)
    
    - Changed the test for ServerVersionMismatchException to be more readable.
    
    GEODE-7845: Now behaving with clients of various versions. (#5645)
    
    - added functionality that allows the tests to be run using various versions of the clients against old and new versions of the server.
    
    GEODE-7858: PR.clear notify client should let the queue holder member to notify (#5677)
    
    GEODE-7679 Partitioned Region clear is successful while region is being altered (#5516)
    
    GEODE-7675: Partitioned Region clear should be successful when clients are present with subscription enabled (#5727)
    
    GEODE-8771: invalidate should acquire the lock before initIndex (#5823)
    
    GEODE-8878: PR clear should also send a lock message to the secondary members. (#5950)
    
    GEODE-9132: Minor cleanup of PartitionedRegionClearTest
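
    The net effect of the series for callers, as a minimal sketch (Region.clear()
    and PartitionedRegionPartialClearException are part of the public API this
    series adds; the single retry on partial failure is an illustrative choice,
    not a recommendation from the commits):

        import org.apache.geode.cache.PartitionedRegionPartialClearException;
        import org.apache.geode.cache.Region;

        final class PrClearCaller {
          static void clearAll(Region<?, ?> region) {
            try {
              region.clear(); // now clears every bucket of a partitioned region
            } catch (PartitionedRegionPartialClearException e) {
              // Some buckets cleared and some did not (e.g., a member departed
              // mid-operation); the caller decides whether to retry.
              region.clear();
            }
          }
        }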
---
 .../integrationTest/resources/assembly_content.txt |   1 +
 .../cache/PRCacheListenerDistributedTest.java      | 585 +++++++++++++-
 .../cache/RegionClearStatsDistributedTest.java     |   2 +-
 .../ReplicateCacheListenerDistributedTest.java     | 111 ++-
 .../partitioned/PRClearCreateIndexDUnitTest.java   | 265 ++++++
 .../partitioned/PRClearQueryIndexDUnitTest.java    | 376 +++++++++
 ...ionedRegionAfterClearNotificationDUnitTest.java | 372 +++++++++
 .../cache/PartitionedRegionClearDUnitTest.java     | 465 +++++++++++
 ...itionedRegionClearWithAlterRegionDUnitTest.java | 803 ++++++++++++++++++
 ...gionClearWithConcurrentOperationsDUnitTest.java | 747 +++++++++++++++++
 ...titionedRegionClearWithExpirationDUnitTest.java | 501 ++++++++++++
 ...rtitionedRegionClearWithRebalanceDUnitTest.java | 578 +++++++++++++
 .../PartitionedRegionOverflowClearDUnitTest.java   | 380 +++++++++
 .../PartitionedRegionPersistentClearDUnitTest.java |  38 +-
 ...titionedRegionClearWithExpirationDUnitTest.java | 530 ++++++++++++
 .../query/partitioned/PRClearIntegrationTest.java  |  73 ++
 .../cache/PartitionedRegionIntegrationTest.java    |  45 ++
 ...itionedRegionSingleNodeOperationsJUnitTest.java |  66 --
 .../codeAnalysis/sanctionedDataSerializables.txt   |  16 +
 .../PartitionedRegionPartialClearException.java    |  45 +-
 .../main/java/org/apache/geode/cache/Region.java   |   6 +-
 .../cache/query/internal/DefaultQueryService.java  |   4 +-
 .../cache/query/internal/index/IndexManager.java   |   4 +-
 .../org/apache/geode/internal/DSFIDFactory.java    |   8 +
 .../geode/internal/cache/AbstractRegion.java       |   1 +
 .../geode/internal/cache/AbstractRegionMap.java    |  27 +-
 .../apache/geode/internal/cache/BucketAdvisor.java |   2 +-
 .../apache/geode/internal/cache/BucketRegion.java  |  42 +-
 .../geode/internal/cache/CachePerfStats.java       |  59 +-
 .../geode/internal/cache/ColocationHelper.java     |  10 +-
 .../internal/cache/DistributedClearOperation.java  |  20 +-
 .../geode/internal/cache/DistributedRegion.java    |  57 +-
 .../geode/internal/cache/InternalRegion.java       |   3 +
 .../apache/geode/internal/cache/LocalRegion.java   |  25 +-
 .../geode/internal/cache/PartitionedRegion.java    | 118 ++-
 .../internal/cache/PartitionedRegionClear.java     | 506 ++++++++++++
 .../cache/PartitionedRegionClearMessage.java       | 289 +++++++
 .../internal/cache/PartitionedRegionDataStore.java |   8 +
 .../geode/internal/cache/RegionEventImpl.java      |   5 +
 .../geode/internal/cache/RegionPerfStats.java      |  12 +-
 .../apache/geode/internal/cache/RegionStats.java   |   4 +-
 .../internal/cache/partitioned/ClearPRMessage.java | 320 ++++++++
 .../cache/partitioned/PutAllPRMessage.java         |   9 +
 .../internal/cache/partitioned/RegionAdvisor.java  |  11 +
 .../cache/partitioned/RemoveAllPRMessage.java      |   9 +
 .../cache/versions/RegionVersionVector.java        |   4 +-
 .../geode/management/internal/i18n/CliStrings.java |  14 +-
 .../sanctioned-geode-core-serializables.txt        |   2 +
 .../internal/cache/BucketRegionJUnitTest.java      |  85 +-
 .../geode/internal/cache/CachePerfStatsTest.java   |  53 +-
 .../internal/cache/DistributedRegionJUnitTest.java |  18 +
 .../geode/internal/cache/LocalRegionTest.java      |  22 +
 .../internal/cache/PartitionedRegionClearTest.java | 897 +++++++++++++++++++++
 .../internal/cache/PartitionedRegionTest.java      |  27 +
 .../cache/partitioned/ClearPRMessageTest.java      | 260 ++++++
 .../cache/partitioned/PutAllPRMessageTest.java     |  29 +
 .../cache/partitioned/RemoveAllPRMessageTest.java  |  29 +
 .../RollingUpgrade2DUnitTestBase.java              |   4 +-
 ...ePartitionRegionClearServerVersionMismatch.java | 212 +++++
 .../internal/cache/PartitionRegionClearHATest.java | 236 ++++++
 .../PartitionRegionClearMessageQueueDUnitTest.java | 165 ++++
 .../geode/test/dunit/rules/ClusterStartupRule.java |  23 +-
 .../geode/test/junit/rules/MemberStarterRule.java  |   9 +
 .../cli/commands/ClearCommandDUnitTest.java        | 120 +++
 .../cli/commands/RemoveCommandDUnitTest.java       |  13 +-
 .../GfshParserAutoCompletionIntegrationTest.java   |   2 +-
 .../{RemoveCommand.java => ClearCommand.java}      |  53 +-
 .../cli/commands/CommandAvailabilityIndicator.java |   1 +
 .../internal/cli/commands/RemoveCommand.java       |   9 +-
 .../internal/cli/domain/DataCommandResult.java     |  12 +
 .../cli/functions/DataCommandFunction.java         |  23 +-
 .../internal/cli/commands/ClearCommandTest.java    | 115 +++
 .../org/apache/geode/cache/query/data/City.java    |   5 +-
 .../test/concurrent/FileBasedCountDownLatch.java   |   2 +-
 .../serialization/DataSerializableFixedID.java     |   5 +
 75 files changed, 9691 insertions(+), 316 deletions(-)

diff --git a/geode-assembly/src/integrationTest/resources/assembly_content.txt b/geode-assembly/src/integrationTest/resources/assembly_content.txt
index 76a6dd3..609a100 100644
--- a/geode-assembly/src/integrationTest/resources/assembly_content.txt
+++ b/geode-assembly/src/integrationTest/resources/assembly_content.txt
@@ -221,6 +221,7 @@ javadoc/org/apache/geode/cache/PartitionAttributes.html
 javadoc/org/apache/geode/cache/PartitionAttributesFactory.html
 javadoc/org/apache/geode/cache/PartitionResolver.html
 javadoc/org/apache/geode/cache/PartitionedRegionDistributionException.html
+javadoc/org/apache/geode/cache/PartitionedRegionPartialClearException.html
 javadoc/org/apache/geode/cache/PartitionedRegionStorageException.html
 javadoc/org/apache/geode/cache/Region.Entry.html
 javadoc/org/apache/geode/cache/Region.html
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
index 559def7..ac02b65 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/PRCacheListenerDistributedTest.java
@@ -14,14 +14,35 @@
  */
 package org.apache.geode.cache;
 
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.apache.geode.test.dunit.VM.getVMCount;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.nullValue;
+
+import java.io.Serializable;
 import java.util.Arrays;
+import java.util.Collection;
 
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
 import org.junit.runners.Parameterized.Parameters;
 import org.junit.runners.Parameterized.UseParametersRunnerFactory;
 
+import org.apache.geode.cache.util.CacheListenerAdapter;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedCounters;
+import org.apache.geode.test.dunit.rules.DistributedErrorCollector;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
 import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
 
 /**
@@ -36,47 +57,591 @@ import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactor
 @RunWith(Parameterized.class)
 @UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
 @SuppressWarnings("serial")
-public class PRCacheListenerDistributedTest extends ReplicateCacheListenerDistributedTest {
+public class PRCacheListenerDistributedTest implements Serializable {
+
+  protected static final String CLEAR = "CLEAR";
+  protected static final String REGION_DESTROY = "REGION_DESTROY";
+  private static final String CREATES = "CREATES";
+  private static final String UPDATES = "UPDATES";
+  private static final String INVALIDATES = "INVALIDATES";
+  private static final String DESTROYS = "DESTROYS";
+  private static final int ENTRY_VALUE = 0;
+  private static final int UPDATED_ENTRY_VALUE = 1;
+  private static final String KEY = "key-1";
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule();
+  @Rule
+  public CacheRule cacheRule = CacheRule.builder().createCacheInAll().build();
+  @Rule
+  public SerializableTestName testName = new SerializableTestName();
+  @Rule
+  public DistributedCounters sharedCountersRule = new DistributedCounters();
+  @Rule
+  public DistributedErrorCollector errorCollector = new DistributedErrorCollector();
+  protected String regionName;
 
-  @Parameters(name = "{index}: redundancy={0}")
-  public static Iterable<Integer> data() {
-    return Arrays.asList(0, 3);
+  @Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] {
+        {1, Boolean.FALSE},
+        {3, Boolean.TRUE},
+    });
   }
 
   @Parameter
   public int redundancy;
 
-  @Override
+  @Parameter(1)
+  public Boolean withData;
+
   protected Region<String, Integer> createRegion(final String name,
       final CacheListener<String, Integer> listener) {
+    return createPartitionedRegion(name, listener, false);
+  }
+
+  protected Region<String, Integer> createAccessorRegion(final String name,
+      final CacheListener<String, Integer> listener) {
+    return createPartitionedRegion(name, listener, true);
+  }
+
+  private Region<String, Integer> createPartitionedRegion(String name,
+      CacheListener<String, Integer> listener, boolean accessor) {
+    LogService.getLogger()
+        .info("Params [Redundancy: " + redundancy + " withData:" + withData + "]");
     PartitionAttributesFactory<String, Integer> paf = new PartitionAttributesFactory<>();
     paf.setRedundantCopies(redundancy);
 
+    if (accessor) {
+      paf.setLocalMaxMemory(0);
+    }
     RegionFactory<String, Integer> regionFactory = cacheRule.getCache().createRegionFactory();
-    regionFactory.addCacheListener(listener);
+    if (listener != null) {
+      regionFactory.addCacheListener(listener);
+    }
     regionFactory.setDataPolicy(DataPolicy.PARTITION);
     regionFactory.setPartitionAttributes(paf.create());
 
     return regionFactory.create(name);
   }
 
-  @Override
+  private void withData(Region region) {
+    if (withData) {
+      // Create only a few buckets.
+      // Covers the case where a node has no buckets, depending on redundancy.
+      region.put("key1", "value1");
+      region.put("key2", "value2");
+    }
+  }
+
   protected int expectedCreates() {
     return 1;
   }
 
-  @Override
   protected int expectedUpdates() {
     return 1;
   }
 
-  @Override
   protected int expectedInvalidates() {
     return 1;
   }
 
-  @Override
   protected int expectedDestroys() {
     return 1;
   }
+
+  @Test
+  public void afterRegionDestroyIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, listener));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY))
+        .isGreaterThanOrEqualTo(expectedRegionDestroys());
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedOnNodeWithListener() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedOnRemoteNodeWithListener() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, null);
+
+    getVM(0).invoke(() -> {
+      createRegion(regionName, listener);
+    });
+
+    for (int i = 1; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedOnAccessorAndDataMembers() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, listener));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY))
+        .isGreaterThanOrEqualTo(expectedRegionDestroys());
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedOnAccessor() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedOnNonAccessor() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, null);
+    getVM(0).invoke(() -> {
+      createRegion(regionName, listener);
+    });
+    for (int i = 1; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(sharedCountersRule.getTotal(REGION_DESTROY)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionClearIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, listener));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(expectedClears());
+  }
+
+  @Test
+  public void afterClearIsInvokedOnNodeWithListener() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionClearIsInvokedOnRemoteNodeWithListener() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, null);
+    getVM(0).invoke(() -> {
+      createRegion(regionName, listener);
+    });
+    for (int i = 1; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionClearIsInvokedOnAccessorAndDataMembers() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, listener));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(expectedClears());
+  }
+
+  @Test
+  public void afterRegionClearIsInvokedOnAccessor() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, listener);
+
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(1);
+  }
+
+  @Test
+  public void afterRegionClearIsInvokedOnNonAccessor() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createAccessorRegion(regionName, null);
+
+    getVM(0).invoke(() -> {
+      createRegion(regionName, listener);
+    });
+    for (int i = 1; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        withData(createRegion(regionName, null));
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(1);
+  }
+
+  @Before
+  public void setUp() {
+    regionName = getClass().getSimpleName();
+
+    sharedCountersRule.initialize(CREATES);
+    sharedCountersRule.initialize(DESTROYS);
+    sharedCountersRule.initialize(INVALIDATES);
+    sharedCountersRule.initialize(UPDATES);
+    sharedCountersRule.initialize(CLEAR);
+    sharedCountersRule.initialize(REGION_DESTROY);
+  }
+
+  @Test
+  public void afterCreateIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new CreateCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.put(KEY, ENTRY_VALUE, cacheRule.getSystem().getDistributedMember());
+
+    assertThat(sharedCountersRule.getTotal(CREATES)).isEqualTo(expectedCreates());
+  }
+
+  @Test
+  public void afterUpdateIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new UpdateCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.put(KEY, ENTRY_VALUE, cacheRule.getSystem().getDistributedMember());
+    region.put(KEY, UPDATED_ENTRY_VALUE, cacheRule.getSystem().getDistributedMember());
+
+    assertThat(sharedCountersRule.getTotal(UPDATES)).isEqualTo(expectedUpdates());
+  }
+
+  @Test
+  public void afterInvalidateIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new InvalidateCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.put(KEY, 0, cacheRule.getSystem().getDistributedMember());
+    region.invalidate(KEY);
+
+    assertThat(sharedCountersRule.getTotal(INVALIDATES)).isEqualTo(expectedInvalidates());
+    assertThat(region.get(KEY)).isNull();
+  }
+
+  @Test
+  public void afterDestroyIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new DestroyCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.put(KEY, 0, cacheRule.getSystem().getDistributedMember());
+    region.destroy(KEY);
+
+    assertThat(sharedCountersRule.getTotal(DESTROYS)).isEqualTo(expectedDestroys());
+  }
+
+  @Test
+  public void afterClearIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.clear();
+
+    assertThat(sharedCountersRule.getTotal(CLEAR)).isEqualTo(expectedClears());
+  }
+
+  protected int expectedClears() {
+    return getVMCount() + 1;
+  }
+
+  protected int expectedRegionDestroys() {
+    return getVMCount() + 1;
+  }
+
+  /**
+   * Overridden within tests to increment shared counters.
+   */
+  private abstract static class BaseCacheListener extends CacheListenerAdapter<String, Integer>
+      implements Serializable {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      fail("Unexpected listener callback: afterCreate");
+    }
+
+    @Override
+    public void afterInvalidate(final EntryEvent<String, Integer> event) {
+      fail("Unexpected listener callback: afterInvalidate");
+    }
+
+    @Override
+    public void afterDestroy(final EntryEvent<String, Integer> event) {
+      fail("Unexpected listener callback: afterDestroy");
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      fail("Unexpected listener callback: afterUpdate");
+    }
+
+    @Override
+    public void afterRegionInvalidate(final RegionEvent<String, Integer> event) {
+      fail("Unexpected listener callback: afterRegionInvalidate");
+    }
+  }
+
+  private class CreateCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(CREATES);
+
+      errorCollector.checkThat(event.getDistributedMember(), equalTo(event.getCallbackArgument()));
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.CREATE));
+      errorCollector.checkThat(event.getOldValue(), nullValue());
+      errorCollector.checkThat(event.getNewValue(), equalTo(ENTRY_VALUE));
+
+      if (event.getSerializedOldValue() != null) {
+        errorCollector.checkThat(event.getSerializedOldValue().getDeserializedValue(),
+            equalTo(event.getOldValue()));
+      }
+      if (event.getSerializedNewValue() != null) {
+        errorCollector.checkThat(event.getSerializedNewValue().getDeserializedValue(),
+            equalTo(event.getNewValue()));
+      }
+    }
+  }
+
+  private class UpdateCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      // nothing
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(UPDATES);
+
+      errorCollector.checkThat(event.getDistributedMember(), equalTo(event.getCallbackArgument()));
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.UPDATE));
+      errorCollector.checkThat(event.getOldValue(), anyOf(equalTo(ENTRY_VALUE), nullValue()));
+      errorCollector.checkThat(event.getNewValue(), equalTo(UPDATED_ENTRY_VALUE));
+
+      if (event.getSerializedOldValue() != null) {
+        errorCollector.checkThat(event.getSerializedOldValue().getDeserializedValue(),
+            equalTo(event.getOldValue()));
+      }
+      if (event.getSerializedNewValue() != null) {
+        errorCollector.checkThat(event.getSerializedNewValue().getDeserializedValue(),
+            equalTo(event.getNewValue()));
+      }
+    }
+  }
+
+  private class InvalidateCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      // ignore
+    }
+
+    @Override
+    public void afterInvalidate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(INVALIDATES);
+
+      if (event.isOriginRemote()) {
+        errorCollector.checkThat(event.getDistributedMember(),
+            not(cacheRule.getSystem().getDistributedMember()));
+      } else {
+        errorCollector.checkThat(event.getDistributedMember(),
+            equalTo(cacheRule.getSystem().getDistributedMember()));
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.INVALIDATE));
+      errorCollector.checkThat(event.getOldValue(), anyOf(equalTo(ENTRY_VALUE), nullValue()));
+      errorCollector.checkThat(event.getNewValue(), nullValue());
+    }
+  }
+
+  private class DestroyCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(UPDATES);
+    }
+
+    @Override
+    public void afterDestroy(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(DESTROYS);
+
+      if (event.isOriginRemote()) {
+        errorCollector.checkThat(event.getDistributedMember(),
+            not(cacheRule.getSystem().getDistributedMember()));
+      } else {
+        errorCollector.checkThat(event.getDistributedMember(),
+            equalTo(cacheRule.getSystem().getDistributedMember()));
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.DESTROY));
+      errorCollector.checkThat(event.getOldValue(), anyOf(equalTo(ENTRY_VALUE), nullValue()));
+      errorCollector.checkThat(event.getNewValue(), nullValue());
+    }
+  }
+
+  protected class ClearCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(UPDATES);
+    }
+
+    @Override
+    public void afterRegionClear(RegionEvent<String, Integer> event) {
+
+      sharedCountersRule.increment(CLEAR);
+      if (!event.getRegion().getAttributes().getDataPolicy().withPartitioning()) {
+        if (event.isOriginRemote()) {
+          errorCollector.checkThat(event.getDistributedMember(),
+              not(cacheRule.getSystem().getDistributedMember()));
+        } else {
+          errorCollector.checkThat(event.getDistributedMember(),
+              equalTo(cacheRule.getSystem().getDistributedMember()));
+        }
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.REGION_CLEAR));
+      errorCollector.checkThat(event.getRegion().getName(), equalTo(regionName));
+    }
+  }
+
+  protected class RegionDestroyCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      sharedCountersRule.increment(UPDATES);
+    }
+
+    @Override
+    public void afterRegionDestroy(final RegionEvent<String, Integer> event) {
+      sharedCountersRule.increment(REGION_DESTROY);
+
+      if (!event.getRegion().getAttributes().getDataPolicy().withPartitioning()) {
+        if (event.isOriginRemote()) {
+          errorCollector.checkThat(event.getDistributedMember(),
+              not(cacheRule.getSystem().getDistributedMember()));
+        } else {
+          errorCollector.checkThat(event.getDistributedMember(),
+              equalTo(cacheRule.getSystem().getDistributedMember()));
+        }
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.REGION_DESTROY));
+      errorCollector.checkThat(event.getRegion().getName(), equalTo(regionName));
+    }
+  }
 }
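
Note on the expected counts in the test above: expectedClears() and
expectedRegionDestroys() return getVMCount() + 1 because the counting listener
is installed in the controller JVM as well as in each of the getVMCount()
DUnit VMs, so every member fires the callback exactly once.
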
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/RegionClearStatsDistributedTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/RegionClearStatsDistributedTest.java
index 52a4ade..50cea82 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/RegionClearStatsDistributedTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/RegionClearStatsDistributedTest.java
@@ -169,7 +169,7 @@ public class RegionClearStatsDistributedTest implements Serializable {
   }
 
   private void validateClearCountStat() {
-    assertThat(cacheRule.getCache().getCachePerfStats().getClearCount())
+    assertThat(cacheRule.getCache().getCachePerfStats().getRegionClearCount())
         .isEqualTo(EXPECTED_CLEAR_COUNT_STAT_VALUE);
   }
 }
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java
index 3eedcef..dd229de 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/ReplicateCacheListenerDistributedTest.java
@@ -51,13 +51,15 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
   private static final String UPDATES = "UPDATES";
   private static final String INVALIDATES = "INVALIDATES";
   private static final String DESTROYS = "DESTROYS";
+  private static final String CLEAR = "CLEAR";
+  private static final String REGION_DESTROY = "REGION_DESTROY";
 
   private static final int ENTRY_VALUE = 0;
   private static final int UPDATED_ENTRY_VALUE = 1;
 
   private static final String KEY = "key-1";
 
-  private String regionName;
+  protected String regionName;
 
   @Rule
   public DistributedRule distributedRule = new DistributedRule();
@@ -82,6 +84,8 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
     distributedCounters.initialize(DESTROYS);
     distributedCounters.initialize(INVALIDATES);
     distributedCounters.initialize(UPDATES);
+    distributedCounters.initialize(CLEAR);
+    distributedCounters.initialize(REGION_DESTROY);
   }
 
   @Test
@@ -148,6 +152,36 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
     assertThat(distributedCounters.getTotal(DESTROYS)).isEqualTo(expectedDestroys());
   }
 
+  @Test
+  public void afterClearIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new ClearCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.clear();
+
+    assertThat(distributedCounters.getTotal(CLEAR)).isEqualTo(expectedClears());
+  }
+
+  @Test
+  public void afterRegionDestroyIsInvokedInEveryMember() {
+    CacheListener<String, Integer> listener = new RegionDestroyCountingCacheListener();
+    Region<String, Integer> region = createRegion(regionName, listener);
+    for (int i = 0; i < getVMCount(); i++) {
+      getVM(i).invoke(() -> {
+        createRegion(regionName, listener);
+      });
+    }
+
+    region.destroyRegion();
+
+    assertThat(distributedCounters.getTotal(REGION_DESTROY)).isEqualTo(expectedRegionDestroys());
+  }
+
   protected Region<String, Integer> createRegion(final String name,
       final CacheListener<String, Integer> listener) {
     RegionFactory<String, Integer> regionFactory = cacheRule.getCache().createRegionFactory();
@@ -174,6 +208,14 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
     return getVMCount() + 1;
   }
 
+  protected int expectedClears() {
+    return getVMCount() + 1;
+  }
+
+  protected int expectedRegionDestroys() {
+    return getVMCount() + 1;
+  }
+
   /**
    * Overridden within tests to increment shared counters.
    */
@@ -283,7 +325,12 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
 
     @Override
     public void afterCreate(final EntryEvent<String, Integer> event) {
-      // ignore
+      distributedCounters.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      distributedCounters.increment(UPDATES);
     }
 
     @Override
@@ -302,4 +349,64 @@ public class ReplicateCacheListenerDistributedTest implements Serializable {
       errorCollector.checkThat(event.getNewValue(), nullValue());
     }
   }
+
+  protected class ClearCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      distributedCounters.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      distributedCounters.increment(UPDATES);
+    }
+
+    @Override
+    public void afterRegionClear(RegionEvent<String, Integer> event) {
+
+      distributedCounters.increment(CLEAR);
+      if (!event.getRegion().getAttributes().getDataPolicy().withPartitioning()) {
+        if (event.isOriginRemote()) {
+          errorCollector.checkThat(event.getDistributedMember(),
+              not(cacheRule.getSystem().getDistributedMember()));
+        } else {
+          errorCollector.checkThat(event.getDistributedMember(),
+              equalTo(cacheRule.getSystem().getDistributedMember()));
+        }
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.REGION_CLEAR));
+      errorCollector.checkThat(event.getRegion().getName(), equalTo(regionName));
+    }
+  }
+
+  protected class RegionDestroyCountingCacheListener extends BaseCacheListener {
+
+    @Override
+    public void afterCreate(final EntryEvent<String, Integer> event) {
+      distributedCounters.increment(CREATES);
+    }
+
+    @Override
+    public void afterUpdate(final EntryEvent<String, Integer> event) {
+      distributedCounters.increment(UPDATES);
+    }
+
+    @Override
+    public void afterRegionDestroy(final RegionEvent<String, Integer> event) {
+      distributedCounters.increment(REGION_DESTROY);
+
+      if (!event.getRegion().getAttributes().getDataPolicy().withPartitioning()) {
+        if (event.isOriginRemote()) {
+          errorCollector.checkThat(event.getDistributedMember(),
+              not(cacheRule.getSystem().getDistributedMember()));
+        } else {
+          errorCollector.checkThat(event.getDistributedMember(),
+              equalTo(cacheRule.getSystem().getDistributedMember()));
+        }
+      }
+      errorCollector.checkThat(event.getOperation(), equalTo(Operation.REGION_DESTROY));
+      errorCollector.checkThat(event.getRegion().getName(), equalTo(regionName));
+    }
+  }
 }
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java
new file mode 100644
index 0000000..1c94c2d
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java
@@ -0,0 +1,265 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.cache.query.partitioned;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.stream.IntStream;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.client.ClientCache;
+import org.apache.geode.cache.client.ClientRegionShortcut;
+import org.apache.geode.cache.query.QueryService;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.DistributionMessageObserver;
+import org.apache.geode.internal.cache.DistributedClearOperation;
+import org.apache.geode.internal.cache.DistributedClearOperation.ClearRegionMessage;
+import org.apache.geode.internal.cache.PartitionedRegionClearMessage;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.rules.ClientVM;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+
+public class PRClearCreateIndexDUnitTest implements Serializable {
+  @Rule
+  public ClusterStartupRule cluster = new ClusterStartupRule(4, true);
+
+  private MemberVM primary, secondary;
+  private ClientVM client;
+
+  @Before
+  public void before() throws Exception {
+    int locatorPort = ClusterStartupRule.getDUnitLocatorPort();
+    primary = cluster.startServerVM(0, locatorPort);
+    secondary = cluster.startServerVM(1, locatorPort);
+
+    // create region on server1 first, making sure server1 has the primary bucket
+    primary.invoke(() -> {
+      DistributionMessageObserver.setInstance(new MessageObserver());
+      Region<Object, Object> region =
+          ClusterStartupRule.memberStarter.createPartitionRegion("regionA",
+              f -> f.setTotalNumBuckets(1).setRedundantCopies(1));
+      IntStream.range(0, 100).forEach(i -> region.put(i, "value" + i));
+    });
+
+    // server2 has the secondary bucket
+    secondary.invoke(() -> {
+      DistributionMessageObserver.setInstance(new MessageObserver());
+      ClusterStartupRule.memberStarter.createPartitionRegion("regionA",
+          f -> f.setTotalNumBuckets(1).setRedundantCopies(1));
+    });
+  }
+
+  @After
+  public void after() throws Exception {
+    primary.invoke(() -> {
+      DistributionMessageObserver.setInstance(null);
+    });
+    secondary.invoke(() -> {
+      DistributionMessageObserver.setInstance(null);
+    });
+  }
+
+  // All tests create the index on the secondary member. These tests make sure we request
+  // locks for clear on secondary members as well. If we create the index on the primary, the
+  // clear and createIndex run sequentially, so there is no error. But if we create the index
+  // on the secondary member and that member does not
+  // request a lock for the clear operation, the result is an EntryDestroyedException while the
+  // index is being created.
+
+  // Note: OP_LOCK_FOR_CLEAR, OP_CLEAR, and OP_UNLOCK_FOR_CLEAR are messages for secondary members;
+  // OP_LOCK_FOR_PR_CLEAR, OP_UNLOCK_FOR_PR_CLEAR, and OP_PR_CLEAR can be sent to any member
+
+  @Test
+  // All local buckets are primary, so only OP_LOCK_FOR_CLEAR and OP_CLEAR messages are sent to
+  // the secondary member.
+  // At the end, an OP_PR_CLEAR is sent to the secondary with no effect.
+  public void clearFromPrimaryMember() throws Exception {
+    AsyncInvocation createIndex = secondary.invokeAsync(PRClearCreateIndexDUnitTest::createIndex);
+    AsyncInvocation clear = primary.invokeAsync(PRClearCreateIndexDUnitTest::clear);
+
+    createIndex.get();
+    clear.get();
+
+    // assert which messages each member observed
+    primary.invoke(() -> verifyEvents(false, false, false, false));
+    secondary.invoke(() -> verifyEvents(false, true, true, true));
+  }
+
+  @Test
+  // all local buckets are secondary, so an OP_PR_CLEAR is sent to the primary member; from there,
+  // OP_LOCK_FOR_CLEAR and OP_CLEAR messages are sent back to the secondary
+  public void clearFromSecondaryMember() throws Exception {
+    AsyncInvocation createIndex = secondary.invokeAsync(PRClearCreateIndexDUnitTest::createIndex);
+    AsyncInvocation clear = secondary.invokeAsync(PRClearCreateIndexDUnitTest::clear);
+
+    createIndex.get();
+    clear.get();
+
+    // assert which messages each member observed
+    primary.invoke(() -> verifyEvents(false, true, false, false));
+    secondary.invoke(() -> verifyEvents(false, false, true, true));
+  }
+
+  /**
+   * For an interested client connecting to the secondary member, clear:
+   * 1. locks all local primary regions
+   * 2. sends OP_LOCK_FOR_PR_CLEAR to lock all other members
+   * 3. sends OP_PR_CLEAR to the primary to clear
+   * 4. the primary then sends an OP_CLEAR message back to the secondary to clear
+   */
+  @Test
+  public void clearFromInterestedClientConnectingToSecondaryMember() throws Exception {
+    int port = secondary.getPort();
+    client = cluster.startClientVM(2, c -> c.withServerConnection(port).withPoolSubscription(true));
+    AsyncInvocation createIndex = secondary.invokeAsync(PRClearCreateIndexDUnitTest::createIndex);
+
+    AsyncInvocation clear = client.invokeAsync(() -> {
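+      // give the createIndex invocation above a head start before issuing the clear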
+      Thread.sleep(200);
+      ClientCache clientCache = ClusterStartupRule.getClientCache();
+      Region<Object, Object> regionA =
+          clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY).create("regionA");
+      regionA.registerInterestForAllKeys();
+      regionA.clear();
+    });
+
+    createIndex.get();
+    clear.get();
+    primary.invoke(() -> verifyEvents(true, true, false, false));
+    secondary.invoke(() -> verifyEvents(false, false, true, true));
+  }
+
+  /**
+   * For an interested client connecting to the primary member, clear behaves as if started from
+   * the primary member, except that it locks first:
+   * 1. locks local primary regions
+   * 2. sends OP_LOCK_FOR_PR_CLEAR to lock all other members' primary buckets
+   * 3. sends an OP_LOCK_FOR_CLEAR message to lock all secondary buckets
+   * 4. sends OP_CLEAR to clear all secondary buckets
+   */
+  @Test
+  public void clearFromInterestedClientConnectingToPrimaryMember() throws Exception {
+    int port = primary.getPort();
+    client = cluster.startClientVM(2, c -> c.withServerConnection(port).withPoolSubscription(true));
+    AsyncInvocation createIndex = secondary.invokeAsync(PRClearCreateIndexDUnitTest::createIndex);
+
+    AsyncInvocation clear = client.invokeAsync(() -> {
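+      // give the createIndex invocation above a head start before issuing the clear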
+      Thread.sleep(200);
+      ClientCache clientCache = ClusterStartupRule.getClientCache();
+      Region<Object, Object> regionA =
+          clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY).create("regionA");
+      regionA.registerInterestForAllKeys();
+      regionA.clear();
+    });
+
+    createIndex.get();
+    clear.get();
+    primary.invoke(() -> verifyEvents(false, false, false, false));
+    secondary.invoke(() -> verifyEvents(true, true, true, true));
+  }
+
+  private static void clear() throws InterruptedException {
+    // Starting the clear a bit later than the createIndex operation used to reveal the race
+    // condition; the sleep is left commented out since the test no longer needs the race to
+    // happen.
+    // Thread.sleep(200);
+    Region region = ClusterStartupRule.getCache().getRegion("/regionA");
+    region.clear();
+  }
+
+  private static void createIndex() {
+    QueryService queryService = ClusterStartupRule.getCache().getQueryService();
+    // run createIndex multiple times to make sure the clear operation falls inside a
+    // createIndex operation
+    IntStream.range(0, 10).forEach(i -> {
+      try {
+        queryService.createIndex("index" + i, "name" + i, "/regionA");
+      } catch (Exception e) {
+        throw new RuntimeException(e.getMessage(), e);
+      }
+    });
+  }
+
+  private static void verifyEvents(boolean lockOthers, boolean clearOthers, boolean lockSecondary,
+      boolean clearSecondary) {
+    MessageObserver observer = (MessageObserver) DistributionMessageObserver.getInstance();
+    assertThat(observer.isLock_others())
+        .describedAs("OP_LOCK_FOR_PR_CLEAR received: %s", observer.isLock_others())
+        .isEqualTo(lockOthers);
+    assertThat(observer.isClear_others())
+        .describedAs("OP_PR_CLEAR received: %s", observer.isClear_others()).isEqualTo(clearOthers);
+    assertThat(observer.isLock_secondary())
+        .describedAs("OP_LOCK_FOR_CLEAR received: %s", observer.isLock_secondary())
+        .isEqualTo(lockSecondary);
+    assertThat(observer.isClear_secondary())
+        .describedAs("OP_CLEAR received: %s", observer.isClear_secondary())
+        .isEqualTo(clearSecondary);
+  }
+
+  private static class MessageObserver extends DistributionMessageObserver {
+    private volatile boolean lock_secondary = false;
+    private volatile boolean clear_secondary = false;
+    private volatile boolean clear_others = false;
+    private volatile boolean lock_others = false;
+
+    @Override
+    public void beforeProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
+      if (message instanceof ClearRegionMessage) {
+        ClearRegionMessage clearMessage = (ClearRegionMessage) message;
+        if (clearMessage
+            .getOperationType() == DistributedClearOperation.OperationType.OP_LOCK_FOR_CLEAR) {
+          lock_secondary = true;
+        }
+        if (clearMessage.getOperationType() == DistributedClearOperation.OperationType.OP_CLEAR) {
+          clear_secondary = true;
+        }
+      }
+      if (message instanceof PartitionedRegionClearMessage) {
+        PartitionedRegionClearMessage clearMessage = (PartitionedRegionClearMessage) message;
+        if (clearMessage
+            .getOp() == PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR) {
+          lock_others = true;
+        }
+        if (clearMessage.getOp() == PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR) {
+          clear_others = true;
+        }
+      }
+    }
+
+    public boolean isLock_secondary() {
+      return lock_secondary;
+    }
+
+    public boolean isClear_secondary() {
+      return clear_secondary;
+    }
+
+    public boolean isClear_others() {
+      return clear_others;
+    }
+
+    public boolean isLock_others() {
+      return lock_others;
+    }
+  }
+
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearQueryIndexDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearQueryIndexDUnitTest.java
new file mode 100644
index 0000000..feed3fc
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearQueryIndexDUnitTest.java
@@ -0,0 +1,376 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.cache.query.partitioned;
+
+import static org.apache.geode.distributed.ConfigurationProperties.SERIALIZABLE_OBJECT_FILTER;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.junit.rules.VMProvider.invokeInEveryMember;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.IntStream;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.client.ClientCache;
+import org.apache.geode.cache.client.ServerOperationException;
+import org.apache.geode.cache.query.Index;
+import org.apache.geode.cache.query.IndexStatistics;
+import org.apache.geode.cache.query.Query;
+import org.apache.geode.cache.query.QueryService;
+import org.apache.geode.cache.query.SelectResults;
+import org.apache.geode.cache.query.data.City;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.DUnitBlackboard;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.rules.ClientCacheRule;
+import org.apache.geode.test.junit.rules.ExecutorServiceRule;
+
+public class PRClearQueryIndexDUnitTest {
+  public static final String MUMBAI_QUERY = "select * from /cities c where c.name = 'MUMBAI'";
+  public static final String ID_10_QUERY = "select * from /cities c where c.id = 10";
+  @ClassRule
+  public static ClusterStartupRule cluster = new ClusterStartupRule(4, true);
+
+  private static MemberVM server1;
+  private static MemberVM server2;
+
+  private static DUnitBlackboard blackboard;
+
+  @Rule
+  public ClientCacheRule clientCacheRule = new ClientCacheRule();
+
+  @Rule
+  public ExecutorServiceRule executor = ExecutorServiceRule.builder().build();
+
+  private ClientCache clientCache;
+  private Region cities;
+
+  // class-level test setup: start the servers and create the regions and indexes on them
+  @BeforeClass
+  public static void beforeClass() {
+    int locatorPort = ClusterStartupRule.getDUnitLocatorPort();
+    server1 = cluster.startServerVM(1, s -> s.withConnectionToLocator(locatorPort)
+        .withProperty(SERIALIZABLE_OBJECT_FILTER, "org.apache.geode.cache.query.data.*")
+        .withRegion(RegionShortcut.PARTITION, "cities"));
+    server2 = cluster.startServerVM(2, s -> s.withConnectionToLocator(locatorPort)
+        .withProperty(SERIALIZABLE_OBJECT_FILTER, "org.apache.geode.cache.query.data.*")
+        .withRegion(RegionShortcut.PARTITION, "cities"));
+
+    server1.invoke(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      Region region = cache.getRegion("cities");
+      // create indexes
+      QueryService queryService = cache.getQueryService();
+      queryService.createKeyIndex("cityId", "c.id", "/cities c");
+      queryService.createIndex("cityName", "c.name", "/cities c");
+      assertThat(cache.getQueryService().getIndexes(region))
+          .extracting(Index::getName).containsExactlyInAnyOrder("cityId", "cityName");
+    });
+
+    server2.invoke(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      Region region = cache.getRegion("cities");
+      assertThat(cache.getQueryService().getIndexes(region))
+          .extracting(Index::getName).containsExactlyInAnyOrder("cityId", "cityName");
+    });
+  }
+
+  // before every test method, create the client cache and region
+  @Before
+  public void before() throws Exception {
+    int locatorPort = ClusterStartupRule.getDUnitLocatorPort();
+    clientCache = clientCacheRule.withLocatorConnection(locatorPort).createCache();
+    cities = clientCacheRule.createProxyRegion("cities");
+  }
+
+  @Test
+  public void clearOnEmptyRegion() throws Exception {
+    cities.clear();
+    invokeInEveryMember(() -> {
+      verifyIndexesAfterClear("cities", "cityId", "cityName");
+    }, server1, server2);
+
+    IntStream.range(0, 10).forEach(i -> cities.put(i, new City(i)));
+    cities.clear();
+    invokeInEveryMember(() -> {
+      verifyIndexesAfterClear("cities", "cityId", "cityName");
+    }, server1, server2);
+  }
+
+  @Test
+  public void createIndexWhileClear() throws Exception {
+    IntStream.range(0, 1000).forEach(i -> cities.put(i, new City(i)));
+
+    // create index while clear
+    AsyncInvocation createIndex = server1.invokeAsync("create index", () -> {
+      Cache cache = ClusterStartupRule.getCache();
+      QueryService queryService = cache.getQueryService();
+      Index cityZip = queryService.createIndex("cityZip", "c.zip", "/cities c");
+      assertThat(cityZip).isNotNull();
+    });
+
+    // run clear 3 times back to back to increase the concurrency between clear and createIndex
+    for (int i = 0; i < 3; i++) {
+      cities.clear();
+    }
+    createIndex.await();
+
+    invokeInEveryMember(() -> {
+      verifyIndexesAfterClear("cities", "cityId", "cityName");
+    }, server1, server2);
+
+    QueryService queryService = clientCache.getQueryService();
+    Query query =
+        queryService.newQuery("select * from /cities c where c.zip < " + (City.ZIP_START + 10));
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(0);
+
+    IntStream.range(0, 10).forEach(i -> cities.put(i, new City(i)));
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(10);
+  }
+
+  @Test
+  public void createIndexWhileClearOnReplicateRegion() throws Exception {
+    invokeInEveryMember(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      cache.createRegionFactory(RegionShortcut.PARTITION)
+          .create("replicateCities");
+    }, server1, server2);
+
+    Region replicateCities = clientCacheRule.createProxyRegion("replicateCities");
+    IntStream.range(0, 1000).forEach(i -> replicateCities.put(i, new City(i)));
+
+    // create index while clear
+    AsyncInvocation createIndex = server1.invokeAsync("create index on replicate regions", () -> {
+      Cache cache = ClusterStartupRule.getCache();
+      QueryService queryService = cache.getQueryService();
+      Index cityZip = queryService.createIndex("cityZip_replicate", "c.zip", "/replicateCities c");
+      assertThat(cityZip).isNotNull();
+    });
+
+    // run clear 3 times back to back to increase the concurrency between clear and createIndex
+    for (int i = 0; i < 3; i++) {
+      replicateCities.clear();
+    }
+    createIndex.await();
+
+    invokeInEveryMember(() -> {
+      verifyIndexesAfterClear("replicateCities", "cityZip_replicate");
+    }, server1, server2);
+
+    QueryService queryService = clientCache.getQueryService();
+    Query query =
+        queryService
+            .newQuery("select * from /replicateCities c where c.zip < " + (City.ZIP_START + 10));
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(0);
+
+    IntStream.range(0, 10).forEach(i -> replicateCities.put(i, new City(i)));
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(10);
+  }
+
+  @Test
+  public void removeIndexWhileClear() throws Exception {
+    // create cityZip index
+    server1.invoke("create index", () -> {
+      Cache cache = ClusterStartupRule.getCache();
+      QueryService queryService = cache.getQueryService();
+      Index cityZip = queryService.createIndex("cityZip", "c.zip", "/cities c");
+      assertThat(cityZip).isNotNull();
+    });
+
+    // remove index while clear
+    // removeIndex has to be invoked on each server. It's not distributed
+    AsyncInvocation removeIndex1 = server1.invokeAsync("remove index",
+        PRClearQueryIndexDUnitTest::removeCityZipIndex);
+    AsyncInvocation removeIndex2 = server2.invokeAsync("remove index",
+        PRClearQueryIndexDUnitTest::removeCityZipIndex);
+
+    cities.clear();
+    removeIndex1.await();
+    removeIndex2.await();
+
+    // make sure removeIndex and clear operations are successful
+    invokeInEveryMember(() -> {
+      InternalCache internalCache = ClusterStartupRule.getCache();
+      QueryService qs = internalCache.getQueryService();
+      Region region = internalCache.getRegion("cities");
+      assertThat(region.size()).isEqualTo(0);
+      // verify that only the 2 indexes created at the beginning of the tests exist
+      assertThat(qs.getIndexes(region)).extracting(Index::getName)
+          .containsExactlyInAnyOrder("cityId", "cityName");
+    }, server1, server2);
+  }
+
+  private static void removeCityZipIndex() {
+    Cache cache = ClusterStartupRule.getCache();
+    QueryService qs = cache.getQueryService();
+    Region<Object, Object> region = cache.getRegion("cities");
+    Index cityZip = qs.getIndex(region, "cityZip");
+    if (cityZip != null) {
+      qs.removeIndex(cityZip);
+    }
+  }
+
+  @Test
+  public void verifyQuerySucceedsAfterClear() throws Exception {
+    // put in some data
+    IntStream.range(0, 100).forEach(i -> cities.put(i, new City(i)));
+
+    QueryService queryService = clientCache.getQueryService();
+    Query query = queryService.newQuery(MUMBAI_QUERY);
+    Query query2 = queryService.newQuery(ID_10_QUERY);
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(50);
+    assertThat(((SelectResults) query2.execute()).size()).isEqualTo(1);
+
+    cities.clear();
+    invokeInEveryMember(() -> {
+      verifyIndexesAfterClear("cities", "cityId", "cityName");
+    }, server1, server2);
+
+    assertThat(((SelectResults) query.execute()).size()).isEqualTo(0);
+    assertThat(((SelectResults) query2.execute()).size()).isEqualTo(0);
+  }
+
+  private static void verifyIndexesAfterClear(String regionName, String... indexes) {
+    InternalCache internalCache = ClusterStartupRule.getCache();
+    QueryService qs = internalCache.getQueryService();
+    Region region = internalCache.getRegion(regionName);
+    assertThat(region.size()).isEqualTo(0);
+    for (String indexName : indexes) {
+      Index index = qs.getIndex(region, indexName);
+      IndexStatistics statistics = index.getStatistics();
+      assertThat(statistics.getNumberOfKeys()).isEqualTo(0);
+      assertThat(statistics.getNumberOfValues()).isEqualTo(0);
+    }
+  }
+
+  @Test
+  public void concurrentClearAndQuery() {
+    QueryService queryService = clientCache.getQueryService();
+    Query query = queryService.newQuery(MUMBAI_QUERY);
+    Query query2 = queryService.newQuery(ID_10_QUERY);
+
+    IntStream.range(0, 100).forEach(i -> cities.put(i, new City(i)));
+
+    server1.invokeAsync(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      Region region = cache.getRegion("cities");
+      region.clear();
+    });
+
+    await().untilAsserted(() -> {
+      assertThat(((SelectResults) query.execute()).size()).isEqualTo(0);
+      assertThat(((SelectResults) query2.execute()).size()).isEqualTo(0);
+    });
+  }
+
+  @Test
+  public void concurrentClearAndPut() throws Exception {
+    AsyncInvocation puts = server1.invokeAsync(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      Region region = cache.getRegion("cities");
+      for (int i = 0; i < 1000; i++) {
+        // wait for gate to open
+        getBlackboard().waitForGate("proceedToPut", 60, TimeUnit.SECONDS);
+        region.put(i, new City(i));
+      }
+    });
+
+    AsyncInvocation clears = server2.invokeAsync(() -> {
+      Cache cache = ClusterStartupRule.getCache();
+      Region region = cache.getRegion("cities");
+      // do clear 10 times
+      for (int i = 0; i < 10; i++) {
+        try {
+          // close the gate so puts cannot proceed
+          getBlackboard().clearGate("proceedToPut");
+          region.clear();
+          verifyIndexesAfterClear("cities", "cityId", "cityName");
+        } finally {
+          // open the gate so puts can proceed again
+          getBlackboard().signalGate("proceedToPut");
+        }
+      }
+    });
+
+    puts.await();
+    clears.await();
+  }
+
+  @Test
+  public void serverLeavingAndJoiningWhilePutAndClear() throws Exception {
+    int locatorPort = ClusterStartupRule.getDUnitLocatorPort();
+    Future<Void> startStopServer = executor.submit(() -> {
+      for (int i = 0; i < 3; i++) {
+        MemberVM server3 = cluster.startServerVM(3, s -> s.withConnectionToLocator(locatorPort)
+            .withProperty(SERIALIZABLE_OBJECT_FILTER, "org.apache.geode.cache.query.data.*")
+            .withRegion(RegionShortcut.PARTITION, "cities"));
+        server3.stop(false);
+      }
+    });
+
+    Future<Void> putAndClear = executor.submit(() -> {
+      for (int i = 0; i < 30; i++) {
+        IntStream.range(0, 100).forEach(j -> cities.put(j, new City(j)));
+        try {
+          cities.clear();
+
+          // only verify when the clear succeeded
+          QueryService queryService = clientCache.getQueryService();
+          Query query = queryService.newQuery(MUMBAI_QUERY);
+          Query query2 = queryService.newQuery(ID_10_QUERY);
+          assertThat(((SelectResults) query.execute()).size()).isEqualTo(0);
+          assertThat(((SelectResults) query2.execute()).size()).isEqualTo(0);
+        } catch (ServerOperationException e) {
+          assertThat(e.getCause().getMessage())
+              .contains("Unable to clear all the buckets from the partitioned region cities")
+              .contains("either data (buckets) moved or member departed");
+        }
+      }
+    });
+    startStopServer.get(60, TimeUnit.SECONDS);
+    putAndClear.get(60, TimeUnit.SECONDS);
+  }
+
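+  // the blackboard gates used above are shared across the dunit VMs, so waitForGate, signalGate
+  // and clearGate let the putter on server1 and the clearer on server2 coordinate with each other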
+  private static DUnitBlackboard getBlackboard() {
+    if (blackboard == null) {
+      blackboard = new DUnitBlackboard();
+    }
+    return blackboard;
+  }
+
+  @After
+  public void tearDown() {
+    invokeInEveryMember(() -> {
+      if (blackboard != null) {
+        blackboard.clearGate("proceedToPut");
+      }
+      // remove the cityZip index
+      removeCityZipIndex();
+    }, server1, server2);
+  }
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionAfterClearNotificationDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionAfterClearNotificationDUnitTest.java
new file mode 100644
index 0000000..237b6a8
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionAfterClearNotificationDUnitTest.java
@@ -0,0 +1,372 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.geode.test.dunit.rules.ClusterStartupRule.getCache;
+import static org.apache.geode.test.dunit.rules.ClusterStartupRule.getClientCache;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.IntStream;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.InterestResultPolicy;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.client.ClientRegionShortcut;
+import org.apache.geode.cache.server.CacheServer;
+import org.apache.geode.cache.util.CacheListenerAdapter;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.DistributionMessageObserver;
+import org.apache.geode.test.awaitility.GeodeAwaitility;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.DUnitBlackboard;
+import org.apache.geode.test.dunit.SerializableCallableIF;
+import org.apache.geode.test.dunit.rules.ClientVM;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+
+public class PartitionedRegionAfterClearNotificationDUnitTest implements Serializable {
+  protected static final String REGION_NAME = "testPR";
+  protected static final int NUM_ENTRIES = 100;
+
+  protected int locatorPort;
+  protected MemberVM locator;
+  protected MemberVM dataStore1;
+  protected MemberVM dataStore2;
+  protected MemberVM dataStore3;
+  protected MemberVM accessor;
+
+  protected ClientVM client1;
+  protected ClientVM client2;
+
+  private static volatile DUnitBlackboard blackboard;
+
+  @Rule
+  public ClusterStartupRule cluster = new ClusterStartupRule(7);
+
+  @Before
+  public void setUp() throws Exception {
+    locator = cluster.startLocatorVM(0);
+    locatorPort = locator.getPort();
+    dataStore1 = cluster.startServerVM(1, getProperties(), locatorPort);
+    dataStore2 = cluster.startServerVM(2, getProperties(), locatorPort);
+    dataStore3 = cluster.startServerVM(3, getProperties(), locatorPort);
+    accessor = cluster.startServerVM(4, getProperties(), locatorPort);
+
+    client1 = cluster.startClientVM(5,
+        c -> c.withPoolSubscription(true).withLocatorConnection((locatorPort)));
+    client2 = cluster.startClientVM(6,
+        c -> c.withPoolSubscription(true).withLocatorConnection((locatorPort)));
+
+    dataStore1.invoke(this::initDataStore);
+    dataStore2.invoke(this::initDataStore);
+    dataStore3.invoke(this::initDataStore);
+    accessor.invoke(this::initAccessor);
+
+    getBlackboard().initBlackboard();
+  }
+
+  protected RegionShortcut getRegionShortCut() {
+    return RegionShortcut.PARTITION_REDUNDANT;
+  }
+
+  protected Properties getProperties() {
+    Properties properties = new Properties();
+    return properties;
+  }
+
+  private Region getRegion(boolean isClient) {
+    if (isClient) {
+      return getClientCache().getRegion(REGION_NAME);
+    } else {
+      return getCache().getRegion(REGION_NAME);
+    }
+  }
+
+  private void verifyRegionSize(boolean isClient, int expectedNum) {
+    GeodeAwaitility.await()
+        .untilAsserted(() -> assertThat(getRegion(isClient).size()).isEqualTo(expectedNum));
+  }
+
+  private void initClientCache() {
+    Region region = getClientCache().createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
+        .create(REGION_NAME);
+    region.registerInterestForAllKeys(InterestResultPolicy.KEYS);
+  }
+
+  private void stopServers() {
+    List<CacheServer> cacheServers = getCache().getCacheServers();
+    for (CacheServer server : cacheServers) {
+      server.stop();
+    }
+  }
+
+  private void initDataStore() {
+    getCache().createRegionFactory(getRegionShortCut())
+        .setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(10).create())
+        .addCacheListener(new CountingCacheListener())
+        .create(REGION_NAME);
+  }
+
+  private void initAccessor() {
+    RegionShortcut shortcut = getRegionShortCut();
+    getCache().createRegionFactory(shortcut)
+        .setPartitionAttributes(
+            new PartitionAttributesFactory().setTotalNumBuckets(10).setLocalMaxMemory(0).create())
+        .addCacheListener(new CountingCacheListener())
+        .create(REGION_NAME);
+  }
+
+  private void feed(boolean isClient) {
+    Region region = getRegion(isClient);
+    IntStream.range(0, NUM_ENTRIES).forEach(i -> region.put(i, "value" + i));
+  }
+
+  private void verifyServerRegionSize(int expectedNum) {
+    accessor.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore1.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore2.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore3.invoke(() -> verifyRegionSize(false, expectedNum));
+  }
+
+  private void verifyClientRegionSize(int expectedNum) {
+    client1.invoke(() -> verifyRegionSize(true, expectedNum));
+    client2.invoke(() -> verifyRegionSize(true, expectedNum));
+  }
+
+  private void verifyCacheListenerTriggerCount(MemberVM serverVM) {
+    SerializableCallableIF<Integer> getListenerTriggerCount = () -> {
+      CountingCacheListener countingCacheListener =
+          (CountingCacheListener) getRegion(false).getAttributes()
+              .getCacheListeners()[0];
+      return countingCacheListener.getClears();
+    };
+
+    int count = accessor.invoke(getListenerTriggerCount)
+        + dataStore1.invoke(getListenerTriggerCount)
+        + dataStore2.invoke(getListenerTriggerCount)
+        + dataStore3.invoke(getListenerTriggerCount);
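+    // the clear triggers afterRegionClear a total of 4 times across the 4 members
+    // (3 data stores + accessor)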
+    assertThat(count).isEqualTo(4);
+
+    if (serverVM != null) {
+      assertThat(serverVM.invoke(getListenerTriggerCount)).isEqualTo(1);
+    }
+  }
+
+  @Test
+  public void invokeClearOnDataStoreAndVerifyListenerCount() {
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    dataStore1.invoke(() -> getRegion(false).clear());
+
+    verifyServerRegionSize(0);
+    verifyCacheListenerTriggerCount(dataStore1);
+  }
+
+  @Test
+  public void invokeClearOnAccessorAndVerifyListenerCount() {
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    accessor.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+    verifyCacheListenerTriggerCount(accessor);
+  }
+
+  @Test
+  public void invokeClearFromClientAndVerifyListenerCount() {
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    client1.invoke(() -> feed(true));
+    verifyClientRegionSize(NUM_ENTRIES);
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    client1.invoke(() -> getRegion(true).clear());
+
+    verifyServerRegionSize(0);
+    verifyClientRegionSize(0);
+    verifyCacheListenerTriggerCount(null);
+  }
+
+  @Test
+  public void invokeClearFromClientWithAccessorAsServer() {
+    dataStore1.invoke(this::stopServers);
+    dataStore2.invoke(this::stopServers);
+    dataStore3.invoke(this::stopServers);
+
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    client1.invoke(() -> feed(true));
+    verifyClientRegionSize(NUM_ENTRIES);
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    client1.invoke(() -> getRegion(true).clear());
+
+    verifyServerRegionSize(0);
+    verifyClientRegionSize(0);
+    verifyCacheListenerTriggerCount(null);
+  }
+
+  @Test
+  public void invokeClearFromDataStoreWithClientInterest() {
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    dataStore1.invoke(() -> getRegion(false).clear());
+
+    verifyServerRegionSize(0);
+    verifyCacheListenerTriggerCount(dataStore1);
+  }
+
+  @Test(expected = AssertionError.class)
+  public void verifyTheLocksAreClearedWhenMemberDepartsAfterTakingClearLockOnRemoteMembers()
+      throws Exception {
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    dataStore2.invoke(() -> DistributionMessageObserver.setInstance(
+        testHookToKillMemberCallingClearBeforeMessageProcessed()));
+
+    AsyncInvocation ds1ClearAsync = dataStore1.invokeAsync(() -> getRegion(false).clear());
+
+    getBlackboard().waitForGate("CLOSE_CACHE", 30, SECONDS);
+
+    dataStore1.invoke(() -> getCache().close());
+    getBlackboard().signalGate("CACHE_CLOSED");
+
+    // This should not be blocked.
+    dataStore2.invoke(() -> feed(false));
+    dataStore3.invoke(() -> feed(false));
+
+    dataStore2.invoke(() -> verifyRegionSize(false, NUM_ENTRIES));
+    dataStore3.invoke(() -> verifyRegionSize(false, NUM_ENTRIES));
+
+    ds1ClearAsync.await();
+  }
+
+  @Test
+  public void verifyTheLocksAreClearedWhenMemberDepartsAfterTakingClearLockOnRemoteMembersAfterMessageProcessed()
+      throws Exception {
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    dataStore2.invoke(() -> DistributionMessageObserver.setInstance(
+        testHookToKillMemberCallingClearAfterMessageProcessed()));
+
+    AsyncInvocation ds1ClearAsync = dataStore1.invokeAsync(() -> getRegion(false).clear());
+
+    getBlackboard().waitForGate("CLOSE_CACHE", 30, SECONDS);
+
+    dataStore1.invoke(() -> getCache().close());
+    getBlackboard().signalGate("CACHE_CLOSED");
+
+    // This should not be blocked.
+    dataStore2.invoke(() -> feed(false));
+    dataStore3.invoke(() -> feed(false));
+
+    dataStore2.invoke(() -> verifyRegionSize(false, NUM_ENTRIES));
+    dataStore3.invoke(() -> verifyRegionSize(false, NUM_ENTRIES));
+
+    ds1ClearAsync.await();
+  }
+
+  private static class CountingCacheListener extends CacheListenerAdapter {
+    private final AtomicInteger clears = new AtomicInteger();
+
+    @Override
+    public void afterRegionClear(RegionEvent event) {
+      clears.incrementAndGet();
+    }
+
+    int getClears() {
+      return clears.get();
+    }
+  }
+
+  private DistributionMessageObserver testHookToKillMemberCallingClearBeforeMessageProcessed() {
+    return new DistributionMessageObserver() {
+
+      @Override
+      public void beforeProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
+        if (message instanceof PartitionedRegionClearMessage) {
+          if (((PartitionedRegionClearMessage) message)
+              .getOp() == PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR) {
+            DistributionMessageObserver.setInstance(null);
+            getBlackboard().signalGate("CLOSE_CACHE");
+            try {
+              getBlackboard().waitForGate("CACHE_CLOSED", 30, SECONDS);
+              GeodeAwaitility.await().untilAsserted(
+                  () -> assertThat(dm.isCurrentMember(message.getSender())).isFalse());
+            } catch (TimeoutException | InterruptedException e) {
+              throw new RuntimeException("Failed waiting for signal.", e);
+            }
+          }
+        }
+      }
+    };
+  }
+
+  private DistributionMessageObserver testHookToKillMemberCallingClearAfterMessageProcessed() {
+    return new DistributionMessageObserver() {
+      @Override
+      public void afterProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
+        if (message instanceof PartitionedRegionClearMessage) {
+          if (((PartitionedRegionClearMessage) message)
+              .getOp() == PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR) {
+            DistributionMessageObserver.setInstance(null);
+            getBlackboard().signalGate("CLOSE_CACHE");
+            try {
+              getBlackboard().waitForGate("CACHE_CLOSED", 30, SECONDS);
+            } catch (TimeoutException | InterruptedException e) {
+              throw new RuntimeException("Failed waiting for signal.", e);
+            }
+          }
+        }
+      }
+    };
+  }
+
+  private static DUnitBlackboard getBlackboard() {
+    if (blackboard == null) {
+      blackboard = new DUnitBlackboard();
+    }
+    return blackboard;
+  }
+
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
new file mode 100644
index 0000000..b871926
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearDUnitTest.java
@@ -0,0 +1,465 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.internal.Assert.fail;
+import static org.apache.geode.test.dunit.rules.ClusterStartupRule.getCache;
+import static org.apache.geode.test.dunit.rules.ClusterStartupRule.getClientCache;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.IntStream;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.InterestResultPolicy;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.client.ClientRegionShortcut;
+import org.apache.geode.cache.util.CacheWriterAdapter;
+import org.apache.geode.test.dunit.SerializableCallableIF;
+import org.apache.geode.test.dunit.rules.ClientVM;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+
+public class PartitionedRegionClearDUnitTest implements Serializable {
+  protected static final String REGION_NAME = "testPR";
+  protected static final int TOTAL_BUCKET_NUM = 10;
+  protected static final int NUM_ENTRIES = 1000;
+
+  protected int locatorPort;
+  protected MemberVM locator;
+  protected MemberVM dataStore1, dataStore2, dataStore3, accessor;
+  protected ClientVM client1, client2;
+
+  private static final Logger logger = LogManager.getLogger();
+
+  @Rule
+  public ClusterStartupRule cluster = new ClusterStartupRule(7);
+
+  @Before
+  public void setUp() throws Exception {
+    locator = cluster.startLocatorVM(0);
+    locatorPort = locator.getPort();
+    dataStore1 = cluster.startServerVM(1, getProperties(), locatorPort);
+    dataStore2 = cluster.startServerVM(2, getProperties(), locatorPort);
+    dataStore3 = cluster.startServerVM(3, getProperties(), locatorPort);
+    accessor = cluster.startServerVM(4, getProperties(), locatorPort);
+    client1 = cluster.startClientVM(5,
+        c -> c.withPoolSubscription(true).withLocatorConnection((locatorPort)));
+    client2 = cluster.startClientVM(6,
+        c -> c.withPoolSubscription(true).withLocatorConnection((locatorPort)));
+  }
+
+  protected RegionShortcut getRegionShortCut() {
+    return RegionShortcut.PARTITION_REDUNDANT;
+  }
+
+  protected Properties getProperties() {
+    Properties properties = new Properties();
+    return properties;
+  }
+
+  private Region getRegion(boolean isClient) {
+    if (isClient) {
+      return getClientCache().getRegion(REGION_NAME);
+    } else {
+      return getCache().getRegion(REGION_NAME);
+    }
+  }
+
+  private void verifyRegionSize(boolean isClient, int expectedNum) {
+    assertThat(getRegion(isClient).size()).isEqualTo(expectedNum);
+  }
+
+  private void initClientCache() {
+    Region region = getClientCache().createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
+        .create(REGION_NAME);
+    region.registerInterestForAllKeys(InterestResultPolicy.KEYS);
+  }
+
+  private void initDataStore(boolean withWriter) {
+    RegionFactory factory = getCache().createRegionFactory(getRegionShortCut())
+        .setPartitionAttributes(
+            new PartitionAttributesFactory().setTotalNumBuckets(TOTAL_BUCKET_NUM).create());
+    if (withWriter) {
+      factory.setCacheWriter(new CountingCacheWriter());
+    }
+    factory.create(REGION_NAME);
+    clearsByRegion = new HashMap<>();
+    destroysByRegion = new HashMap<>();
+  }
+
+  private void initAccessor(boolean withWriter) {
+    RegionShortcut shortcut = getRegionShortCut();
+    if (shortcut.isPersistent()) {
+      if (shortcut == RegionShortcut.PARTITION_PERSISTENT) {
+        shortcut = RegionShortcut.PARTITION;
+      } else if (shortcut == RegionShortcut.PARTITION_PERSISTENT_OVERFLOW) {
+        shortcut = RegionShortcut.PARTITION_OVERFLOW;
+      } else if (shortcut == RegionShortcut.PARTITION_REDUNDANT_PERSISTENT) {
+        shortcut = RegionShortcut.PARTITION_REDUNDANT;
+      } else if (shortcut == RegionShortcut.PARTITION_REDUNDANT_PERSISTENT_OVERFLOW) {
+        shortcut = RegionShortcut.PARTITION_REDUNDANT_OVERFLOW;
+      } else {
+        fail("Wrong region type: " + shortcut);
+      }
+    }
+    // localMaxMemory(0) keeps this member an accessor that hosts no buckets
+    RegionFactory factory = getCache().createRegionFactory(shortcut)
+        .setPartitionAttributes(new PartitionAttributesFactory()
+            .setTotalNumBuckets(TOTAL_BUCKET_NUM).setLocalMaxMemory(0).create());
+    if (withWriter) {
+      factory.setCacheWriter(new CountingCacheWriter());
+    }
+    factory.create(REGION_NAME);
+    clearsByRegion = new HashMap<>();
+    destroysByRegion = new HashMap<>();
+  }
+
+  private void feed(boolean isClient) {
+    Region region = getRegion(isClient);
+    IntStream.range(0, NUM_ENTRIES).forEach(i -> region.put(i, "value" + i));
+  }
+
+  private void verifyServerRegionSize(int expectedNum) {
+    accessor.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore1.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore2.invoke(() -> verifyRegionSize(false, expectedNum));
+    dataStore3.invoke(() -> verifyRegionSize(false, expectedNum));
+  }
+
+  private void verifyDatastoreStats(MemberVM datastore, boolean isCoordinator) {
+    datastore.invoke(() -> {
+      PartitionedRegion region = (PartitionedRegion) getRegion(false);
+      long clearCount = 0L;
+      int bucketCount = region.getDataStore().getAllLocalBucketRegions().size();
+
+      for (BucketRegion bucket : region.getDataStore().getAllLocalBucketRegions()) {
+        if (clearCount == 0) {
+          clearCount = bucket.getCachePerfStats().getBucketClearCount();
+        }
+        assertThat(bucket.getCachePerfStats().getBucketClearCount()).isEqualTo(bucketCount);
+      }
+
+      CachePerfStats stats = region.getRegionCachePerfStats();
+
+      assertThat(stats.getRegionClearCount()).isEqualTo(1);
+      assertThat(stats.getPartitionedRegionClearLocalDuration())
+          .isGreaterThan(0);
+      if (isCoordinator) {
+        assertThat(stats.getPartitionedRegionClearTotalDuration())
+            .isGreaterThan(0);
+      } else {
+        assertThat(stats.getPartitionedRegionClearTotalDuration())
+            .isEqualTo(0);
+      }
+    });
+  }
+
+  private void verifyClientRegionSize(int expectedNum) {
+    client1.invoke(() -> verifyRegionSize(true, expectedNum));
+    // TODO: verify client2 as well once registered clients are notified of the clear
+    // client2.invoke(() -> verifyRegionSize(true, expectedNum));
+  }
+
+  SerializableCallableIF<Integer> getWriterClears = () -> {
+    int clears =
+        clearsByRegion.get(REGION_NAME) == null ? 0 : clearsByRegion.get(REGION_NAME).get();
+    return clears;
+  };
+
+  SerializableCallableIF<Integer> getWriterDestroys = () -> {
+    int destroys =
+        destroysByRegion.get(REGION_NAME) == null ? 0 : destroysByRegion.get(REGION_NAME).get();
+    return destroys;
+  };
+
+  SerializableCallableIF<Integer> getBucketRegionWriterClears = () -> {
+    int clears = 0;
+    for (int i = 0; i < TOTAL_BUCKET_NUM; i++) {
+      String bucketRegionName = "_B__" + REGION_NAME + "_" + i;
+      clears += clearsByRegion.get(bucketRegionName) == null ? 0
+          : clearsByRegion.get(bucketRegionName).get();
+    }
+    return clears;
+  };
+
+  SerializableCallableIF<Integer> getBucketRegionWriterDestroys = () -> {
+    int destroys = 0;
+    for (int i = 0; i < TOTAL_BUCKET_NUM; i++) {
+      String bucketRegionName = "_B__" + REGION_NAME + "_" + i;
+      destroys += destroysByRegion.get(bucketRegionName) == null ? 0
+          : destroysByRegion.get(bucketRegionName).get();
+    }
+    return destroys;
+  };
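+
+  // The bucket-level writer counters above are expected to stay at 0: the tests below assert
+  // that clear and destroyRegion invoke only the partitioned-region-level writer, on exactly
+  // one member.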
+
+  void configureServers(boolean dataStoreWithWriter, boolean accessorWithWriter) {
+    dataStore1.invoke(() -> initDataStore(dataStoreWithWriter));
+    dataStore2.invoke(() -> initDataStore(dataStoreWithWriter));
+    dataStore3.invoke(() -> initDataStore(dataStoreWithWriter));
+    accessor.invoke(() -> initAccessor(accessorWithWriter));
+    // make sure dataStore3 is the only data store with a cacheWriter
+    dataStore1.invoke(() -> {
+      Region region = getRegion(false);
+      region.getAttributesMutator().setCacheWriter(null);
+    });
+    dataStore2.invoke(() -> {
+      Region region = getRegion(false);
+      region.getAttributesMutator().setCacheWriter(null);
+    });
+  }
+
+  @Test
+  public void normalClearFromDataStoreWithWriterOnDataStore() {
+    configureServers(true, true);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    dataStore3.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+
+    // destroy the region to check that the same callbacks are triggered as for clear
+    dataStore3.invoke(() -> {
+      Region region = getRegion(false);
+      region.destroyRegion();
+    });
+
+    assertThat(dataStore1.invoke(getWriterDestroys)).isEqualTo(dataStore1.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore2.invoke(getWriterDestroys)).isEqualTo(dataStore2.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore3.invoke(getWriterDestroys)).isEqualTo(dataStore3.invoke(getWriterClears))
+        .isEqualTo(1);
+    assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
+        .isEqualTo(0);
+
+    assertThat(dataStore3.invoke(getBucketRegionWriterDestroys))
+        .isEqualTo(dataStore3.invoke(getBucketRegionWriterClears))
+        .isEqualTo(0);
+  }
+
+  @Test
+  public void normalClearFromDataStoreWithoutWriterOnDataStore() {
+    configureServers(false, true);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    dataStore1.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+
+    // destroy the region to check that the same callbacks are triggered as for clear
+    dataStore1.invoke(() -> {
+      Region region = getRegion(false);
+      region.destroyRegion();
+    });
+
+    assertThat(dataStore1.invoke(getWriterDestroys)).isEqualTo(dataStore1.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore2.invoke(getWriterDestroys)).isEqualTo(dataStore2.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore3.invoke(getWriterDestroys)).isEqualTo(dataStore3.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
+        .isEqualTo(1);
+
+    assertThat(accessor.invoke(getBucketRegionWriterDestroys))
+        .isEqualTo(accessor.invoke(getBucketRegionWriterClears))
+        .isEqualTo(0);
+  }
+
+  @Test
+  public void normalClearFromAccessorWithWriterOnDataStore() {
+    configureServers(true, true);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    accessor.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+
+    // destroy the region to check that the same callbacks are triggered as for clear
+    accessor.invoke(() -> {
+      Region region = getRegion(false);
+      region.destroyRegion();
+    });
+
+    assertThat(dataStore1.invoke(getWriterDestroys)).isEqualTo(dataStore1.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore2.invoke(getWriterDestroys)).isEqualTo(dataStore2.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore3.invoke(getWriterDestroys)).isEqualTo(dataStore3.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
+        .isEqualTo(1);
+
+    assertThat(accessor.invoke(getBucketRegionWriterDestroys))
+        .isEqualTo(accessor.invoke(getBucketRegionWriterClears))
+        .isEqualTo(0);
+  }
+
+  @Test
+  public void normalClearFromAccessorWithoutWriterButWithWriterOnDataStore() {
+    configureServers(true, false);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    accessor.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+
+    // destroy the region to check that the same callbacks are triggered as for clear
+    accessor.invoke(() -> {
+      Region region = getRegion(false);
+      region.destroyRegion();
+    });
+
+    assertThat(dataStore1.invoke(getWriterDestroys)).isEqualTo(dataStore1.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore2.invoke(getWriterDestroys)).isEqualTo(dataStore2.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore3.invoke(getWriterDestroys)).isEqualTo(dataStore3.invoke(getWriterClears))
+        .isEqualTo(1);
+    assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
+        .isEqualTo(0);
+
+    assertThat(dataStore3.invoke(getBucketRegionWriterDestroys))
+        .isEqualTo(dataStore3.invoke(getBucketRegionWriterClears))
+        .isEqualTo(0);
+  }
+
+  @Test
+  public void normalClearFromDataStoreUpdatesStats() {
+    configureServers(false, true);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    // Verify no clears have been recorded in stats
+    dataStore1.invoke(() -> {
+      PartitionedRegion region = (PartitionedRegion) getRegion(false);
+
+      for (BucketRegion bucket : region.getDataStore().getAllLocalBucketRegions()) {
+        long clearCount = bucket.getCachePerfStats().getRegionClearCount();
+        assertThat(clearCount).isEqualTo(0);
+      }
+    });
+
+    accessor.invoke(() -> feed(false));
+    verifyServerRegionSize(NUM_ENTRIES);
+    dataStore1.invoke(() -> getRegion(false).clear());
+    verifyServerRegionSize(0);
+
+    // Verify the stats were properly updated for the bucket regions
+    verifyDatastoreStats(dataStore1, true);
+    verifyDatastoreStats(dataStore2, false);
+    verifyDatastoreStats(dataStore3, false);
+
+    // The accessor shouldn't increment the region clear count
+    accessor.invoke(() -> {
+      PartitionedRegion region = (PartitionedRegion) getRegion(false);
+
+      assertThat(region.getRegionCachePerfStats()).isNull();
+      assertThat(region.getCachePerfStats().getRegionClearCount()).isEqualTo(0);
+      assertThat(region.getCachePerfStats().getPartitionedRegionClearLocalDuration()).isEqualTo(0);
+      assertThat(region.getCachePerfStats().getPartitionedRegionClearTotalDuration()).isEqualTo(0);
+    });
+  }
+
+  @Test
+  public void normalClearFromClient() {
+    configureServers(true, false);
+    client1.invoke(this::initClientCache);
+    client2.invoke(this::initClientCache);
+
+    client1.invoke(() -> feed(true));
+    verifyClientRegionSize(NUM_ENTRIES);
+    verifyServerRegionSize(NUM_ENTRIES);
+
+    client1.invoke(() -> getRegion(true).clear());
+    verifyServerRegionSize(0);
+    verifyClientRegionSize(0);
+
+    // destroy the region to check that the same callbacks are triggered as for clear
+    client1.invoke(() -> {
+      Region region = getRegion(true);
+      region.destroyRegion();
+    });
+
+    assertThat(dataStore1.invoke(getWriterDestroys)).isEqualTo(dataStore1.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore2.invoke(getWriterDestroys)).isEqualTo(dataStore2.invoke(getWriterClears))
+        .isEqualTo(0);
+    assertThat(dataStore3.invoke(getWriterDestroys)).isEqualTo(dataStore3.invoke(getWriterClears))
+        .isEqualTo(1);
+    assertThat(accessor.invoke(getWriterDestroys)).isEqualTo(accessor.invoke(getWriterClears))
+        .isEqualTo(0);
+
+    assertThat(dataStore3.invoke(getBucketRegionWriterDestroys))
+        .isEqualTo(dataStore3.invoke(getBucketRegionWriterClears))
+        .isEqualTo(0);
+  }
+
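+  // per-VM counters recorded by CountingCacheWriter below; reset in initDataStore() and
+  // initAccessor() before each test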
+  public static HashMap<String, AtomicInteger> clearsByRegion = new HashMap<>();
+  public static HashMap<String, AtomicInteger> destroysByRegion = new HashMap<>();
+
+  private static class CountingCacheWriter extends CacheWriterAdapter {
+    @Override
+    public void beforeRegionClear(RegionEvent event) throws CacheWriterException {
+      Region region = event.getRegion();
+      AtomicInteger clears = clearsByRegion.get(region.getName());
+      if (clears == null) {
+        clears = new AtomicInteger(1);
+        clearsByRegion.put(region.getName(), clears);
+      } else {
+        clears.incrementAndGet();
+      }
+      logger
+          .info("Region " + region.getName() + " will be cleared, clear count is:" + clears.get());
+    }
+
+    @Override
+    public void beforeRegionDestroy(RegionEvent event) throws CacheWriterException {
+      Region region = event.getRegion();
+      AtomicInteger destroys = destroysByRegion.get(region.getName());
+      if (destroys == null) {
+        destroys = new AtomicInteger(1);
+        destroysByRegion.put(region.getName(), destroys);
+      } else {
+        destroys.incrementAndGet();
+      }
+      logger.info(
+          "Region " + region.getName() + " will be destroyed, destroy count is:" + destroys.get());
+    }
+  }
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithAlterRegionDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithAlterRegionDUnitTest.java
new file mode 100644
index 0000000..fb74eb3
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithAlterRegionDUnitTest.java
@@ -0,0 +1,803 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.io.Serializable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.IntStream;
+
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.AttributesMutator;
+import org.apache.geode.cache.CacheListener;
+import org.apache.geode.cache.CacheLoader;
+import org.apache.geode.cache.CacheLoaderException;
+import org.apache.geode.cache.CacheWriter;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.EntryEvent;
+import org.apache.geode.cache.ExpirationAction;
+import org.apache.geode.cache.ExpirationAttributes;
+import org.apache.geode.cache.LoaderHelper;
+import org.apache.geode.cache.PartitionedRegionPartialClearException;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.DistributionMessageObserver;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.membership.api.MembershipManagerHelper;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.DUnitBlackboard;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+import org.apache.geode.test.junit.rules.ExecutorServiceRule;
+
+public class PartitionedRegionClearWithAlterRegionDUnitTest implements Serializable {
+
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule();
+
+  @Rule
+  public CacheRule cacheRule = new CacheRule();
+
+  @Rule
+  public ExecutorServiceRule executorServiceRule = new ExecutorServiceRule();
+
+  private VM server1;
+
+  private VM server2;
+
+  private VM server3;
+
+  private static volatile DUnitBlackboard blackboard;
+
+  private static final String REGION_NAME = "testRegion";
+
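+  // Large entry count, presumably so that clear() stays in flight long enough to interleave
+  // with the concurrent alterRegion calls below.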
+  private static final int NUM_ENTRIES = 1000000;
+
+  private static final String GATE_NAME = "ALLOW_ALTER_REGION";
+
+  private void initialize() {
+    server1 = VM.getVM(0);
+    server2 = VM.getVM(1);
+
+    server1.invoke(() -> {
+      cacheRule.createCache();
+      cacheRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setStatisticsEnabled(true)
+          .create(REGION_NAME);
+    });
+
+    server2.invoke(() -> {
+      cacheRule.createCache();
+      cacheRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setStatisticsEnabled(true)
+          .create(REGION_NAME);
+    });
+
+    server1.invoke(() -> {
+      populateRegion();
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      assertThat(region.size()).isEqualTo(NUM_ENTRIES);
+    });
+
+    server2.invoke(() -> {
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      assertThat(region.size()).isEqualTo(NUM_ENTRIES);
+    });
+  }
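+
+  // Most tests below follow the same pattern: arm a DistributionMessageObserver on one server
+  // that opens the ALLOW_ALTER_REGION gate when a PartitionedRegionClearMessage arrives (either
+  // before or after the message is processed), start an alterRegion call that blocks on that
+  // gate, and concurrently clear the region from another server. Both operations are expected
+  // to complete without deadlock or exception.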
+
+  @Test
+  public void testClearRegionWhileAddingCacheLoaderBeforeProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverBeforeProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      alterRegionSetCacheLoader();
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileAddingCacheLoaderAfterProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverAfterProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      alterRegionSetCacheLoader();
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileAddingCacheWriterBeforeProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverBeforeProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      alterRegionSetCacheWriter();
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileAddingCacheWriterAfterProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverAfterProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      alterRegionSetCacheWriter();
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileAddingCacheListenerBeforeProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverBeforeProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      alterRegionSetCacheListener();
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileAddingCacheListenerAfterProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverAfterProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      alterRegionSetCacheListener();
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileChangingEvictionBeforeProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverBeforeProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      AttributesMutator attributesMutator = region.getAttributesMutator();
+      getBlackboard().waitForGate(GATE_NAME);
+      attributesMutator.getEvictionAttributesMutator().setMaximum(1);
+      assertThat(region.getAttributes().getEvictionAttributes().getMaximum()).isEqualTo(1);
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileChangingEvictionAfterProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverAfterProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      AttributesMutator attributesMutator = region.getAttributesMutator();
+      getBlackboard().waitForGate(GATE_NAME);
+      attributesMutator.getEvictionAttributesMutator().setMaximum(1);
+      assertThat(region.getAttributes().getEvictionAttributes().getMaximum()).isEqualTo(1);
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileChangingRegionTTLExpirationBeforeProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverBeforeProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      AttributesMutator attributesMutator = region.getAttributesMutator();
+      ExpirationAttributes expirationAttributes = new ExpirationAttributes();
+      getBlackboard().waitForGate(GATE_NAME);
+      attributesMutator.setRegionTimeToLive(expirationAttributes);
+      assertThat(region.getAttributes().getRegionTimeToLive()).isEqualTo(expirationAttributes);
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileChangingRegionTTLExpirationAfterProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverAfterProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      AttributesMutator attributesMutator = region.getAttributesMutator();
+      ExpirationAttributes expirationAttributes = new ExpirationAttributes();
+      getBlackboard().waitForGate(GATE_NAME);
+      attributesMutator.setRegionTimeToLive(expirationAttributes);
+      assertThat(region.getAttributes().getRegionTimeToLive()).isEqualTo(expirationAttributes);
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileChangingEntryTTLExpirationBeforeProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverBeforeProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      AttributesMutator attributesMutator = region.getAttributesMutator();
+      ExpirationAttributes expirationAttributes = new ExpirationAttributes();
+      getBlackboard().waitForGate(GATE_NAME);
+      attributesMutator.setEntryTimeToLive(expirationAttributes);
+      assertThat(region.getAttributes().getEntryTimeToLive()).isEqualTo(expirationAttributes);
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileChangingEntryTTLExpirationAfterProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverAfterProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      AttributesMutator attributesMutator = region.getAttributesMutator();
+      ExpirationAttributes expirationAttributes = new ExpirationAttributes();
+      getBlackboard().waitForGate(GATE_NAME);
+      attributesMutator.setEntryTimeToLive(expirationAttributes);
+      assertThat(region.getAttributes().getEntryTimeToLive()).isEqualTo(expirationAttributes);
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileChangingRegionIdleExpirationBeforeProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverBeforeProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      AttributesMutator attributesMutator = region.getAttributesMutator();
+      ExpirationAttributes expirationAttributes = new ExpirationAttributes();
+      getBlackboard().waitForGate(GATE_NAME);
+      attributesMutator.setRegionIdleTimeout(expirationAttributes);
+      assertThat(region.getAttributes().getRegionIdleTimeout()).isEqualTo(expirationAttributes);
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testClearRegionWhileChangingRegionIdleExpirationAfterProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverAfterProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      AttributesMutator attributesMutator = region.getAttributesMutator();
+      ExpirationAttributes expirationAttributes = new ExpirationAttributes();
+      getBlackboard().waitForGate(GATE_NAME);
+      attributesMutator.setRegionIdleTimeout(expirationAttributes);
+      assertThat(region.getAttributes().getRegionIdleTimeout()).isEqualTo(expirationAttributes);
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  @Ignore // See GEODE-8680
+  public void testClearRegionWhileChangingEntryIdleExpirationBeforeProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverBeforeProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      AttributesMutator attributesMutator = region.getAttributesMutator();
+      ExpirationAttributes expirationAttributes =
+          new ExpirationAttributes(1, ExpirationAction.DESTROY);
+      getBlackboard().waitForGate(GATE_NAME);
+      attributesMutator.setEntryIdleTimeout(expirationAttributes);
+      assertThat(region.getAttributes().getEntryIdleTimeout()).isEqualTo(expirationAttributes);
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  @Ignore // See GEODE-8680
+  public void testClearRegionWhileChangingEntryIdleExpirationAfterProcessMessage()
+      throws InterruptedException {
+    initialize();
+
+    server1.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverAfterProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      AttributesMutator attributesMutator = region.getAttributesMutator();
+      ExpirationAttributes expirationAttributes =
+          new ExpirationAttributes(1, ExpirationAction.DESTROY);
+      getBlackboard().waitForGate(GATE_NAME);
+      attributesMutator.setEntryIdleTimeout(expirationAttributes);
+      assertThat(region.getAttributes().getEntryIdleTimeout()).isEqualTo(expirationAttributes);
+    });
+
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testMemberLeaveBeforeProcessMessage() throws InterruptedException {
+    initialize();
+
+    server3 = VM.getVM(2);
+
+    server3.invoke(() -> {
+      cacheRule.createCache();
+      cacheRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setStatisticsEnabled(true)
+          .create(REGION_NAME);
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      assertThat(region.size()).isEqualTo(NUM_ENTRIES);
+    });
+
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    server3.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverBeforeProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
+          .isInstanceOf(PartitionedRegionPartialClearException.class);
+    });
+
+    AsyncInvocation asyncInvocation2 = server3.invokeAsync(() -> {
+      alterRegionSetCacheWriter();
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testMemberLeaveAfterProcessMessage() throws InterruptedException {
+    initialize();
+
+    server3 = VM.getVM(2);
+
+    server3.invoke(() -> {
+      cacheRule.createCache();
+      cacheRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setStatisticsEnabled(true)
+          .create(REGION_NAME);
+      Region region = cacheRule.getCache().getRegion(REGION_NAME);
+      assertThat(region.size()).isEqualTo(NUM_ENTRIES);
+    });
+
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    server3.invoke(() -> DistributionMessageObserver.setInstance(
+        getDistributionMessageObserverAfterProcessMessage()));
+
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
+          .isInstanceOf(PartitionedRegionPartialClearException.class);
+    });
+
+    AsyncInvocation asyncInvocation2 = server3.invokeAsync(() -> {
+      alterRegionSetCacheWriter();
+    });
+
+    asyncInvocation1.await();
+    asyncInvocation2.await();
+  }
+
+  @Test
+  public void testSingleServer() throws InterruptedException, ExecutionException {
+    cacheRule.createCache();
+    cacheRule.getCache().createRegionFactory(RegionShortcut.PARTITION).setStatisticsEnabled(true)
+        .create(REGION_NAME);
+    populateRegion();
+    Region region = cacheRule.getCache().getRegion(REGION_NAME);
+    assertThat(region.size()).isEqualTo(NUM_ENTRIES);
+
+    Future<?> future1 = executorServiceRule.runAsync(() -> {
+      cacheRule.getCache().getRegion(REGION_NAME).clear();
+      assertThat(cacheRule.getCache().getRegion(REGION_NAME).size()).isEqualTo(0);
+    });
+
+    Future<?> future2 = executorServiceRule.runAsync(() -> {
+      AttributesMutator attributesMutator = region.getAttributesMutator();
+      TestCacheLoader testCacheLoader = new TestCacheLoader();
+      attributesMutator.setCacheLoader(testCacheLoader);
+      assertThat(region.getAttributes().getCacheLoader()).isEqualTo(testCacheLoader);
+    });
+
+    future1.get();
+    future2.get();
+  }
+
+  private void populateRegion() {
+    Region region = cacheRule.getCache().getRegion(REGION_NAME);
+    IntStream.range(0, NUM_ENTRIES).forEach(i -> region.put(i, i));
+  }
+
+  private void alterRegionSetCacheLoader() throws TimeoutException, InterruptedException {
+    Region region = cacheRule.getCache().getRegion(REGION_NAME);
+    AttributesMutator attributesMutator = region.getAttributesMutator();
+    TestCacheLoader testCacheLoader = new TestCacheLoader();
+    getBlackboard().waitForGate(GATE_NAME);
+    attributesMutator.setCacheLoader(testCacheLoader);
+    assertThat(region.getAttributes().getCacheLoader()).isEqualTo(testCacheLoader);
+  }
+
+  private void alterRegionSetCacheWriter() throws TimeoutException, InterruptedException {
+    Region region = cacheRule.getCache().getRegion(REGION_NAME);
+    AttributesMutator attributesMutator = region.getAttributesMutator();
+    TestCacheWriter testCacheWriter = new TestCacheWriter();
+    getBlackboard().waitForGate(GATE_NAME);
+    attributesMutator.setCacheWriter(testCacheWriter);
+    assertThat(region.getAttributes().getCacheWriter()).isEqualTo(testCacheWriter);
+  }
+
+  private void alterRegionSetCacheListener() throws TimeoutException, InterruptedException {
+    Region region = cacheRule.getCache().getRegion(REGION_NAME);
+    AttributesMutator attributesMutator = region.getAttributesMutator();
+    TestCacheListener testCacheListener = new TestCacheListener();
+    getBlackboard().waitForGate(GATE_NAME);
+    attributesMutator.addCacheListener(testCacheListener);
+    assertThat(region.getAttributes().getCacheListeners()).contains(testCacheListener);
+  }
+
+  private class TestCacheLoader implements CacheLoader {
+
+    @Override
+    public Object load(LoaderHelper helper) throws CacheLoaderException {
+      return NUM_ENTRIES;
+    }
+  }
+
+  private class TestCacheWriter implements CacheWriter {
+
+    @Override
+    public void beforeUpdate(EntryEvent event) throws CacheWriterException {
+
+    }
+
+    @Override
+    public void beforeCreate(EntryEvent event) throws CacheWriterException {
+
+    }
+
+    @Override
+    public void beforeDestroy(EntryEvent event) throws CacheWriterException {
+
+    }
+
+    @Override
+    public void beforeRegionDestroy(RegionEvent event) throws CacheWriterException {
+
+    }
+
+    @Override
+    public void beforeRegionClear(RegionEvent event) throws CacheWriterException {
+      System.out.println("beforeRegionClear");
+    }
+  }
+
+  private class TestCacheListener implements CacheListener {
+
+    @Override
+    public void afterCreate(EntryEvent event) {
+
+    }
+
+    @Override
+    public void afterUpdate(EntryEvent event) {
+
+    }
+
+    @Override
+    public void afterInvalidate(EntryEvent event) {
+
+    }
+
+    @Override
+    public void afterDestroy(EntryEvent event) {
+
+    }
+
+    @Override
+    public void afterRegionInvalidate(RegionEvent event) {
+
+    }
+
+    @Override
+    public void afterRegionDestroy(RegionEvent event) {
+
+    }
+
+    @Override
+    public void afterRegionClear(RegionEvent event) {
+      System.out.println("afterRegionClear");
+    }
+
+    @Override
+    public void afterRegionCreate(RegionEvent event) {
+
+    }
+
+    @Override
+    public void afterRegionLive(RegionEvent event) {
+
+    }
+  }
+
+  private static DUnitBlackboard getBlackboard() {
+    if (blackboard == null) {
+      blackboard = new DUnitBlackboard();
+    }
+    return blackboard;
+  }
+
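+  // Each observer below uninstalls itself on the first PartitionedRegionClearMessage it sees and
+  // then signals the gate, so the waiting alterRegion thread is released exactly once per test.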
+  private DistributionMessageObserver getDistributionMessageObserverBeforeProcessMessage() {
+    return new DistributionMessageObserver() {
+      @Override
+      public void beforeProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
+        super.beforeProcessMessage(dm, message);
+        if (message instanceof PartitionedRegionClearMessage) {
+          DistributionMessageObserver.setInstance(null);
+          getBlackboard().signalGate(GATE_NAME);
+        }
+      }
+    };
+  }
+
+  private DistributionMessageObserver getDistributionMessageObserverAfterProcessMessage() {
+    return new DistributionMessageObserver() {
+      @Override
+      public void afterProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
+        super.afterProcessMessage(dm, message);
+        if (message instanceof PartitionedRegionClearMessage) {
+          DistributionMessageObserver.setInstance(null);
+          getBlackboard().signalGate(GATE_NAME);
+        }
+      }
+    };
+  }
+
+  /**
+   * Shuts down a member (the clear coordinator or a non-coordinator, depending on the flag)
+   * while the clear operation is in progress.
+   */
+  public static class MemberKiller extends DistributionMessageObserver {
+    private final boolean coordinator;
+
+    public MemberKiller(boolean coordinator) {
+      this.coordinator = coordinator;
+    }
+
+    /**
+     * Shuts down the VM whenever the message is an instance of
+     * {@link PartitionedRegionClearMessage}.
+     */
+    private void shutdownMember(DistributionMessage message) {
+      if (message instanceof PartitionedRegionClearMessage) {
+        if (((PartitionedRegionClearMessage) message)
+            .getOp() == PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR) {
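+          // Uninstall the observer so the crash happens only once, disable auto-reconnect, then
+          // crash the distributed system and wait until this member is fully disconnected.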
+          DistributionMessageObserver.setInstance(null);
+          InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
+          MembershipManagerHelper
+              .crashDistributedSystem(InternalDistributedSystem.getConnectedInstance());
+          await().untilAsserted(
+              () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNull());
+        }
+      }
+    }
+
+    /**
+     * Invoked only on the clear coordinator VM.
+     *
+     * @param dm the distribution manager that received the message
+     * @param message The message itself
+     */
+    @Override
+    public void beforeSendMessage(ClusterDistributionManager dm, DistributionMessage message) {
+      if (coordinator) {
+        shutdownMember(message);
+      } else {
+        super.beforeSendMessage(dm, message);
+      }
+    }
+
+    /**
+     * Invoked only on VMs that are not the clear coordinator.
+     *
+     * @param dm the distribution manager that received the message
+     * @param message The message itself
+     */
+    @Override
+    public void beforeProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
+      if (!coordinator) {
+        shutdownMember(message);
+      } else {
+        super.beforeProcessMessage(dm, message);
+      }
+    }
+  }
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
new file mode 100644
index 0000000..fdb91c7
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
@@ -0,0 +1,747 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.internal.util.ArrayUtils.asList;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.io.Serializable;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.IntStream;
+
+import junitparams.JUnitParamsRunner;
+import junitparams.Parameters;
+import junitparams.naming.TestCaseName;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.apache.geode.ForcedDisconnectException;
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.PartitionAttributes;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.PartitionedRegionPartialClearException;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.distributed.DistributedSystemDisconnectedException;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DMStats;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.DistributionMessageObserver;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.membership.api.MembershipManagerHelper;
+import org.apache.geode.internal.cache.versions.RegionVersionHolder;
+import org.apache.geode.internal.cache.versions.RegionVersionVector;
+import org.apache.geode.internal.cache.versions.VersionSource;
+import org.apache.geode.test.awaitility.GeodeAwaitility;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+
+/**
+ * Tests to verify that {@link PartitionedRegion#clear()} operation can be executed multiple times
+ * on the same region while other cache operations are being executed concurrently and members are
+ * added or removed.
+ */
+@RunWith(JUnitParamsRunner.class)
+public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements Serializable {
+  private static final Integer BUCKETS = 13;
+  private static final String REGION_NAME = "PartitionedRegion";
+  private static final String TEST_CASE_NAME =
+      "[{index}] {method}(Coordinator:{0}, RegionType:{1})";
+
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule(3);
+
+  @Rule
+  public CacheRule cacheRule = CacheRule.builder().createCacheInAll().build();
+
+  private VM server1;
+  private VM server2;
+  private VM accessor;
+
+  private enum TestVM {
+    ACCESSOR(0), SERVER1(1), SERVER2(2);
+
+    final int vmNumber;
+
+    TestVM(int vmNumber) {
+      this.vmNumber = vmNumber;
+    }
+  }
+
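+  // JUnitParams sources: parametrized tests run once per clear coordinator (data store or
+  // accessor) and, where applicable, per region type.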
+  static RegionShortcut[] regionTypes() {
+    return new RegionShortcut[] {
+        RegionShortcut.PARTITION, RegionShortcut.PARTITION_REDUNDANT
+    };
+  }
+
+  @SuppressWarnings("unused")
+  static TestVM[] coordinators() {
+    return new TestVM[] {
+        TestVM.SERVER1, TestVM.ACCESSOR
+    };
+  }
+
+  @SuppressWarnings("unused")
+  static Object[] coordinatorsAndRegionTypes() {
+    ArrayList<Object[]> parameters = new ArrayList<>();
+    RegionShortcut[] regionShortcuts = regionTypes();
+
+    Arrays.stream(regionShortcuts).forEach(regionShortcut -> {
+      parameters.add(new Object[] {TestVM.SERVER1, regionShortcut});
+      parameters.add(new Object[] {TestVM.ACCESSOR, regionShortcut});
+    });
+
+    return parameters.toArray();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    server1 = getVM(TestVM.SERVER1.vmNumber);
+    server2 = getVM(TestVM.SERVER2.vmNumber);
+    accessor = getVM(TestVM.ACCESSOR.vmNumber);
+  }
+
+  private void initAccessor(RegionShortcut regionShortcut) {
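+    // localMaxMemory = 0 makes this member an accessor: it hosts no bucket data but can still
+    // initiate operations such as clear() on the region.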
+    PartitionAttributes<String, String> attrs = new PartitionAttributesFactory<String, String>()
+        .setTotalNumBuckets(BUCKETS)
+        .setLocalMaxMemory(0)
+        .create();
+
+    cacheRule.getCache().createRegionFactory(regionShortcut)
+        .setPartitionAttributes(attrs)
+        .create(REGION_NAME);
+  }
+
+  private void initDataStore(RegionShortcut regionShortcut) {
+    PartitionAttributes<String, String> attrs = new PartitionAttributesFactory<String, String>()
+        .setTotalNumBuckets(BUCKETS)
+        .create();
+
+    cacheRule.getCache().createRegionFactory(regionShortcut)
+        .setPartitionAttributes(attrs)
+        .create(REGION_NAME);
+  }
+
+  private void parametrizedSetup(RegionShortcut regionShortcut) {
+    server1.invoke(() -> initDataStore(regionShortcut));
+    server2.invoke(() -> initDataStore(regionShortcut));
+    accessor.invoke(() -> initAccessor(regionShortcut));
+  }
+
+  private void waitForSilence() {
+    DMStats dmStats = cacheRule.getSystem().getDistributionManager().getStats();
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    PartitionedRegionStats partitionedRegionStats = region.getPrStats();
+
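+    // No pending replies and no bucket volunteering/creates/transfers in flight means the
+    // region has quiesced, so the assertions that follow see a stable view.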
+    await().untilAsserted(() -> {
+      assertThat(dmStats.getReplyWaitsInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getVolunteeringInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getPrimaryTransfersInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalanceBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalancePrimaryTransfersInProgress()).isEqualTo(0);
+    });
+  }
+
+  /**
+   * Populates the region and verifies the data on the selected VMs.
+   */
+  private void populateRegion(VM feeder, int entryCount, List<VM> vms) {
+    feeder.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, entryCount).forEach(i -> region.put(String.valueOf(i), "Value_" + i));
+    });
+
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilence();
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+
+      IntStream.range(0, entryCount)
+          .forEach(i -> assertThat(region.get(String.valueOf(i))).isEqualTo("Value_" + i));
+    }));
+  }
+
+  /**
+   * Asserts that the RegionVersionVectors of the two copies of the bucket are consistent.
+   *
+   * @param bucketId Id of the bucket to compare.
+   * @param bucketDump1 First bucketDump.
+   * @param bucketDump2 Second bucketDump.
+   */
+  private void assertRegionVersionVectorsConsistency(int bucketId, BucketDump bucketDump1,
+      BucketDump bucketDump2) {
+    RegionVersionVector<?> rvv1 = bucketDump1.getRvv();
+    RegionVersionVector<?> rvv2 = bucketDump2.getRvv();
+
+    if (rvv1 == null) {
+      assertThat(rvv2)
+          .as("Bucket " + bucketId + " has an RVV on member " + bucketDump2.getMember()
+              + ", but does not on member " + bucketDump1.getMember())
+          .isNull();
+    }
+
+    if (rvv2 == null) {
+      assertThat(rvv1)
+          .as("Bucket " + bucketId + " has an RVV on member " + bucketDump1.getMember()
+              + ", but does not on member " + bucketDump2.getMember())
+          .isNull();
+    }
+
+    assertThat(rvv1).isNotNull();
+    assertThat(rvv2).isNotNull();
+    Map<VersionSource<?>, RegionVersionHolder<?>> rvv2Members =
+        new HashMap<>(rvv2.getMemberToVersion());
+    Map<VersionSource<?>, RegionVersionHolder<?>> rvv1Members =
+        new HashMap<>(rvv1.getMemberToVersion());
+    for (Map.Entry<VersionSource<?>, RegionVersionHolder<?>> entry : rvv1Members.entrySet()) {
+      VersionSource<?> memberId = entry.getKey();
+      RegionVersionHolder<?> versionHolder1 = entry.getValue();
+      RegionVersionHolder<?> versionHolder2 = rvv2Members.remove(memberId);
+      assertThat(versionHolder1)
+          .as("RegionVersionVector for bucket " + bucketId + " on member " + bucketDump1.getMember()
+              + " is not consistent with member " + bucketDump2.getMember())
+          .isEqualTo(versionHolder2);
+    }
+  }
+
+  /**
+   * Asserts that the region data is consistent across buckets.
+   */
+  private void assertRegionBucketsConsistency() throws ForceReattemptException {
+    List<BucketDump> bucketDumps;
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    // Redundant copies + 1 primary.
+    int expectedCopies = region.getRedundantCopies() + 1;
+
+    for (int bId = 0; bId < BUCKETS; bId++) {
+      final int bucketId = bId;
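+      // getAllBucketEntries returns one BucketDump per member hosting a copy of this bucket.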
+      bucketDumps = region.getAllBucketEntries(bucketId);
+      assertThat(bucketDumps.size())
+          .as("Bucket " + bucketId + " should have " + expectedCopies + " copies, but has "
+              + bucketDumps.size())
+          .isEqualTo(expectedCopies);
+
+      // Check that all copies of the bucket have the same data.
+      if (bucketDumps.size() > 1) {
+        BucketDump firstDump = bucketDumps.get(0);
+
+        for (int j = 1; j < bucketDumps.size(); j++) {
+          BucketDump otherDump = bucketDumps.get(j);
+          assertRegionVersionVectorsConsistency(bucketId, firstDump, otherDump);
+
+          await().untilAsserted(() -> assertThat(otherDump.getValues())
+              .as("Values for bucket " + bucketId + " on member " + otherDump.getMember()
+                  + " are not consistent with member " + firstDump.getMember())
+              .isEqualTo(firstDump.getValues()));
+
+          await().untilAsserted(() -> assertThat(otherDump.getVersions())
+              .as("Versions for bucket " + bucketId + " on member " + otherDump.getMember()
+                  + " are not consistent with member " + firstDump.getMember())
+              .isEqualTo(firstDump.getVersions()));
+        }
+      }
+    }
+  }
+
+  /**
+   * Continuously execute get operations on the PartitionedRegion for the given durationInMillis.
+   */
+  private void executeGets(final int numEntries, final long durationInMillis) {
+    Cache cache = cacheRule.getCache();
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+
+    while (Instant.now().isBefore(finishTime)) {
+      // Region might have been cleared in between, that's why we check for null.
+      IntStream.range(0, numEntries).forEach(i -> {
+        Optional<String> nullableValue = Optional.ofNullable(region.get(String.valueOf(i)));
+        nullableValue.ifPresent(value -> assertThat(value).isEqualTo("Value_" + i));
+      });
+    }
+  }
+
+  /**
+   * Continuously execute put operations on the PartitionedRegion for the given durationInMillis.
+   */
+  private void executePuts(final int numEntries, final long durationInMillis) {
+    Cache cache = cacheRule.getCache();
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+
+    while (Instant.now().isBefore(finishTime)) {
+      IntStream.range(0, numEntries).forEach(i -> region.put(String.valueOf(i), "Value_" + i));
+    }
+  }
+
+  /**
+   * Continuously execute putAll operations on the PartitionedRegion for the given
+   * durationInMillis.
+   */
+  private void executePutAlls(final int startKey, final int finalKey, final long durationInMillis) {
+    Cache cache = cacheRule.getCache();
+    Map<String, String> valuesToInsert = new HashMap<>();
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    IntStream.range(startKey, finalKey)
+        .forEach(i -> valuesToInsert.put(String.valueOf(i), "Value_" + i));
+    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+
+    while (Instant.now().isBefore(finishTime)) {
+      region.putAll(valuesToInsert);
+    }
+  }
+
+  /**
+   * Continuously execute remove operations on the PartitionedRegion for the given
+   * durationInMillis.
+   */
+  private void executeRemoves(final int numEntries, final long durationInMillis) {
+    Cache cache = cacheRule.getCache();
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+
+    while (Instant.now().isBefore(finishTime)) {
+      // Region might have been cleared in between, that's why we check for null.
+      IntStream.range(0, numEntries).forEach(i -> {
+        Optional<String> nullableValue = Optional.ofNullable(region.remove(String.valueOf(i)));
+        nullableValue.ifPresent(value -> assertThat(value).isEqualTo("Value_" + i));
+      });
+    }
+  }
+
+  /**
+   * Continuously execute removeAll operations on the PartitionedRegion for the given
+   * durationInMillis.
+   */
+  private void executeRemoveAlls(final int startKey, final int finalKey,
+      final long durationInMillis) {
+    Cache cache = cacheRule.getCache();
+    List<String> keysToRemove = new ArrayList<>();
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    IntStream.range(startKey, finalKey).forEach(i -> keysToRemove.add(String.valueOf(i)));
+    Instant finishTime = Instant.now().plusMillis(durationInMillis);
+
+    while (Instant.now().isBefore(finishTime)) {
+      region.removeAll(keysToRemove);
+    }
+  }
+
+  /**
+   * Execute the clear operation and retry until success.
+   */
+  private void executeClearWithRetry(VM coordinator) {
+    coordinator.invoke(() -> {
+      boolean retry;
+
+      do {
+        retry = false;
+
+        try {
+          cacheRule.getCache().getRegion(REGION_NAME).clear();
+        } catch (PartitionedRegionPartialClearException pce) {
+          retry = true;
+        }
+
+      } while (retry);
+    });
+  }
+
+  /**
+   * Continuously execute clear operations on the PartitionedRegion every periodInMillis for the
+   * given durationInMillis.
+   */
+  private void executeClears(final long durationInMillis, final long periodInMillis)
+      throws InterruptedException {
+    Cache cache = cacheRule.getCache();
+    AtomicLong invocationCount = new AtomicLong(0);
+    Region<String, String> region = cache.getRegion(REGION_NAME);
+    long minimumInvocationCount = durationInMillis / periodInMillis;
+    ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
+    ScheduledFuture<?> scheduledFuture = executor.scheduleWithFixedDelay(() -> {
+      region.clear();
+      invocationCount.incrementAndGet();
+    }, 0, periodInMillis, TimeUnit.MILLISECONDS);
+
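+    // Block until the scheduled task has executed at least durationInMillis / periodInMillis
+    // times, then stop scheduling further clears and drain the executor.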
+    await().untilAsserted(
+        () -> assertThat(invocationCount.get()).isGreaterThanOrEqualTo(minimumInvocationCount));
+    scheduledFuture.cancel(false);
+    executor.shutdown();
+    executor.awaitTermination(GeodeAwaitility.getTimeout().getSeconds(), TimeUnit.SECONDS);
+  }
+
+  /**
+   * The test does the following (clear coordinator and regionType are parametrized):
+   * - Launches one thread per VM to continuously execute removes, puts and gets for a given time.
+   * - Clears the Partition Region continuously every X milliseconds for a given time.
+   * - Asserts that, after the clears have finished, the Region Buckets are consistent across
+   * members.
+   */
+  @Test
+  @TestCaseName(TEST_CASE_NAME)
+  @Parameters(method = "coordinatorsAndRegionTypes")
+  public void clearWithConcurrentPutGetRemoveShouldWorkCorrectly(TestVM coordinatorVM,
+      RegionShortcut regionShortcut) throws InterruptedException {
+    final int entries = 15000;
+    final int workMillis = 60000;
+    parametrizedSetup(regionShortcut);
+
+    // Let all VMs continuously execute puts, gets and removes for 60 seconds.
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePuts(entries, workMillis)),
+        server2.invokeAsync(() -> executeGets(entries, workMillis)),
+        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
+
+    // Clear the region every second for 60 seconds.
+    getVM(coordinatorVM.vmNumber).invoke(() -> executeClears(workMillis, 1000));
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator and regionType are parametrized):
+   * - Launches two threads per VM to continuously execute putAll and removeAll for a given time.
+   * - Clears the Partition Region continuously every X milliseconds for a given time.
+   * - Asserts that, after the clears have finished, the Region Buckets are consistent across
+   * members.
+   */
+  @Test
+  @TestCaseName(TEST_CASE_NAME)
+  @Parameters(method = "coordinatorsAndRegionTypes")
+  public void clearWithConcurrentPutAllRemoveAllShouldWorkCorrectly(TestVM coordinatorVM,
+      RegionShortcut regionShortcut) throws InterruptedException {
+    final int workMillis = 15000;
+    parametrizedSetup(regionShortcut);
+
+    // Let all VMs continuously execute putAll and removeAll for 15 seconds.
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePutAlls(0, 2000, workMillis)),
+        server1.invokeAsync(() -> executeRemoveAlls(0, 2000, workMillis)),
+        server2.invokeAsync(() -> executePutAlls(2000, 4000, workMillis)),
+        server2.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)),
+        accessor.invokeAsync(() -> executePutAlls(4000, 6000, workMillis)),
+        accessor.invokeAsync(() -> executeRemoveAlls(4000, 6000, workMillis)));
+
+    // Clear the region every half second for 15 seconds.
+    getVM(coordinatorVM.vmNumber).invoke(() -> executeClears(workMillis, 500));
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (regionType is parametrized):
+   * - Populates the Partition Region.
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop the
+   * coordinator VM while the clear is in progress.
+   * - Clears the Partition Region (at this point the coordinator is restarted).
+   * - Asserts that, after the member joins again, the Region Buckets are consistent.
+   */
+  @Test
+  @TestCaseName("[{index}] {method}(RegionType:{0})")
+  @Parameters(method = "regionTypes")
+  public void clearShouldFailWhenCoordinatorMemberIsBounced(RegionShortcut regionShortcut) {
+    final int entries = 1000;
+    parametrizedSetup(regionShortcut);
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+
+    // Arm the MemberKiller to bounce the coordinator and try to clear the region.
+    server1.invoke(() -> {
+      DistributionMessageObserver.setInstance(new MemberKiller(true));
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      assertThatThrownBy(region::clear)
+          .isInstanceOf(DistributedSystemDisconnectedException.class)
+          .hasCauseInstanceOf(ForcedDisconnectException.class);
+    });
+
+    // Wait for member to get back online and assign all buckets.
+    server1.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(regionShortcut);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+      PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
+    });
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Populates the Partition Region.
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member hosts primary buckets, so it
+   * participates in the clear operation).
+   * - Launches two threads per VM to continuously execute gets, puts and removes for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that, after the clear has finished, the Region Buckets are consistent across members.
+   */
+  @Test
+  @Parameters(method = "coordinators")
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnRedundantPartitionRegionWithConcurrentPutGetRemoveShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    final int entries = 7500;
+    final int workMillis = 30000;
+    parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    // Let all VMs (except the one to kill) continuously execute gets, puts and removes for 30
+    // seconds.
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executeGets(entries, workMillis)),
+        server1.invokeAsync(() -> executePuts(entries, workMillis)),
+        accessor.invokeAsync(() -> executeGets(entries, workMillis)),
+        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
+
+    // Retry the clear operation on the region until success (server2 will go down, but other
+    // members will eventually become primary for those buckets previously hosted by server2).
+    executeClearWithRetry(getVM(coordinatorVM.vmNumber));
+
+    // Wait for member to get back online.
+    server2.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(RegionShortcut.PARTITION_REDUNDANT);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Populates the Partition Region.
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member hosts primary buckets, so it
+   * participates in the clear operation).
+   * - Launches two threads per VM to continuously execute gets, puts and removes for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that the clear operation failed with PartitionedRegionPartialClearException (primary
+   * buckets on the restarted member are not available).
+   */
+  @Test
+  @Parameters(method = "coordinators")
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnNonRedundantPartitionRegionWithConcurrentPutGetRemoveShouldFailWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    final int entries = 7500;
+    final int workMillis = 30000;
+    parametrizedSetup(RegionShortcut.PARTITION);
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    // Let all VMs (except the one to kill) continuously execute gets, puts and removes for 30
+    // seconds.
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executeGets(entries, workMillis)),
+        server1.invokeAsync(() -> executePuts(entries, workMillis)),
+        accessor.invokeAsync(() -> executeGets(entries, workMillis)),
+        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
+
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> {
+      assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
+          .isInstanceOf(PartitionedRegionPartialClearException.class);
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+  }
+
+  /**
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member hosts primary buckets, so it
+   * participates in the clear operation).
+   * - Launches one thread per VM to continuously execute putAll/removeAll for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that, after the clear has finished, the Region Buckets are consistent across members.
+   */
+  @Test
+  @Parameters(method = "coordinators")
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    final int workMillis = 30000;
+    parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    // Let all VMs continuously execute putAll/removeAll for 30 seconds.
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
+        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
+
+    // Retry the clear operation on the region until success (server2 will go down, but other
+    // members will eventually become primary for those buckets previously hosted by server2).
+    executeClearWithRetry(getVM(coordinatorVM.vmNumber));
+
+    // Wait for member to get back online.
+    server2.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(RegionShortcut.PARTITION_REDUNDANT);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
+   * participates in the clear operation).
+   * - Launches one thread per VM to continuously execute putAll/removeAll for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that the clear operation failed with PartitionedRegionPartialClearException (primary
+   * buckets on the restarted members are not available).
+   */
+  @Test
+  @Parameters(method = "coordinators")
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnNonRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldFailWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    final int workMillis = 30000;
+    parametrizedSetup(RegionShortcut.PARTITION);
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
+        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
+
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> {
+      assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
+          .isInstanceOf(PartitionedRegionPartialClearException.class);
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+  }
+
+  /**
+   * Shuts down a member (the clear coordinator or a non-coordinator, depending on the
+   * constructor flag) while the clear operation is in progress.
+   */
+  public static class MemberKiller extends DistributionMessageObserver {
+    private final boolean coordinator;
+
+    public MemberKiller(boolean coordinator) {
+      this.coordinator = coordinator;
+    }
+
+    /**
+     * Shuts down the VM whenever the message is an instance of
+     * {@link PartitionedRegionClearMessage}.
+     */
+    private void shutdownMember(DistributionMessage message) {
+      if (message instanceof PartitionedRegionClearMessage) {
+        if (((PartitionedRegionClearMessage) message)
+            .getOp() == PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR) {
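+          // Uninstall the observer first so the shutdown is triggered only once.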
+          DistributionMessageObserver.setInstance(null);
+          InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
+          MembershipManagerHelper
+              .crashDistributedSystem(InternalDistributedSystem.getConnectedInstance());
+          await().untilAsserted(
+              () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNull());
+        }
+      }
+    }
+
+    /**
+     * Invoked only on the clear coordinator VM.
+     *
+     * @param dm the distribution manager that received the message
+     * @param message The message itself
+     */
+    @Override
+    public void beforeSendMessage(ClusterDistributionManager dm, DistributionMessage message) {
+      if (coordinator) {
+        shutdownMember(message);
+      } else {
+        super.beforeSendMessage(dm, message);
+      }
+    }
+
+    /**
+     * Invoked only on non-coordinator VMs.
+     *
+     * @param dm the distribution manager that received the message
+     * @param message The message itself
+     */
+    @Override
+    public void beforeProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
+      if (!coordinator) {
+        shutdownMember(message);
+      } else {
+        super.beforeProcessMessage(dm, message);
+      }
+    }
+  }
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
new file mode 100644
index 0000000..dfc9470
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithExpirationDUnitTest.java
@@ -0,0 +1,501 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.cache.ExpirationAction.DESTROY;
+import static org.apache.geode.cache.RegionShortcut.PARTITION;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_OVERFLOW;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_OVERFLOW;
+import static org.apache.geode.internal.util.ArrayUtils.asList;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.IntStream;
+
+import junitparams.JUnitParamsRunner;
+import junitparams.Parameters;
+import junitparams.naming.TestCaseName;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.apache.geode.ForcedDisconnectException;
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheWriter;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.ExpirationAttributes;
+import org.apache.geode.cache.PartitionAttributes;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.PartitionedRegionPartialClearException;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.util.CacheWriterAdapter;
+import org.apache.geode.distributed.DistributedSystemDisconnectedException;
+import org.apache.geode.distributed.internal.DMStats;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.membership.api.MembershipManagerHelper;
+import org.apache.geode.test.awaitility.GeodeAwaitility;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedDiskDirRule;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+
+/**
+ * Tests to verify that {@link PartitionedRegion#clear()} cancels all remaining expiration tasks
+ * on the {@link PartitionedRegion} once the operation is executed.
+ */
+@RunWith(JUnitParamsRunner.class)
+public class PartitionedRegionClearWithExpirationDUnitTest implements Serializable {
+  private static final Integer BUCKETS = 13;
+  private static final Integer EXPIRATION_TIME = 5 * 60;
+  private static final Integer SMALL_EXPIRATION_TIME = 10;
+  private static final String REGION_NAME = "PartitionedRegion";
+
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule(3);
+
+  @Rule
+  public CacheRule cacheRule = CacheRule.builder().createCacheInAll().build();
+
+  @Rule
+  public DistributedDiskDirRule distributedDiskDirRule = new DistributedDiskDirRule();
+
+  private VM accessor, server1, server2;
+
+  private enum TestVM {
+    ACCESSOR(0), SERVER1(1), SERVER2(2);
+
+    final int vmNumber;
+
+    TestVM(int vmNumber) {
+      this.vmNumber = vmNumber;
+    }
+  }
+
+  @SuppressWarnings("unused")
+  static RegionShortcut[] regionTypes() {
+    return new RegionShortcut[] {
+        PARTITION,
+        PARTITION_OVERFLOW,
+        PARTITION_REDUNDANT,
+        PARTITION_REDUNDANT_OVERFLOW,
+    };
+  }
+
+  @SuppressWarnings("unused")
+  static Object[] vmsAndRegionTypes() {
+    ArrayList<Object[]> parameters = new ArrayList<>();
+    RegionShortcut[] regionShortcuts = regionTypes();
+
+    Arrays.stream(regionShortcuts).forEach(regionShortcut -> {
+      parameters.add(new Object[] {TestVM.SERVER1, regionShortcut});
+      parameters.add(new Object[] {TestVM.ACCESSOR, regionShortcut});
+    });
+
+    return parameters.toArray();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    server1 = getVM(TestVM.SERVER1.vmNumber);
+    server2 = getVM(TestVM.SERVER2.vmNumber);
+    accessor = getVM(TestVM.ACCESSOR.vmNumber);
+  }
+
+  private void initAccessor(RegionShortcut regionShortcut,
+      ExpirationAttributes expirationAttributes) {
+    PartitionAttributes<String, String> attributes =
+        new PartitionAttributesFactory<String, String>()
+            .setTotalNumBuckets(BUCKETS)
+            .setLocalMaxMemory(0)
+            .create();
+
+    cacheRule.getCache()
+        .<String, String>createRegionFactory(regionShortcut)
+        .setPartitionAttributes(attributes)
+        .setEntryTimeToLive(expirationAttributes)
+        .setEntryIdleTimeout(expirationAttributes)
+        .create(REGION_NAME);
+  }
+
+  private void initDataStore(RegionShortcut regionShortcut,
+      ExpirationAttributes expirationAttributes) {
+    PartitionAttributes<String, String> attributes =
+        new PartitionAttributesFactory<String, String>()
+            .setTotalNumBuckets(BUCKETS)
+            .create();
+
+    cacheRule.getCache()
+        .<String, String>createRegionFactory(regionShortcut)
+        .setPartitionAttributes(attributes)
+        .setEntryTimeToLive(expirationAttributes)
+        .setEntryIdleTimeout(expirationAttributes)
+        .create(REGION_NAME);
+
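+    // Track how many expiration tasks are scheduled, run, expired and cancelled on this member.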
+    ExpiryTask.expiryTaskListener = new ExpirationListener();
+  }
+
+  private void parametrizedSetup(RegionShortcut regionShortcut,
+      ExpirationAttributes expirationAttributes) {
+    server1.invoke(() -> initDataStore(regionShortcut, expirationAttributes));
+    server2.invoke(() -> initDataStore(regionShortcut, expirationAttributes));
+    accessor.invoke(() -> initAccessor(regionShortcut, expirationAttributes));
+  }
+
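+  /**
+   * Waits until there are no replies pending and no bucket creation, volunteering or primary
+   * transfers in progress on this member.
+   */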
+  private void waitForSilence() {
+    DMStats dmStats = cacheRule.getSystem().getDistributionManager().getStats();
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    PartitionedRegionStats partitionedRegionStats = region.getPrStats();
+
+    await().untilAsserted(() -> {
+      assertThat(dmStats.getReplyWaitsInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getVolunteeringInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getPrimaryTransfersInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalanceBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalancePrimaryTransfersInProgress()).isEqualTo(0);
+    });
+  }
+
+  /**
+   * Populates the region and verifies the data on the selected VMs.
+   */
+  private void populateRegion(VM feeder, int entryCount, List<VM> vms) {
+    feeder.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, entryCount).forEach(i -> region.put(String.valueOf(i), "Value_" + i));
+    });
+
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilence();
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+
+      IntStream.range(0, entryCount)
+          .forEach(i -> assertThat(region.get(String.valueOf(i))).isEqualTo("Value_" + i));
+    }));
+  }
+
+  /**
+   * Asserts that the region is empty on requested VMs.
+   */
+  private void assertRegionIsEmpty(List<VM> vms) {
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilence();
+      PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+
+      assertThat(region.getLocalSize()).isEqualTo(0);
+    }));
+  }
+
+  /**
+   * Asserts that the region data is consistent across buckets.
+   */
+  private void assertRegionBucketsConsistency() throws ForceReattemptException {
+    waitForSilence();
+    List<BucketDump> bucketDumps;
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    // Redundant copies + 1 primary.
+    int expectedCopies = region.getRedundantCopies() + 1;
+
+    for (int bucketId = 0; bucketId < BUCKETS; bucketId++) {
+      bucketDumps = region.getAllBucketEntries(bucketId);
+      assertThat(bucketDumps.size()).as("Bucket " + bucketId + " should have " + expectedCopies
+          + " copies, but has " + bucketDumps.size()).isEqualTo(expectedCopies);
+
+      // Check that all copies of the bucket have the same data.
+      if (bucketDumps.size() > 1) {
+        BucketDump firstDump = bucketDumps.get(0);
+
+        for (int j = 1; j < bucketDumps.size(); j++) {
+          BucketDump otherDump = bucketDumps.get(j);
+          assertThat(otherDump.getValues())
+              .as("Values for bucket " + bucketId + " on member " + otherDump.getMember()
+                  + " are not consistent with member " + firstDump.getMember())
+              .isEqualTo(firstDump.getValues());
+          assertThat(otherDump.getVersions())
+              .as("Versions for bucket " + bucketId + " on member " + otherDump.getMember()
+                  + " are not consistent with member " + firstDump.getMember())
+              .isEqualTo(firstDump.getVersions());
+        }
+      }
+    }
+  }
+
+  /**
+   * Registers the MemberKiller CacheWriter on the given VMs.
+   */
+  private void registerVMKillerAsCacheWriter(List<VM> vmsToBounce) {
+    vmsToBounce.forEach(vm -> vm.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      region.getAttributesMutator().setCacheWriter(new MemberKiller());
+    }));
+  }
+
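+  /**
+   * Clears the region, retrying until the operation succeeds (the clear can fail while the
+   * crashed member's primary buckets are being taken over by the surviving members).
+   */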
+  private void doClear() {
+    Cache cache = cacheRule.getCache();
+    boolean retry;
+    do {
+      retry = false;
+      try {
+        cache.getRegion(REGION_NAME).clear();
+      } catch (PartitionedRegionPartialClearException | CacheWriterException ex) {
+        retry = true;
+      }
+    } while (retry);
+  }
+
+  /**
+   * The test does the following (clear coordinator and region type are parametrized):
+   * - Populates the Partition Region (entries have expiration).
+   * - Verifies that the entries are synchronized on all members.
+   * - Clears the Partition Region once.
+   * - Asserts that, after the clear is finished:
+   * . No expiration tasks were executed.
+   * . All expiration tasks were cancelled.
+   * . Map of expiry tasks per bucket is empty.
+   * . The Partition Region is empty on all members.
+   */
+  @Test
+  @Parameters(method = "vmsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
+  public void clearShouldRemoveRegisteredExpirationTasks(TestVM coordinatorVM,
+      RegionShortcut regionShortcut) {
+    final int entries = 500;
+    int expirationTime = (int) GeodeAwaitility.getTimeout().getSeconds();
+    parametrizedSetup(regionShortcut, new ExpirationAttributes(expirationTime, DESTROY));
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> doClear());
+
+    // Assert all expiration tasks were cancelled and none were executed.
+    asList(server1, server2).forEach(vm -> vm.invoke(() -> {
+      ExpirationListener listener = (ExpirationListener) EntryExpiryTask.expiryTaskListener;
+      assertThat(listener.tasksRan.get()).isEqualTo(0);
+      assertThat(listener.tasksCanceled.get()).isEqualTo(listener.tasksScheduled.get());
+
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+      bucketRegions
+          .forEach(bucketRegion -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue());
+    }));
+
+    // Assert Region Buckets are consistent and the region is empty.
+    accessor.invoke(this::assertRegionBucketsConsistency);
+    assertRegionIsEmpty(asList(accessor, server1, server2));
+  }
+
+  /**
+   * The test does the following (region type is parametrized):
+   * - Populates the Partition Region (entries have expiration).
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link CacheWriter} to stop the coordinator VM while the
+   * clear is in progress.
+   * - Clears the Partition Region (at this point the coordinator is restarted).
+   * - Asserts that, after the clear has failed and the expiration time is reached:
+   * . No expiration tasks were cancelled.
+   * . All entries were removed due to the expiration.
+   * . The Partition Region Buckets are consistent on all members.
+   */
+  @Test
+  @Parameters(method = "regionTypes")
+  @TestCaseName("[{index}] {method}(RegionType:{0})")
+  public void clearShouldFailWhenCoordinatorMemberIsBouncedAndExpirationTasksShouldSurvive(
+      RegionShortcut regionShortcut) {
+    final int entries = 1000;
+    ExpirationAttributes expirationAttributes =
+        new ExpirationAttributes(SMALL_EXPIRATION_TIME, DESTROY);
+    parametrizedSetup(regionShortcut, expirationAttributes);
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+    registerVMKillerAsCacheWriter(Collections.singletonList(server1));
+
+    // Clear the region (it should fail).
+    server1.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      assertThatThrownBy(region::clear)
+          .isInstanceOf(DistributedSystemDisconnectedException.class)
+          .hasCauseInstanceOf(ForcedDisconnectException.class);
+    });
+
+    // Wait for member to get back online and assign all buckets.
+    server1.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(regionShortcut, expirationAttributes);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+      PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
+    });
+
+    // Wait until all expiration tasks are executed.
+    asList(server1, server2).forEach(vm -> vm.invoke(() -> {
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+      bucketRegions.forEach(bucketRegion -> await()
+          .untilAsserted(() -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue()));
+    }));
+
+    // At this point the entries should be either invalidated or destroyed (expiration tasks ran).
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, entries).forEach(i -> {
+        String key = String.valueOf(i);
+        assertThat(region.get(key)).isNull();
+      });
+    }));
+
+    // Assert Region Buckets are consistent.
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator and region type are parametrized):
+   * - Populates the Partition Region (entries have expiration).
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link CacheWriter} to stop a non-coordinator VM while the
+   * clear is in progress (the member has primary buckets, though, so participates on
+   * the clear operation).
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that, after the clear is finished:
+   * . No expiration tasks were executed on the non-restarted members.
+   * . All expiration tasks were cancelled on the non-restarted members.
+   * . Map of expiry tasks per bucket is empty on the non-restarted members.
+   * . All expiration tasks were executed and all expired on the restarted members.
+   * . The Partition Region is empty and buckets are consistent across all members.
+   */
+  @Test
+  @Parameters(method = "vmsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
+  public void clearShouldSucceedAndRemoveRegisteredExpirationTasksWhenNonCoordinatorMemberIsBounced(
+      TestVM coordinatorVM, RegionShortcut regionShortcut) {
+    final int entries = 500;
+
+    ExpirationAttributes expirationAttributes = new ExpirationAttributes(EXPIRATION_TIME, DESTROY);
+    parametrizedSetup(regionShortcut, expirationAttributes);
+    registerVMKillerAsCacheWriter(Collections.singletonList(server2));
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> doClear());
+
+    // Wait for member to get back online and assign buckets.
+    server2.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(regionShortcut, expirationAttributes);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+      PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
+    });
+
+    // Assert all expiration tasks were cancelled and none were executed (surviving members).
+    server1.invoke(() -> {
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+      bucketRegions
+          .forEach(bucketRegion -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue());
+
+      ExpirationListener listener = (ExpirationListener) EntryExpiryTask.expiryTaskListener;
+      assertThat(listener.tasksRan.get()).isEqualTo(0);
+      assertThat(listener.tasksCanceled.get()).isEqualTo(listener.tasksScheduled.get());
+    });
+
+    // Assert all expiration tasks expired, as the region is empty (restarted member).
+    server2.invoke(() -> {
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+
+      // During restart, the member loads the region from disk and automatically registers
+      // expiration tasks for each entry. After GII, however, the region is empty due to the
+      // clear operation and the tasks will just expire as there are no entries.
+      bucketRegions.forEach(bucketRegion -> await()
+          .untilAsserted(() -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue()));
+
+      ExpirationListener listener = (ExpirationListener) EntryExpiryTask.expiryTaskListener;
+      assertThat(listener.tasksExpired.get()).isEqualTo(listener.tasksRan.get());
+    });
+
+    // Assert Region Buckets are consistent and the region is empty.
+    accessor.invoke(this::assertRegionBucketsConsistency);
+    assertRegionIsEmpty(asList(accessor, server1, server2));
+  }
+
+  /**
+   * Tracks expiration tasks lifecycle.
+   */
+  public static class ExpirationListener implements ExpiryTask.ExpiryTaskListener {
+    final AtomicInteger tasksRan = new AtomicInteger(0);
+    final AtomicInteger tasksExpired = new AtomicInteger(0);
+    final AtomicInteger tasksCanceled = new AtomicInteger(0);
+    final AtomicInteger tasksScheduled = new AtomicInteger(0);
+
+    @Override
+    public void afterSchedule(ExpiryTask et) {
+      tasksScheduled.incrementAndGet();
+    }
+
+    @Override
+    public void afterTaskRan(ExpiryTask et) {
+      tasksRan.incrementAndGet();
+    }
+
+    @Override
+    public void afterReschedule(ExpiryTask et) {}
+
+    @Override
+    public void afterExpire(ExpiryTask et) {
+      tasksExpired.incrementAndGet();
+    }
+
+    @Override
+    public void afterCancel(ExpiryTask et) {
+      tasksCanceled.incrementAndGet();
+    }
+  }
+
+  /**
+   * Shuts down a member while the clear operation is in progress.
+   * The writer is installed only on the member the test wants to shut down; it doesn't matter
+   * whether it's the clear coordinator or another member holding primary buckets.
+   */
+  public static class MemberKiller extends CacheWriterAdapter<String, String> {
+
+    @Override
+    public synchronized void beforeRegionClear(RegionEvent<String, String> event)
+        throws CacheWriterException {
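+      // Crash the distributed system without triggering auto-reconnect; the test restarts the
+      // member explicitly later.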
+      InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
+      MembershipManagerHelper.crashDistributedSystem(
+          InternalDistributedSystem.getConnectedInstance());
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNull());
+    }
+  }
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithRebalanceDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithRebalanceDUnitTest.java
new file mode 100644
index 0000000..f53fab7
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithRebalanceDUnitTest.java
@@ -0,0 +1,578 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.cache.PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_DEFAULT;
+import static org.apache.geode.cache.RegionShortcut.PARTITION;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_PERSISTENT;
+import static org.apache.geode.internal.util.ArrayUtils.asList;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.IntStream;
+
+import junitparams.JUnitParamsRunner;
+import junitparams.Parameters;
+import junitparams.naming.TestCaseName;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.control.RebalanceFactory;
+import org.apache.geode.cache.control.RebalanceOperation;
+import org.apache.geode.cache.control.RebalanceResults;
+import org.apache.geode.cache.util.CacheWriterAdapter;
+import org.apache.geode.distributed.internal.DMStats;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.membership.api.MembershipManagerHelper;
+import org.apache.geode.test.awaitility.GeodeAwaitility;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.DUnitBlackboard;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedDiskDirRule;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+
+@RunWith(JUnitParamsRunner.class)
+public class PartitionedRegionClearWithRebalanceDUnitTest implements Serializable {
+  private static final long serialVersionUID = -7183993832801073933L;
+
+  private static final Integer BUCKETS = GLOBAL_MAX_BUCKETS_DEFAULT;
+  private static final String REGION_NAME = "testRegion";
+  private static final String COLOCATED_REGION = "childColocatedRegion";
+  private static final int ENTRIES = 10000;
+  private static final String DISK_STORE_SUFFIX = "DiskStore";
+  private static final String REBALANCE_HAS_BEGUN = "rebalance-begun";
+  private static final String CLEAR_HAS_BEGUN = "clear-begun";
+
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule(4);
+
+  @Rule
+  public CacheRule cacheRule = CacheRule.builder().createCacheInAll().build();
+
+  @Rule
+  public DistributedDiskDirRule distributedDiskDirRule = new DistributedDiskDirRule();
+
+  private static transient DUnitBlackboard blackboard;
+
+  private VM accessor;
+  private VM server1;
+  private VM server2;
+  private VM server3;
+
+  private enum TestVM {
+    ACCESSOR(0), SERVER1(1), SERVER2(2), SERVER3(3);
+
+    final int vmNumber;
+
+    TestVM(int vmNumber) {
+      this.vmNumber = vmNumber;
+    }
+  }
+
+  @SuppressWarnings("unused")
+  static Object[] coordinatorVMsAndRegionTypes() {
+    return new Object[] {
+        // {ClearCoordinatorVM, regionShortcut}
+        new Object[] {TestVM.SERVER1, PARTITION_REDUNDANT},
+        new Object[] {TestVM.ACCESSOR, PARTITION_REDUNDANT},
+        new Object[] {TestVM.SERVER1, PARTITION_REDUNDANT_PERSISTENT},
+        new Object[] {TestVM.ACCESSOR, PARTITION_REDUNDANT_PERSISTENT}
+    };
+  }
+
+  @SuppressWarnings("unused")
+  static Object[] coordinatorVMsAndRegionTypesNoAccessor() {
+    return new Object[] {
+        // {ClearCoordinatorVM, regionShortcut}
+        new Object[] {TestVM.SERVER1, PARTITION_REDUNDANT},
+        new Object[] {TestVM.SERVER2, PARTITION_REDUNDANT},
+        new Object[] {TestVM.SERVER1, PARTITION_REDUNDANT_PERSISTENT},
+        new Object[] {TestVM.SERVER2, PARTITION_REDUNDANT_PERSISTENT}
+    };
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    getBlackboard().initBlackboard();
+    server1 = getVM(TestVM.SERVER1.vmNumber);
+    server2 = getVM(TestVM.SERVER2.vmNumber);
+    server3 = getVM(TestVM.SERVER3.vmNumber);
+    accessor = getVM(TestVM.ACCESSOR.vmNumber);
+  }
+
+  private static DUnitBlackboard getBlackboard() {
+    if (blackboard == null) {
+      blackboard = new DUnitBlackboard();
+    }
+    return blackboard;
+  }
+
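+  /**
+   * Maps persistent shortcuts to their non-persistent equivalents, since the accessor
+   * (localMaxMemory = 0) hosts no data and therefore needs no disk store.
+   */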
+  private RegionShortcut getRegionAccessorShortcut(RegionShortcut dataStoreRegionShortcut) {
+    if (dataStoreRegionShortcut.isPersistent()) {
+      switch (dataStoreRegionShortcut) {
+        case PARTITION_PERSISTENT:
+          return PARTITION;
+        case PARTITION_REDUNDANT_PERSISTENT:
+          return PARTITION_REDUNDANT;
+        default:
+          throw new IllegalArgumentException(
+              "Invalid RegionShortcut specified: " + dataStoreRegionShortcut);
+      }
+    }
+
+    return dataStoreRegionShortcut;
+  }
+
+  private void initAccessor(RegionShortcut regionShortcut, Collection<String> regionNames) {
+    RegionShortcut accessorShortcut = getRegionAccessorShortcut(regionShortcut);
+    // StartupRecoveryDelay is set to -1 (disabled) to prevent automatic rebalancing when
+    // creating the region on other members.
+    regionNames.forEach(regionName -> {
+      PartitionAttributesFactory<String, String> attributesFactory =
+          new PartitionAttributesFactory<String, String>()
+              .setTotalNumBuckets(BUCKETS)
+              .setStartupRecoveryDelay(-1)
+              .setLocalMaxMemory(0);
+
+      if (regionName.equals(COLOCATED_REGION)) {
+        attributesFactory.setColocatedWith(REGION_NAME);
+      }
+
+      cacheRule.getCache()
+          .<String, String>createRegionFactory(accessorShortcut)
+          .setPartitionAttributes(attributesFactory.create())
+          .create(regionName);
+    });
+  }
+
+  private void initDataStore(RegionShortcut regionShortcut, Collection<String> regionNames) {
+    // StartupRecoveryDelay is set to -1 (disabled) to prevent automatic rebalancing when
+    // creating the region on other members.
+    regionNames.forEach(regionName -> {
+      PartitionAttributesFactory<String, String> attributesFactory =
+          new PartitionAttributesFactory<String, String>()
+              .setTotalNumBuckets(BUCKETS)
+              .setStartupRecoveryDelay(-1);
+
+      if (regionName.equals(COLOCATED_REGION)) {
+        attributesFactory.setColocatedWith(REGION_NAME);
+      }
+
+      RegionFactory<String, String> factory = cacheRule.getCache()
+          .<String, String>createRegionFactory(regionShortcut)
+          .setPartitionAttributes(attributesFactory.create())
+          .setCacheWriter(new BlackboardSignaller());
+
+      // Set up the disk store if the region is persistent
+      if (regionShortcut.isPersistent()) {
+        factory.setDiskStoreName(cacheRule.getCache()
+            .createDiskStoreFactory()
+            .create(regionName + DISK_STORE_SUFFIX)
+            .getName());
+      }
+
+      factory.create(regionName);
+    });
+  }
+
+  private void parametrizedSetup(RegionShortcut regionShortcut, Collection<String> regionNames,
+      boolean useAccessor) {
+    // Create and populate the region on server1 first, to create an unbalanced distribution of data
+    server1.invoke(() -> {
+      initDataStore(regionShortcut, regionNames);
+      regionNames.forEach(regionName -> {
+        Region<String, String> region = cacheRule.getCache().getRegion(regionName);
+        IntStream.range(0, ENTRIES).forEach(i -> region.put("key" + i, "value" + i));
+      });
+    });
+    server2.invoke(() -> initDataStore(regionShortcut, regionNames));
+    if (useAccessor) {
+      accessor.invoke(() -> initAccessor(regionShortcut, regionNames));
+    } else {
+      server3.invoke(() -> initDataStore(regionShortcut, regionNames));
+    }
+  }
+
+  private void setBlackboardSignallerCacheWriter(String regionName) {
+    cacheRule.getCache().<String, String>getRegion(regionName).getAttributesMutator()
+        .setCacheWriter(new BlackboardSignaller());
+  }
+
+  private AsyncInvocation<?> startClearAsync(TestVM clearCoordinatorVM, String regionName,
+      boolean waitForRebalance) {
+    return getVM(clearCoordinatorVM.vmNumber).invokeAsync(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(regionName);
+      if (waitForRebalance) {
+        // Wait for the signal from the blackboard before triggering the clear to start
+        getBlackboard().waitForGate(REBALANCE_HAS_BEGUN, GeodeAwaitility.getTimeout().toMillis(),
+            TimeUnit.MILLISECONDS);
+      }
+      region.clear();
+    });
+  }
+
+  // Triggers a rebalance and waits until it has started restoring redundancy before signalling
+  // the blackboard.
+  private AsyncInvocation<?> startRebalanceAsyncAndSignalBlackboard(boolean waitForClear) {
+    return server1.invokeAsync(() -> {
+      RebalanceFactory rebalance =
+          cacheRule.getCache().getResourceManager().createRebalanceFactory();
+      if (waitForClear) {
+        // Wait for the signal from the blackboard before triggering the rebalance to start
+        getBlackboard().waitForGate(CLEAR_HAS_BEGUN, GeodeAwaitility.getTimeout().toMillis(),
+            TimeUnit.MILLISECONDS);
+      }
+      RebalanceOperation op = rebalance.start();
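+      // Wait until the rebalance has created at least one bucket before signalling, so the
+      // concurrent clear is guaranteed to overlap an in-progress rebalance.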
+      await().untilAsserted(() -> assertThat(cacheRule.getCache().getInternalResourceManager()
+          .getStats().getRebalanceBucketCreatesCompleted()).isGreaterThan(0));
+      getBlackboard().signalGate(REBALANCE_HAS_BEGUN);
+      op.getResults();
+    });
+  }
+
+  private void executeClearAndRebalanceAsyncInvocations(TestVM clearCoordinatorVM,
+      String regionToClear, boolean rebalanceFirst) throws InterruptedException {
+    getVM(clearCoordinatorVM.vmNumber)
+        .invoke(() -> setBlackboardSignallerCacheWriter(regionToClear));
+
+    AsyncInvocation<?> clearInvocation = startClearAsync(clearCoordinatorVM, regionToClear,
+        rebalanceFirst);
+
+    AsyncInvocation<?> rebalanceInvocation =
+        startRebalanceAsyncAndSignalBlackboard(!rebalanceFirst);
+
+    clearInvocation.await();
+    rebalanceInvocation.await();
+  }
+
+  private void prepareMemberToShutdownOnClear() throws TimeoutException, InterruptedException {
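+    // Once the clear has begun, crash this member without triggering auto-reconnect.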
+    getBlackboard().waitForGate(CLEAR_HAS_BEGUN, GeodeAwaitility.getTimeout().toMillis(),
+        TimeUnit.MILLISECONDS);
+    InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
+    MembershipManagerHelper.crashDistributedSystem(
+        InternalDistributedSystem.getConnectedInstance());
+    await().untilAsserted(
+        () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNull());
+  }
+
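+  /**
+   * Waits until there are no replies pending and no bucket operations in progress for the
+   * given region.
+   */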
+  private void waitForSilenceOnRegion(String regionName) {
+    DMStats dmStats = cacheRule.getSystem().getDistributionManager().getStats();
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(regionName);
+    PartitionedRegionStats partitionedRegionStats = region.getPrStats();
+    await().untilAsserted(() -> {
+      assertThat(dmStats.getReplyWaitsInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getVolunteeringInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getPrimaryTransfersInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalanceBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalancePrimaryTransfersInProgress()).isEqualTo(0);
+    });
+  }
+
+  private void assertRegionIsEmpty(List<VM> vms, String regionName) {
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilenceOnRegion(regionName);
+      PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(regionName);
+
+      assertThat(region.getLocalSize()).as("Region local size should be 0 for region " + regionName)
+          .isEqualTo(0);
+    }));
+  }
+
+  private void assertRegionIsNotEmpty(List<VM> vms, String regionName) {
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilenceOnRegion(regionName);
+      PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(regionName);
+
+      assertThat(region.size()).as("Region size should be " + ENTRIES + " for region " + regionName)
+          .isEqualTo(ENTRIES);
+    }));
+  }
+
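+  /**
+   * A rebalance of an already balanced system should create or transfer nothing.
+   */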
+  private void assertRebalanceDoesNoWork() {
+    server1.invoke(() -> {
+      RebalanceResults results =
+          cacheRule.getCache().getResourceManager().createRebalanceFactory().start().getResults();
+
+      assertThat(results.getTotalBucketTransfersCompleted())
+          .as("Expected bucket transfers to be zero").isEqualTo(0);
+      assertThat(results.getTotalBucketCreatesCompleted()).as("Expected bucket creates to be zero")
+          .isEqualTo(0);
+      assertThat(results.getTotalPrimaryTransfersCompleted())
+          .as("Expected primary transfers to be zero").isEqualTo(0);
+    });
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearRegionStartedAfterRebalanceClearsRegion(TestVM clearCoordinatorVM,
+      RegionShortcut regionType) throws InterruptedException {
+    parametrizedSetup(regionType, Collections.singleton(REGION_NAME), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, true);
+
+    // Assert that the region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the region was successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearRegionStartedBeforeRebalanceClearsRegion(TestVM clearCoordinatorVM,
+      RegionShortcut regionType) throws InterruptedException {
+    parametrizedSetup(regionType, Collections.singleton(REGION_NAME), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, false);
+
+    // Assert that the region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the region was successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearParentColocatedRegionStartedAfterRebalanceOfColocatedRegionsClearsRegionAndDoesNotInterfereWithRebalance(
+      TestVM clearCoordinatorVM, RegionShortcut regionType)
+      throws InterruptedException {
+    parametrizedSetup(regionType, asList(REGION_NAME, COLOCATED_REGION), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, true);
+
+    // Assert that the parent region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the colocated region is the correct size
+    assertRegionIsNotEmpty(asList(accessor, server1, server2), COLOCATED_REGION);
+
+    // Assert that the regions were successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearParentColocatedRegionStartedBeforeRebalanceOfColocatedRegionsClearsRegionAndDoesNotInterfereWithRebalance(
+      TestVM clearCoordinatorVM, RegionShortcut regionType)
+      throws InterruptedException {
+    parametrizedSetup(regionType, asList(REGION_NAME, COLOCATED_REGION), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, false);
+
+    // Assert that the parent region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the colocated region is the correct size
+    assertRegionIsNotEmpty(asList(accessor, server1, server2), COLOCATED_REGION);
+
+    // Assert that the regions were successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearChildColocatedRegionStartedAfterRebalanceOfColocatedRegionsClearsRegionAndDoesNotInterfereWithRebalance(
+      TestVM clearCoordinatorVM, RegionShortcut regionType)
+      throws InterruptedException {
+    parametrizedSetup(regionType, asList(REGION_NAME, COLOCATED_REGION), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, COLOCATED_REGION, true);
+
+    // Assert that the colocated region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), COLOCATED_REGION);
+
+    // Assert that the parent region is the correct size
+    assertRegionIsNotEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the regions were successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearChildColocatedRegionStartedBeforeRebalanceOfColocatedRegionsClearsRegionAndDoesNotInterfereWithRebalance(
+      TestVM clearCoordinatorVM, RegionShortcut regionType)
+      throws InterruptedException {
+    parametrizedSetup(regionType, asList(REGION_NAME, COLOCATED_REGION), true);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, COLOCATED_REGION, false);
+
+    // Assert that the colocated region is empty
+    assertRegionIsEmpty(asList(accessor, server1, server2), COLOCATED_REGION);
+
+    // Assert that the parent region is the correct size
+    assertRegionIsNotEmpty(asList(accessor, server1, server2), REGION_NAME);
+
+    // Assert that the regions were successfully rebalanced (a second rebalance should do no work)
+    assertRebalanceDoesNoWork();
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypesNoAccessor")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearStartedBeforeRebalanceClearsRegionWhenNonCoordinatorMemberIsKilled(
+      TestVM clearCoordinatorVM, RegionShortcut regionType)
+      throws InterruptedException {
+    parametrizedSetup(regionType, Collections.singleton(REGION_NAME), false);
+
+    getVM(clearCoordinatorVM.vmNumber).invoke(() -> setBlackboardSignallerCacheWriter(REGION_NAME));
+
+    // Make server3 shut down when it receives the signal from the blackboard that clear has started
+    AsyncInvocation<?> shutdownInvocation =
+        server3.invokeAsync(this::prepareMemberToShutdownOnClear);
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, false);
+
+    shutdownInvocation.await();
+
+    // Assert that the region is empty
+    assertRegionIsEmpty(asList(server1, server2), REGION_NAME);
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypesNoAccessor")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearStartedAfterRebalanceClearsRegionWhenNewMemberJoins(TestVM clearCoordinatorVM,
+      RegionShortcut regionType) throws InterruptedException {
+
+    // Load the data on server1 before creating the region on other servers, to create an imbalanced
+    // system
+    server1.invoke(() -> {
+      initDataStore(regionType, Collections.singleton(REGION_NAME));
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, ENTRIES).forEach(i -> region.put("key" + i, "value" + i));
+    });
+    server2.invoke(() -> initDataStore(regionType, Collections.singleton(REGION_NAME)));
+
+    // Wait for rebalance to start, then create the region on server3
+    AsyncInvocation<?> createRegion = server3.invokeAsync(() -> {
+      cacheRule.createCache();
+
+      PartitionAttributesFactory<String, String> attributesFactory =
+          new PartitionAttributesFactory<String, String>()
+              .setTotalNumBuckets(BUCKETS)
+              .setStartupRecoveryDelay(-1);
+
+      RegionFactory<String, String> factory = cacheRule.getCache()
+          .<String, String>createRegionFactory(regionType)
+          .setPartitionAttributes(attributesFactory.create())
+          .setCacheWriter(new BlackboardSignaller());
+
+      if (regionType.isPersistent()) {
+        factory.setDiskStoreName(cacheRule.getCache()
+            .createDiskStoreFactory()
+            .create(REGION_NAME + DISK_STORE_SUFFIX)
+            .getName());
+      }
+
+      getBlackboard().waitForGate(REBALANCE_HAS_BEGUN, GeodeAwaitility.getTimeout().toMillis(),
+          TimeUnit.MILLISECONDS);
+
+      factory.create(REGION_NAME);
+    });
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, true);
+
+    createRegion.await();
+
+    // Assert that the region is empty
+    assertRegionIsEmpty(asList(server1, server2, server3), REGION_NAME);
+  }
+
+  @Test
+  @Parameters(method = "coordinatorVMsAndRegionTypesNoAccessor")
+  @TestCaseName("[{index}] {method}(ClearCoordinator:{0}, RegionType:{1})")
+  public void clearStartedBeforeRebalanceClearsRegionWhenNewMemberJoins(TestVM clearCoordinatorVM,
+      RegionShortcut regionType) throws InterruptedException {
+
+    // Load the data on server1 before creating the region on other servers, to create an imbalanced
+    // system
+    server1.invoke(() -> {
+      initDataStore(regionType, Collections.singleton(REGION_NAME));
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, ENTRIES).forEach(i -> region.put("key" + i, "value" + i));
+    });
+
+    server2.invoke(() -> initDataStore(regionType, Collections.singleton(REGION_NAME)));
+
+    // Wait for clear to start, then create the region on server3
+    AsyncInvocation<?> createRegion = server3.invokeAsync(() -> {
+      cacheRule.createCache();
+
+      PartitionAttributesFactory<String, String> attributesFactory =
+          new PartitionAttributesFactory<String, String>()
+              .setTotalNumBuckets(BUCKETS)
+              .setStartupRecoveryDelay(-1);
+
+      RegionFactory<String, String> factory = cacheRule.getCache()
+          .<String, String>createRegionFactory(regionType)
+          .setPartitionAttributes(attributesFactory.create())
+          .setCacheWriter(new BlackboardSignaller());
+
+      if (regionType.isPersistent()) {
+        factory.setDiskStoreName(cacheRule.getCache()
+            .createDiskStoreFactory()
+            .create(REGION_NAME + DISK_STORE_SUFFIX)
+            .getName());
+      }
+
+      getBlackboard().waitForGate(CLEAR_HAS_BEGUN, GeodeAwaitility.getTimeout().toMillis(),
+          TimeUnit.MILLISECONDS);
+
+      factory.create(REGION_NAME);
+    });
+
+    executeClearAndRebalanceAsyncInvocations(clearCoordinatorVM, REGION_NAME, false);
+
+    createRegion.await();
+
+    // Assert that the region is empty
+    assertRegionIsEmpty(asList(server1, server2, server3), REGION_NAME);
+  }
+
+  public static class BlackboardSignaller extends CacheWriterAdapter<String, String> {
+    @Override
+    public synchronized void beforeRegionClear(RegionEvent<String, String> event)
+        throws CacheWriterException {
+      getBlackboard().signalGate(CLEAR_HAS_BEGUN);
+    }
+  }
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionOverflowClearDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionOverflowClearDUnitTest.java
new file mode 100644
index 0000000..c10d1db
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionOverflowClearDUnitTest.java
@@ -0,0 +1,380 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.distributed.ConfigurationProperties.ENABLE_CLUSTER_CONFIGURATION;
+import static org.apache.geode.distributed.ConfigurationProperties.HTTP_SERVICE_PORT;
+import static org.apache.geode.distributed.ConfigurationProperties.JMX_MANAGER;
+import static org.apache.geode.distributed.ConfigurationProperties.JMX_MANAGER_PORT;
+import static org.apache.geode.distributed.ConfigurationProperties.JMX_MANAGER_START;
+import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
+import static org.apache.geode.distributed.ConfigurationProperties.LOG_FILE;
+import static org.apache.geode.distributed.ConfigurationProperties.MAX_WAIT_TIME_RECONNECT;
+import static org.apache.geode.distributed.ConfigurationProperties.MEMBER_TIMEOUT;
+import static org.apache.geode.distributed.ConfigurationProperties.USE_CLUSTER_CONFIGURATION;
+import static org.apache.geode.internal.AvailablePortHelper.getRandomAvailableTCPPorts;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.apache.geode.test.dunit.VM.getVMId;
+import static org.apache.geode.test.dunit.VM.toArray;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.File;
+import java.io.Serializable;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.IntStream;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.DiskStoreFactory;
+import org.apache.geode.cache.EvictionAction;
+import org.apache.geode.cache.EvictionAttributes;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.client.ClientCache;
+import org.apache.geode.cache.client.ClientCacheFactory;
+import org.apache.geode.cache.client.ClientRegionShortcut;
+import org.apache.geode.distributed.LocatorLauncher;
+import org.apache.geode.distributed.ServerLauncher;
+import org.apache.geode.distributed.internal.InternalLocator;
+import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+import org.apache.geode.test.junit.rules.GfshCommandRule;
+import org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
+
+public class PartitionedRegionOverflowClearDUnitTest implements Serializable {
+
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule(5);
+
+  @Rule
+  public SerializableTemporaryFolder temporaryFolder = new SerializableTemporaryFolder();
+
+  @Rule
+  public transient GfshCommandRule gfsh = new GfshCommandRule();
+
+  private VM locator;
+  private VM server1;
+  private VM server2;
+  private VM accessor;
+  private VM client;
+
+  private static final String LOCATOR_NAME = "locator";
+  private static final String SERVER1_NAME = "server1";
+  private static final String SERVER2_NAME = "server2";
+  private static final String SERVER3_NAME = "server3";
+
+  private File locatorDir;
+  private File server1Dir;
+  private File server2Dir;
+  private File server3Dir;
+
+  private String locatorString;
+
+  private int locatorPort;
+  private int locatorJmxPort;
+  private int locatorHttpPort;
+  private int serverPort1;
+  private int serverPort2;
+  private int serverPort3;
+
+  private static final AtomicReference<LocatorLauncher> LOCATOR_LAUNCHER = new AtomicReference<>();
+
+  private static final AtomicReference<ServerLauncher> SERVER_LAUNCHER = new AtomicReference<>();
+
+  private static final AtomicReference<ClientCache> CLIENT_CACHE = new AtomicReference<>();
+
+  private static final String OVERFLOW_REGION_NAME = "testOverflowRegion";
+
+  public static final int NUM_ENTRIES = 1000;
+
+  @Before
+  public void setup() throws Exception {
+    locator = getVM(0);
+    server1 = getVM(1);
+    server2 = getVM(2);
+    accessor = getVM(3);
+    client = getVM(4);
+
+    locatorDir = temporaryFolder.newFolder(LOCATOR_NAME);
+    server1Dir = temporaryFolder.newFolder(SERVER1_NAME);
+    server2Dir = temporaryFolder.newFolder(SERVER2_NAME);
+    server3Dir = temporaryFolder.newFolder(SERVER3_NAME);
+
+    int[] ports = getRandomAvailableTCPPorts(6);
+    locatorPort = ports[0];
+    locatorJmxPort = ports[1];
+    locatorHttpPort = ports[2];
+    serverPort1 = ports[3];
+    serverPort2 = ports[4];
+    serverPort3 = ports[5];
+
+    locator.invoke(
+        () -> startLocator(locatorDir, locatorPort, locatorJmxPort, locatorHttpPort));
+    gfsh.connectAndVerify(locatorJmxPort, GfshCommandRule.PortType.jmxManager);
+
+    locatorString = "localhost[" + locatorPort + "]";
+    server1.invoke(() -> startServer(SERVER1_NAME, server1Dir, locatorString, serverPort1));
+    server2.invoke(() -> startServer(SERVER2_NAME, server2Dir, locatorString, serverPort2));
+  }
+
+  @After
+  public void tearDown() {
+    destroyRegion();
+    destroyDiskStore(DiskStoreFactory.DEFAULT_DISK_STORE_NAME);
+
+    for (VM vm : new VM[] {client, accessor, server1, server2, locator}) {
+      vm.invoke(() -> {
+        if (CLIENT_CACHE.get() != null) {
+          CLIENT_CACHE.get().close();
+        }
+        if (LOCATOR_LAUNCHER.get() != null) {
+          LOCATOR_LAUNCHER.get().stop();
+        }
+        if (SERVER_LAUNCHER.get() != null) {
+          SERVER_LAUNCHER.get().stop();
+        }
+
+        CLIENT_CACHE.set(null);
+        LOCATOR_LAUNCHER.set(null);
+        SERVER_LAUNCHER.set(null);
+      });
+    }
+  }
+
+  @Test
+  public void testGfshClearRegionWithOverflow() throws InterruptedException {
+    createPartitionRedundantPersistentOverflowRegion();
+
+    populateRegion();
+    assertRegionSize(NUM_ENTRIES);
+
+    gfsh.executeAndAssertThat("clear region --name=" + OVERFLOW_REGION_NAME).statusIsSuccess();
+    assertRegionSize(0);
+
+    restartServers();
+
+    assertRegionSize(0);
+  }
+
+  @Test
+  public void testClientRegionClearWithOverflow() throws InterruptedException {
+    createPartitionRedundantPersistentOverflowRegion();
+
+    populateRegion();
+    assertRegionSize(NUM_ENTRIES);
+
+    client.invoke(() -> {
+      if (CLIENT_CACHE.get() == null) {
+        ClientCache clientCache =
+            new ClientCacheFactory().addPoolLocator("localhost", locatorPort).create();
+        CLIENT_CACHE.set(clientCache);
+      }
+
+      CLIENT_CACHE.get().getRegion(OVERFLOW_REGION_NAME).clear();
+    });
+    assertRegionSize(0);
+
+    restartServers();
+
+    assertRegionSize(0);
+  }
+
+  @Test
+  public void testAccessorRegionClearWithOverflow() throws InterruptedException {
+    for (VM vm : toArray(server1, server2)) {
+      vm.invoke(this::createRegionWithDefaultDiskStore);
+    }
+
+    accessor.invoke(() -> {
+      startServer(SERVER3_NAME, server3Dir, locatorString, serverPort3);
+      SERVER_LAUNCHER.get().getCache()
+          .createRegionFactory(RegionShortcut.PARTITION_REDUNDANT_OVERFLOW)
+          .setPartitionAttributes(
+              new PartitionAttributesFactory().setLocalMaxMemory(0).create())
+          .create(OVERFLOW_REGION_NAME);
+    });
+
+    populateRegion();
+    assertRegionSize(NUM_ENTRIES);
+
+    accessor.invoke(() -> {
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).size())
+          .isEqualTo(NUM_ENTRIES);
+      SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).clear();
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).size())
+          .isEqualTo(0);
+    });
+    assertRegionSize(0);
+
+    for (VM vm : toArray(server1, server2)) {
+      vm.invoke(PartitionedRegionOverflowClearDUnitTest::stopServer);
+    }
+
+    gfsh.executeAndAssertThat("list members").statusIsSuccess();
+    assertThat(gfsh.getGfshOutput()).contains("locator");
+    AsyncInvocation asyncInvocation1 = server1.invokeAsync(() -> {
+      startServer(SERVER1_NAME, server1Dir, locatorString, serverPort1);
+      createRegionWithDefaultDiskStore();
+    });
+    AsyncInvocation asyncInvocation2 = server2.invokeAsync(() -> {
+      startServer(SERVER2_NAME, server2Dir, locatorString, serverPort2);
+      createRegionWithDefaultDiskStore();
+    });
+    asyncInvocation1.get();
+    asyncInvocation2.get();
+    assertRegionSize(0);
+  }
+
+  private void restartServers() throws InterruptedException {
+    for (VM vm : toArray(server1, server2)) {
+      vm.invoke(PartitionedRegionOverflowClearDUnitTest::stopServer);
+    }
+
+    gfsh.executeAndAssertThat("list members").statusIsSuccess();
+    assertThat(gfsh.getGfshOutput()).contains("locator");
+    AsyncInvocation asyncInvocation1 =
+        server1
+            .invokeAsync(() -> startServer(SERVER1_NAME, server1Dir, locatorString, serverPort1));
+    AsyncInvocation asyncInvocation2 =
+        server2
+            .invokeAsync(() -> startServer(SERVER2_NAME, server2Dir, locatorString, serverPort2));
+    asyncInvocation1.get();
+    asyncInvocation2.get();
+  }
+
+  private void createPartitionRedundantPersistentOverflowRegion() {
+    String command = new CommandStringBuilder("create region")
+        .addOption("name", OVERFLOW_REGION_NAME)
+        .addOption("type", "PARTITION_REDUNDANT_PERSISTENT_OVERFLOW")
+        .addOption("redundant-copies", "1")
+        .addOption("eviction-entry-count", "1")
+        .addOption("eviction-action", "overflow-to-disk")
+        .getCommandString();
+    gfsh.executeAndAssertThat(command).statusIsSuccess();
+  }
+
+  private void destroyRegion() {
+    server1.invoke(() -> {
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME)).isNotNull();
+      SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).destroyRegion();
+    });
+  }
+
+  private void destroyDiskStore(String diskStoreName) {
+    String command = new CommandStringBuilder("destroy disk-store")
+        .addOption("name", diskStoreName)
+        .getCommandString();
+    gfsh.executeAndAssertThat(command).statusIsSuccess();
+  }
+
+  private void createRegionWithDefaultDiskStore() {
+    SERVER_LAUNCHER.get().getCache().createDiskStoreFactory()
+        .create(DiskStoreFactory.DEFAULT_DISK_STORE_NAME);
+    SERVER_LAUNCHER.get().getCache()
+        .createRegionFactory(RegionShortcut.PARTITION_REDUNDANT_PERSISTENT_OVERFLOW)
+        .setPartitionAttributes(
+            new PartitionAttributesFactory().setRedundantCopies(1).create())
+        .setDiskStoreName(DiskStoreFactory.DEFAULT_DISK_STORE_NAME)
+        .setEvictionAttributes(
+            EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
+        .create(OVERFLOW_REGION_NAME);
+  }
+
+  private void populateRegion() {
+    client.invoke(() -> {
+      if (CLIENT_CACHE.get() == null) {
+        ClientCache clientCache =
+            new ClientCacheFactory().addPoolLocator("localhost", locatorPort).create();
+        CLIENT_CACHE.set(clientCache);
+      }
+
+      Region<Object, Object> clientRegion = CLIENT_CACHE.get()
+          .createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
+          .create(OVERFLOW_REGION_NAME);
+
+      IntStream.range(0, NUM_ENTRIES).forEach(i -> clientRegion.put("key-" + i, "value-" + i));
+    });
+  }
+
+  private void assertRegionSize(int size) {
+    server1.invoke(() -> {
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME)).isNotNull();
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).size())
+          .isEqualTo(size);
+    });
+    server2.invoke(() -> {
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME)).isNotNull();
+      assertThat(SERVER_LAUNCHER.get().getCache().getRegion(OVERFLOW_REGION_NAME).size())
+          .isEqualTo(size);
+    });
+  }
+
+  private void startLocator(File directory, int port, int jmxPort, int httpPort) {
+    LOCATOR_LAUNCHER.set(new LocatorLauncher.Builder()
+        .setMemberName(LOCATOR_NAME)
+        .setPort(port)
+        .setWorkingDirectory(directory.getAbsolutePath())
+        .set(HTTP_SERVICE_PORT, String.valueOf(httpPort))
+        .set(JMX_MANAGER, "true")
+        .set(JMX_MANAGER_PORT, String.valueOf(jmxPort))
+        .set(JMX_MANAGER_START, "true")
+        .set(LOG_FILE, new File(directory, LOCATOR_NAME + ".log").getAbsolutePath())
+        .set(MAX_WAIT_TIME_RECONNECT, "1000")
+        .set(MEMBER_TIMEOUT, "2000")
+        .set(ENABLE_CLUSTER_CONFIGURATION, "true")
+        .set(USE_CLUSTER_CONFIGURATION, "true")
+        .build());
+
+    LOCATOR_LAUNCHER.get().start();
+
+    await().untilAsserted(() -> {
+      InternalLocator locator = (InternalLocator) LOCATOR_LAUNCHER.get().getLocator();
+      assertThat(locator.isSharedConfigurationRunning())
+          .as("Locator shared configuration is running on locator" + getVMId())
+          .isTrue();
+    });
+  }
+
+  private void startServer(String name, File workingDirectory, String locator, int serverPort) {
+    SERVER_LAUNCHER.set(new ServerLauncher.Builder()
+        .setDeletePidFileOnStop(true)
+        .setMemberName(name)
+        .setWorkingDirectory(workingDirectory.getAbsolutePath())
+        .setServerPort(serverPort)
+        .set(HTTP_SERVICE_PORT, "0")
+        .set(LOCATORS, locator)
+        .set(LOG_FILE, new File(workingDirectory, name + ".log").getAbsolutePath())
+        .set(MAX_WAIT_TIME_RECONNECT, "1000")
+        .set(MEMBER_TIMEOUT, "2000")
+        .set(ENABLE_CLUSTER_CONFIGURATION, "true")
+        .set(USE_CLUSTER_CONFIGURATION, "true")
+        .build());
+
+    SERVER_LAUNCHER.get().start();
+  }
+
+  private static void stopServer() {
+    SERVER_LAUNCHER.get().stop();
+  }
+}
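
Stripped of the dunit scaffolding, the client-side behavior exercised by the test
above reduces to a few lines of plain API. A minimal sketch, assuming a running
cluster, a hypothetical locator port of 10334, and the same region name as the test:

    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.client.ClientCache;
    import org.apache.geode.cache.client.ClientCacheFactory;
    import org.apache.geode.cache.client.ClientRegionShortcut;

    public class OverflowClearSketch {
      public static void main(String[] args) {
        // Connect through the locator (port is hypothetical).
        ClientCache cache = new ClientCacheFactory()
            .addPoolLocator("localhost", 10334)
            .create();

        // Proxy for the server-side persistent overflow partitioned region.
        Region<String, String> region = cache
            .<String, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("testOverflowRegion");

        region.put("key-0", "value-0");
        region.clear(); // clears all buckets, including entries overflowed to disk
        System.out.println("size on server: " + region.sizeOnServer()); // expected: 0

        cache.close();
      }
    }
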
diff --git a/geode-junit/src/main/java/org/apache/geode/cache/query/data/City.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionPersistentClearDUnitTest.java
similarity index 51%
copy from geode-junit/src/main/java/org/apache/geode/cache/query/data/City.java
copy to geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionPersistentClearDUnitTest.java
index e7e7b39..c758446 100644
--- a/geode-junit/src/main/java/org/apache/geode/cache/query/data/City.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionPersistentClearDUnitTest.java
@@ -12,41 +12,15 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
+package org.apache.geode.internal.cache;
 
-/*
- * City.java
- *
- * Created on September 30, 2005, 6:20 PM
- */
-
-package org.apache.geode.cache.query.data;
 
-import java.io.Serializable;
 
-public class City implements Serializable {
-  public String name;
-  public int zip;
+import org.apache.geode.cache.RegionShortcut;
 
-  /** Creates a new instance of City */
-  public City(String name, int zip) {
-    this.name = name;
-    this.zip = zip;
-  }// end of constructor 1
-
-  public City(int i) {
-    String arr1[] = {"MUMBAI", "PUNE", "GANDHINAGAR", "CHANDIGARH"};
-    /* this is for the test to have 50% of the objects belonging to one city */
-    this.name = arr1[i % 2];
-    this.zip = 425125 + i;
-  }// end of constructor 2
-
-  ////////////////////////////
-
-  public String getName() {
-    return name;
-  }
+public class PartitionedRegionPersistentClearDUnitTest extends PartitionedRegionClearDUnitTest {
 
-  public int getZip() {
-    return zip;
+  @Override
+  protected RegionShortcut getRegionShortCut() {
+    return RegionShortcut.PARTITION_REDUNDANT_PERSISTENT;
   }
-}// end of class
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PersistentPartitionedRegionClearWithExpirationDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PersistentPartitionedRegionClearWithExpirationDUnitTest.java
new file mode 100644
index 0000000..f6f25bd
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PersistentPartitionedRegionClearWithExpirationDUnitTest.java
@@ -0,0 +1,530 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.cache.ExpirationAction.DESTROY;
+import static org.apache.geode.cache.RegionShortcut.PARTITION;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_OVERFLOW;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_PERSISTENT;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_PERSISTENT_OVERFLOW;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_OVERFLOW;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_PERSISTENT;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_REDUNDANT_PERSISTENT_OVERFLOW;
+import static org.apache.geode.internal.util.ArrayUtils.asList;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.IntStream;
+
+import junitparams.JUnitParamsRunner;
+import junitparams.Parameters;
+import junitparams.naming.TestCaseName;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.apache.geode.ForcedDisconnectException;
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheWriter;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.ExpirationAttributes;
+import org.apache.geode.cache.PartitionAttributes;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.PartitionedRegionPartialClearException;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.util.CacheWriterAdapter;
+import org.apache.geode.distributed.DistributedSystemDisconnectedException;
+import org.apache.geode.distributed.internal.DMStats;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.membership.api.MembershipManagerHelper;
+import org.apache.geode.test.awaitility.GeodeAwaitility;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedDiskDirRule;
+import org.apache.geode.test.dunit.rules.DistributedRule;
+
+/**
+ * Tests to verify that {@link PartitionedRegion#clear()} cancels all remaining expiration tasks
+ * on the {@link PartitionedRegion} once the operation is executed.
+ */
+@RunWith(JUnitParamsRunner.class)
+public class PersistentPartitionedRegionClearWithExpirationDUnitTest implements Serializable {
+  private static final Integer BUCKETS = 13;
+  private static final Integer EXPIRATION_TIME = 5 * 60;
+  private static final Integer SMALL_EXPIRATION_TIME = 10;
+  private static final String REGION_NAME = "PartitionedRegion";
+
+  @Rule
+  public DistributedRule distributedRule = new DistributedRule(3);
+
+  @Rule
+  public CacheRule cacheRule = CacheRule.builder().createCacheInAll().build();
+
+  @Rule
+  public DistributedDiskDirRule distributedDiskDirRule = new DistributedDiskDirRule();
+
+  private VM accessor, server1, server2;
+
+  private enum TestVM {
+    ACCESSOR(0), SERVER1(1), SERVER2(2);
+
+    final int vmNumber;
+
+    TestVM(int vmNumber) {
+      this.vmNumber = vmNumber;
+    }
+  }
+
+  @SuppressWarnings("unused")
+  static RegionShortcut[] regionTypes() {
+    return new RegionShortcut[] {
+        PARTITION_PERSISTENT,
+        PARTITION_PERSISTENT_OVERFLOW,
+        PARTITION_REDUNDANT_PERSISTENT,
+        PARTITION_REDUNDANT_PERSISTENT_OVERFLOW
+    };
+  }
+
+  @SuppressWarnings("unused")
+  static Object[] vmsAndRegionTypes() {
+    ArrayList<Object[]> parameters = new ArrayList<>();
+    RegionShortcut[] regionShortcuts = regionTypes();
+
+    Arrays.stream(regionShortcuts).forEach(regionShortcut -> {
+      parameters.add(new Object[] {TestVM.SERVER1, regionShortcut});
+      parameters.add(new Object[] {TestVM.ACCESSOR, regionShortcut});
+    });
+
+    return parameters.toArray();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    server1 = getVM(TestVM.SERVER1.vmNumber);
+    server2 = getVM(TestVM.SERVER2.vmNumber);
+    accessor = getVM(TestVM.ACCESSOR.vmNumber);
+  }
+
+  private RegionShortcut getRegionAccessorShortcut(RegionShortcut dataStoreRegionShortcut) {
+    if (dataStoreRegionShortcut.isPersistent()) {
+      switch (dataStoreRegionShortcut) {
+        case PARTITION_PERSISTENT:
+          return PARTITION;
+        case PARTITION_PERSISTENT_OVERFLOW:
+          return PARTITION_OVERFLOW;
+        case PARTITION_REDUNDANT_PERSISTENT:
+          return PARTITION_REDUNDANT;
+        case PARTITION_REDUNDANT_PERSISTENT_OVERFLOW:
+          return PARTITION_REDUNDANT_OVERFLOW;
+      }
+    }
+
+    return dataStoreRegionShortcut;
+  }
+
+  private void initAccessor(RegionShortcut regionShortcut,
+      ExpirationAttributes expirationAttributes) {
+    RegionShortcut accessorShortcut = getRegionAccessorShortcut(regionShortcut);
+    PartitionAttributes<String, String> attributes =
+        new PartitionAttributesFactory<String, String>()
+            .setTotalNumBuckets(BUCKETS)
+            .setLocalMaxMemory(0)
+            .create();
+
+    cacheRule.getCache()
+        .<String, String>createRegionFactory(accessorShortcut)
+        .setPartitionAttributes(attributes)
+        .setEntryTimeToLive(expirationAttributes)
+        .setEntryIdleTimeout(expirationAttributes)
+        .create(REGION_NAME);
+  }
+
+  private void initDataStore(RegionShortcut regionShortcut,
+      ExpirationAttributes expirationAttributes) {
+    PartitionAttributes<String, String> attributes =
+        new PartitionAttributesFactory<String, String>()
+            .setTotalNumBuckets(BUCKETS)
+            .create();
+
+    cacheRule.getCache()
+        .<String, String>createRegionFactory(regionShortcut)
+        .setPartitionAttributes(attributes)
+        .setEntryTimeToLive(expirationAttributes)
+        .setEntryIdleTimeout(expirationAttributes)
+        .create(REGION_NAME);
+
+    ExpiryTask.expiryTaskListener = new ExpirationListener();
+  }
+
+  private void parametrizedSetup(RegionShortcut regionShortcut,
+      ExpirationAttributes expirationAttributes) {
+    server1.invoke(() -> initDataStore(regionShortcut, expirationAttributes));
+    server2.invoke(() -> initDataStore(regionShortcut, expirationAttributes));
+    accessor.invoke(() -> initAccessor(regionShortcut, expirationAttributes));
+  }
+
+  private void waitForSilence() {
+    DMStats dmStats = cacheRule.getSystem().getDistributionManager().getStats();
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    PartitionedRegionStats partitionedRegionStats = region.getPrStats();
+
+    await().untilAsserted(() -> {
+      assertThat(dmStats.getReplyWaitsInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getVolunteeringInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getPrimaryTransfersInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalanceBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalancePrimaryTransfersInProgress()).isEqualTo(0);
+    });
+  }
+
+  /**
+   * Populates the region and verifies the data on the selected VMs.
+   */
+  private void populateRegion(VM feeder, int entryCount, List<VM> vms) {
+    feeder.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, entryCount).forEach(i -> region.put(String.valueOf(i), "Value_" + i));
+    });
+
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilence();
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+
+      IntStream.range(0, entryCount)
+          .forEach(i -> assertThat(region.get(String.valueOf(i))).isEqualTo("Value_" + i));
+    }));
+  }
+
+  /**
+   * Asserts that the region is empty on requested VMs.
+   */
+  private void assertRegionIsEmpty(List<VM> vms) {
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilence();
+      PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+
+      assertThat(region.getLocalSize()).isEqualTo(0);
+    }));
+  }
+
+  /**
+   * Asserts that the region data is consistent across buckets.
+   */
+  private void assertRegionBucketsConsistency() throws ForceReattemptException {
+    waitForSilence();
+    List<BucketDump> bucketDumps;
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    // Redundant copies + 1 primary.
+    int expectedCopies = region.getRedundantCopies() + 1;
+
+    for (int bucketId = 0; bucketId < BUCKETS; bucketId++) {
+      bucketDumps = region.getAllBucketEntries(bucketId);
+      assertThat(bucketDumps.size()).as("Bucket " + bucketId + " should have " + expectedCopies
+          + " copies, but has " + bucketDumps.size()).isEqualTo(expectedCopies);
+
+      // Check that all copies of the bucket have the same data.
+      if (bucketDumps.size() > 1) {
+        BucketDump firstDump = bucketDumps.get(0);
+
+        for (int j = 1; j < bucketDumps.size(); j++) {
+          BucketDump otherDump = bucketDumps.get(j);
+          assertThat(otherDump.getValues())
+              .as("Values for bucket " + bucketId + " on member " + otherDump.getMember()
+                  + " are not consistent with member " + firstDump.getMember())
+              .isEqualTo(firstDump.getValues());
+          assertThat(otherDump.getVersions())
+              .as("Versions for bucket " + bucketId + " on member " + otherDump.getMember()
+                  + " are not consistent with member " + firstDump.getMember())
+              .isEqualTo(firstDump.getVersions());
+        }
+      }
+    }
+  }
+
+  /**
+   * Register the MemberKiller CacheWriter on the given vms.
+   */
+  private void registerVMKillerAsCacheWriter(List<VM> vmsToBounce) {
+    vmsToBounce.forEach(vm -> vm.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      region.getAttributesMutator().setCacheWriter(new MemberKiller());
+    }));
+  }
+
+  private void doClear() {
+    Cache cache = cacheRule.getCache();
+    boolean retry;
+    do {
+      retry = false;
+      try {
+        cache.getRegion(REGION_NAME).clear();
+      } catch (PartitionedRegionPartialClearException | CacheWriterException ex) {
+        retry = true;
+      }
+    } while (retry);
+  }
+
+  /**
+   * The test does the following (clear coordinator and region type are parametrized):
+   * - Populates the Partition Region (entries have expiration).
+   * - Verifies that the entries are synchronized on all members.
+   * - Clears the Partition Region once.
+   * - Asserts that, after the clear is finished:
+   * . No expiration tasks were executed.
+   * . All expiration tasks were cancelled.
+   * . Map of expiry tasks per bucket is empty.
+   * . The Partition Region is empty on all members.
+   */
+  @Test
+  @Parameters(method = "vmsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
+  public void clearShouldRemoveRegisteredExpirationTasks(TestVM coordinatorVM,
+      RegionShortcut regionShortcut) {
+    final int entries = 500;
+    int expirationTime = (int) GeodeAwaitility.getTimeout().getSeconds();
+    parametrizedSetup(regionShortcut, new ExpirationAttributes(expirationTime, DESTROY));
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> doClear());
+
+    // Assert all expiration tasks were cancelled and none were executed.
+    asList(server1, server2).forEach(vm -> vm.invoke(() -> {
+      ExpirationListener listener = (ExpirationListener) EntryExpiryTask.expiryTaskListener;
+      assertThat(listener.tasksRan.get()).isEqualTo(0);
+      assertThat(listener.tasksCanceled.get()).isEqualTo(listener.tasksScheduled.get());
+
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+      bucketRegions
+          .forEach(bucketRegion -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue());
+    }));
+
+    // Assert region buckets are consistent and the region is empty.
+    accessor.invoke(this::assertRegionBucketsConsistency);
+    assertRegionIsEmpty(asList(accessor, server1, server2));
+  }
+
+  /**
+   * The test does the following (region type is parametrized):
+   * - Populates the Partition Region (entries have expiration).
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link CacheWriter} to stop the coordinator VM while the
+   * clear is in progress.
+   * - Clears the Partition Region (at this point the coordinator is restarted).
+   * - Asserts that, after the clear is finished and the expiration time is reached:
+   * . No expiration tasks were cancelled.
+   * . All entries were removed due to the expiration.
+   * . The Partition Region Buckets are consistent on all members.
+   */
+  @Test
+  @Parameters(method = "regionTypes")
+  @TestCaseName("[{index}] {method}(RegionType:{0})")
+  public void clearShouldFailWhenCoordinatorMemberIsBouncedAndExpirationTasksShouldSurvive(
+      RegionShortcut regionShortcut) {
+    final int entries = 1000;
+    ExpirationAttributes expirationAttributes =
+        new ExpirationAttributes(SMALL_EXPIRATION_TIME, DESTROY);
+    parametrizedSetup(regionShortcut, expirationAttributes);
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+    registerVMKillerAsCacheWriter(Collections.singletonList(server1));
+
+    // Clear the region (it should fail).
+    server1.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      assertThatThrownBy(region::clear)
+          .isInstanceOf(DistributedSystemDisconnectedException.class)
+          .hasCauseInstanceOf(ForcedDisconnectException.class);
+    });
+
+    // Wait for member to get back online and assign all buckets.
+    server1.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(regionShortcut, expirationAttributes);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+      PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
+    });
+
+    // Wait until all expiration tasks are executed.
+    asList(server1, server2).forEach(vm -> vm.invoke(() -> {
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+      bucketRegions.forEach(bucketRegion -> await()
+          .untilAsserted(() -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue()));
+    }));
+
+    // At this point the entries should be either invalidated or destroyed (expiration tasks ran).
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, entries).forEach(i -> {
+        String key = String.valueOf(i);
+        assertThat(region.get(key)).isNull();
+      });
+    }));
+
+    // Assert Region Buckets are consistent.
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator and region type are parametrized):
+   * - Populates the Partition Region (entries have expiration).
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link CacheWriter} to stop a non-coordinator VM while the
+   * clear is in progress (the member has primary buckets, though, so participates on
+   * the clear operation).
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that, after the clear is finished:
+   * . No expiration tasks were executed on the non-restarted members.
+   * . All expiration tasks were cancelled on the non-restarted members.
+   * . Map of expiry tasks per bucket is empty on the non-restarted members.
+   * . All expiration tasks were executed and all expired on the restarted members.
+   * . The Partition Region is empty and buckets are consistent across all members.
+   */
+  @Test
+  @Parameters(method = "vmsAndRegionTypes")
+  @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
+  public void clearShouldSucceedAndRemoveRegisteredExpirationTasksWhenNonCoordinatorMemberIsBounced(
+      TestVM coordinatorVM, RegionShortcut regionShortcut) throws Exception {
+    final int entries = 500;
+    // Swap in the redundant shortcuts: without redundancy, bouncing a member would
+    // cause a partition offline exception for these persistent regions.
+    if (regionShortcut == PARTITION_PERSISTENT) {
+      regionShortcut = PARTITION_REDUNDANT_PERSISTENT;
+    } else if (regionShortcut == PARTITION_PERSISTENT_OVERFLOW) {
+      regionShortcut = PARTITION_REDUNDANT_PERSISTENT_OVERFLOW;
+    }
+
+    final RegionShortcut rs = regionShortcut;
+    ExpirationAttributes expirationAttributes = new ExpirationAttributes(EXPIRATION_TIME, DESTROY);
+    parametrizedSetup(regionShortcut, expirationAttributes);
+    registerVMKillerAsCacheWriter(Collections.singletonList(server2));
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> doClear());
+
+    // Wait for member to get back online and assign buckets.
+    server2.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(rs, expirationAttributes);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+      PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
+    });
+
+    // Assert all expiration tasks were cancelled and none were executed (surviving members).
+    server1.invoke(() -> {
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+      bucketRegions
+          .forEach(bucketRegion -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue());
+
+      ExpirationListener listener = (ExpirationListener) EntryExpiryTask.expiryTaskListener;
+      assertThat(listener.tasksRan.get()).isEqualTo(0);
+      assertThat(listener.tasksCanceled.get()).isEqualTo(listener.tasksScheduled.get());
+    });
+
+    // Assert all expiration tasks were expired as the region is empty (restarted member).
+    server2.invoke(() -> {
+      PartitionedRegionDataStore dataStore =
+          ((PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME)).getDataStore();
+      Set<BucketRegion> bucketRegions = dataStore.getAllLocalBucketRegions();
+
+      // During restart, the member loads the region from disk and automatically registers
+      // expiration tasks for each entry. After GII, however, the region is empty due to the
+      // clear operation and the tasks will just expire as there are no entries.
+      bucketRegions.forEach(bucketRegion -> await()
+          .untilAsserted(() -> assertThat(bucketRegion.entryExpiryTasks.isEmpty()).isTrue()));
+
+      ExpirationListener listener = (ExpirationListener) EntryExpiryTask.expiryTaskListener;
+      assertThat(listener.tasksExpired.get()).isEqualTo(listener.tasksRan.get());
+    });
+
+    // Assert region buckets are consistent and the region is empty.
+    // accessor.invoke(this::assertRegionBucketsConsistency);
+    assertRegionIsEmpty(asList(accessor, server1, server2));
+  }
+
+  /**
+   * Tracks expiration tasks lifecycle.
+   */
+  public static class ExpirationListener implements ExpiryTask.ExpiryTaskListener {
+    final AtomicInteger tasksRan = new AtomicInteger(0);
+    final AtomicInteger tasksExpired = new AtomicInteger(0);
+    final AtomicInteger tasksCanceled = new AtomicInteger(0);
+    final AtomicInteger tasksScheduled = new AtomicInteger(0);
+
+    @Override
+    public void afterSchedule(ExpiryTask et) {
+      tasksScheduled.incrementAndGet();
+    }
+
+    @Override
+    public void afterTaskRan(ExpiryTask et) {
+      tasksRan.incrementAndGet();
+    }
+
+    @Override
+    public void afterReschedule(ExpiryTask et) {}
+
+    @Override
+    public void afterExpire(ExpiryTask et) {
+      tasksExpired.incrementAndGet();
+    }
+
+    @Override
+    public void afterCancel(ExpiryTask et) {
+      tasksCanceled.incrementAndGet();
+    }
+  }
+
+  /**
+   * Shuts down a member while the clear operation is in progress.
+   * The writer is only installed on the member the test wants to shut down; it doesn't matter
+   * whether that member is the clear coordinator or another member holding primary buckets.
+   */
+  public static class MemberKiller extends CacheWriterAdapter<String, String> {
+
+    @Override
+    public synchronized void beforeRegionClear(RegionEvent<String, String> event)
+        throws CacheWriterException {
+      InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
+      MembershipManagerHelper.crashDistributedSystem(
+          InternalDistributedSystem.getConnectedInstance());
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNull());
+    }
+  }
+}
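
Background for the assertions above: every entry in a region with an entry TTL or
idle timeout gets its own EntryExpiryTask, so a clear that removed entries without
cancelling their tasks would leave orphaned tasks behind. The region setup used by
initDataStore boils down to the following sketch, assuming an existing Cache named
cache and a 60-second TTL chosen for illustration:

    // Per-entry expiration: each put schedules an EntryExpiryTask that
    // PartitionedRegion.clear() is now expected to cancel.
    ExpirationAttributes expire = new ExpirationAttributes(60, ExpirationAction.DESTROY);

    Region<String, String> region = cache
        .<String, String>createRegionFactory(RegionShortcut.PARTITION_REDUNDANT_PERSISTENT)
        .setStatisticsEnabled(true) // statistics are required for expiration
        .setEntryTimeToLive(expire)
        .setEntryIdleTimeout(expire)
        .create("PartitionedRegion");
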
diff --git a/geode-core/src/integrationTest/java/org/apache/geode/cache/query/partitioned/PRClearIntegrationTest.java b/geode-core/src/integrationTest/java/org/apache/geode/cache/query/partitioned/PRClearIntegrationTest.java
new file mode 100644
index 0000000..894db1b
--- /dev/null
+++ b/geode-core/src/integrationTest/java/org/apache/geode/cache/query/partitioned/PRClearIntegrationTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.cache.query.partitioned;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.IntStream;
+
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.EntryNotFoundException;
+import org.apache.geode.cache.Region;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.test.junit.rules.ExecutorServiceRule;
+import org.apache.geode.test.junit.rules.ServerStarterRule;
+
+public class PRClearIntegrationTest {
+
+  @Rule
+  public ServerStarterRule server = new ServerStarterRule().withAutoStart();
+
+  @Rule
+  public ExecutorServiceRule executor = new ExecutorServiceRule();
+
+  @Test
+  public void doesNotHangWhenClearWithConcurrentPutsAndInvalidates() throws Exception {
+    InternalCache cache = server.getCache();
+    Region<Object, Object> region = server.createPartitionRegion("regionA", f -> {
+    }, f -> f.setTotalNumBuckets(1));
+    cache.getQueryService().createIndex("indexA", "r", "/regionA r");
+    region.put(0, "value0");
+
+    CompletableFuture<Void> put = executor.runAsync(() -> {
+      Thread.currentThread().setName("put-Thread");
+      IntStream.range(0, 10).forEach(i -> region.put(i, "value" + i));
+    });
+
+    CompletableFuture<Void> invalidate = executor.runAsync(() -> {
+      Thread.currentThread().setName("invalidate-Thread");
+      IntStream.range(0, 10).forEach(i -> {
+        try {
+          region.invalidate(i);
+        } catch (EntryNotFoundException e) {
+          // ignore
+        }
+      });
+    });
+
+    CompletableFuture<Void> clear = executor.runAsync(() -> {
+      Thread.currentThread().setName("Clear-Thread");
+      IntStream.range(0, 10).forEach(i -> region.clear());
+    });
+
+    put.get(5, TimeUnit.SECONDS);
+    invalidate.get(5, TimeUnit.SECONDS);
+    clear.get(5, TimeUnit.SECONDS);
+  }
+}
diff --git a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionIntegrationTest.java b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionIntegrationTest.java
index 818a855..933bc39 100644
--- a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionIntegrationTest.java
+++ b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionIntegrationTest.java
@@ -16,15 +16,24 @@
 package org.apache.geode.internal.cache;
 
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
+import java.util.List;
 import java.util.concurrent.ScheduledExecutorService;
 
 import org.junit.Rule;
 import org.junit.Test;
 
+import org.apache.geode.cache.CacheEvent;
 import org.apache.geode.cache.EvictionAction;
 import org.apache.geode.cache.EvictionAttributes;
+import org.apache.geode.cache.Operation;
+import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache30.TestCacheListener;
 import org.apache.geode.test.junit.rules.ServerStarterRule;
 
 public class PartitionedRegionIntegrationTest {
@@ -55,4 +64,40 @@ public class PartitionedRegionIntegrationTest {
     ScheduledExecutorService bucketSorter = region.getBucketSorter();
     assertThat(bucketSorter).isNull();
   }
+
+  @Test
+  public void prClearWithDataInvokesCacheListenerAfterClear() {
+    TestCacheListener prCacheListener = new TestCacheListener() {};
+    TestCacheListener spyPRCacheListener = spy(prCacheListener);
+
+    Region region = server.createPartitionRegion("PR1",
+        f -> f.addCacheListener(spyPRCacheListener), f -> f.setTotalNumBuckets(2));
+    region.put("key1", "value2");
+    region.put("key2", "value2");
+    spyPRCacheListener.enableEventHistory();
+
+    region.clear();
+
+    verify(spyPRCacheListener, times(1)).afterRegionClear(any());
+    List cacheEvents = spyPRCacheListener.getEventHistory();
+    assertThat(cacheEvents.size()).isEqualTo(1);
+    assertThat(((CacheEvent) cacheEvents.get(0)).getOperation()).isEqualTo(Operation.REGION_CLEAR);
+  }
+
+  @Test
+  public void prClearWithoutDataInvokesCacheListenerAfterClear() {
+    TestCacheListener prCacheListener = new TestCacheListener() {};
+    TestCacheListener spyPRCacheListener = spy(prCacheListener);
+
+    Region region = server.createPartitionRegion("PR1",
+        f -> f.addCacheListener(spyPRCacheListener), f -> f.setTotalNumBuckets(2));
+    spyPRCacheListener.enableEventHistory();
+
+    region.clear();
+
+    verify(spyPRCacheListener, times(1)).afterRegionClear(any());
+    List cacheEvents = spyPRCacheListener.getEventHistory();
+    assertThat(cacheEvents.size()).isEqualTo(1);
+    assertThat(((CacheEvent) cacheEvents.get(0)).getOperation()).isEqualTo(Operation.REGION_CLEAR);
+  }
 }
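
The two tests above pin down the listener contract for PR clear: exactly one
afterRegionClear callback carrying Operation.REGION_CLEAR, whether or not the region
holds data. A minimal sketch of a listener relying on that contract (class name is
illustrative):

    import org.apache.geode.cache.RegionEvent;
    import org.apache.geode.cache.util.CacheListenerAdapter;

    public class ClearAuditListener extends CacheListenerAdapter<String, String> {
      @Override
      public void afterRegionClear(RegionEvent<String, String> event) {
        // Fires once per clear(); event.getOperation() is Operation.REGION_CLEAR.
        System.out.println("cleared " + event.getRegion().getFullPath()
            + " operation=" + event.getOperation());
      }
    }
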
diff --git a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleNodeOperationsJUnitTest.java b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleNodeOperationsJUnitTest.java
index b37945b..4f36060 100644
--- a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleNodeOperationsJUnitTest.java
+++ b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/PartitionedRegionSingleNodeOperationsJUnitTest.java
@@ -25,7 +25,6 @@ import static org.junit.Assert.fail;
 
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.NoSuchElementException;
 import java.util.Set;
@@ -1298,71 +1297,6 @@ public class PartitionedRegionSingleNodeOperationsJUnitTest {
     }
   }
 
-  @Test
-  public void test023UnsupportedOps() throws Exception {
-    Region pr = null;
-    try {
-      pr = PartitionedRegionTestHelper.createPartitionedRegion("testUnsupportedOps",
-          String.valueOf(200), 0);
-
-      pr.put(new Integer(1), "one");
-      pr.put(new Integer(2), "two");
-      pr.put(new Integer(3), "three");
-      pr.getEntry("key");
-
-      try {
-        pr.clear();
-        fail(
-            "PartitionedRegionSingleNodeOperationTest:testUnSupportedOps() operation failed on a blank PartitionedRegion");
-      } catch (UnsupportedOperationException expected) {
-      }
-
-      // try {
-      // pr.entries(true);
-      // fail();
-      // }
-      // catch (UnsupportedOperationException expected) {
-      // }
-
-      // try {
-      // pr.entrySet(true);
-      // fail();
-      // }
-      // catch (UnsupportedOperationException expected) {
-      // }
-
-      try {
-        HashMap data = new HashMap();
-        data.put("foo", "bar");
-        data.put("bing", "bam");
-        data.put("supper", "hero");
-        pr.putAll(data);
-        // fail("testPutAll() does NOT throw UnsupportedOperationException");
-      } catch (UnsupportedOperationException onse) {
-      }
-
-
-      // try {
-      // pr.values();
-      // fail("testValues() does NOT throw UnsupportedOperationException");
-      // }
-      // catch (UnsupportedOperationException expected) {
-      // }
-
-
-      try {
-        pr.containsValue("foo");
-      } catch (UnsupportedOperationException ex) {
-        fail("PartitionedRegionSingleNodeOperationTest:testContainsValue() operation failed");
-      }
-
-    } finally {
-      if (pr != null) {
-        pr.destroyRegion();
-      }
-    }
-  }
-
   /**
    * This method validates size operations. It verifies that it returns correct size of the
    * PartitionedRegion.
diff --git a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
index 104031d..cb4e6b3 100644
--- a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
@@ -1075,6 +1075,14 @@ org/apache/geode/internal/cache/PartitionRegionConfig,2
 fromData,207
 toData,178
 
+org/apache/geode/internal/cache/PartitionedRegionClearMessage,2
+fromData,40
+toData,36
+
+org/apache/geode/internal/cache/PartitionedRegionClearMessage$PartitionedRegionClearReplyMessage,2
+fromData,32
+toData,28
+
 org/apache/geode/internal/cache/PoolFactoryImpl$PoolAttributes,2
 fromData,161
 toData,161
@@ -1376,6 +1384,14 @@ org/apache/geode/internal/cache/partitioned/BucketSizeMessage$BucketSizeReplyMes
 fromData,27
 toData,27
 
+org/apache/geode/internal/cache/partitioned/ClearPRMessage,2
+fromData,30
+toData,44
+
+org/apache/geode/internal/cache/partitioned/ClearPRMessage$ClearReplyMessage,2
+fromData,17
+toData,17
+
 org/apache/geode/internal/cache/partitioned/ColocatedRegionDetails,2
 fromData,81
 toData,133
diff --git a/geode-junit/src/main/java/org/apache/geode/cache/query/data/City.java b/geode-core/src/main/java/org/apache/geode/cache/PartitionedRegionPartialClearException.java
similarity index 51%
copy from geode-junit/src/main/java/org/apache/geode/cache/query/data/City.java
copy to geode-core/src/main/java/org/apache/geode/cache/PartitionedRegionPartialClearException.java
index e7e7b39..1ddb301 100644
--- a/geode-junit/src/main/java/org/apache/geode/cache/query/data/City.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/PartitionedRegionPartialClearException.java
@@ -12,41 +12,26 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
+package org.apache.geode.cache;
 
-/*
- * City.java
- *
- * Created on September 30, 2005, 6:20 PM
+/**
+ * Indicates a failure to perform a distributed clear operation on a Partitioned Region
+ * after multiple attempts. The clear may not have been successfully applied on some of
+ * the members hosting the region.
  */
+public class PartitionedRegionPartialClearException extends CacheRuntimeException {
 
-package org.apache.geode.cache.query.data;
-
-import java.io.Serializable;
-
-public class City implements Serializable {
-  public String name;
-  public int zip;
+  public PartitionedRegionPartialClearException() {}
 
-  /** Creates a new instance of City */
-  public City(String name, int zip) {
-    this.name = name;
-    this.zip = zip;
-  }// end of constructor 1
-
-  public City(int i) {
-    String arr1[] = {"MUMBAI", "PUNE", "GANDHINAGAR", "CHANDIGARH"};
-    /* this is for the test to have 50% of the objects belonging to one city */
-    this.name = arr1[i % 2];
-    this.zip = 425125 + i;
-  }// end of constructor 2
-
-  ////////////////////////////
+  public PartitionedRegionPartialClearException(String msg) {
+    super(msg);
+  }
 
-  public String getName() {
-    return name;
+  public PartitionedRegionPartialClearException(String msg, Throwable cause) {
+    super(msg, cause);
   }
 
-  public int getZip() {
-    return zip;
+  public PartitionedRegionPartialClearException(Throwable cause) {
+    super(cause);
   }
-}// end of class
+}
diff --git a/geode-core/src/main/java/org/apache/geode/cache/Region.java b/geode-core/src/main/java/org/apache/geode/cache/Region.java
index b6ba670..1961e81 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/Region.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/Region.java
@@ -1304,7 +1304,11 @@ public interface Region<K, V> extends ConcurrentMap<K, V> {
    * @see java.util.Map#clear()
    * @see CacheListener#afterRegionClear
    * @see CacheWriter#beforeRegionClear
-   * @throws UnsupportedOperationException If the region is a partitioned region
+   * @throws PartitionedRegionPartialClearException when data is only partially cleared on a
+   *         partitioned region. It is the caller's responsibility to handle a partial clear,
+   *         either by retrying the clear operation or by continuing to work with the partially
+   *         cleared partitioned region.
+   * @throws UnsupportedOperationException when the data was not cleared because the versions of
+   *         one or more member servers were too old to understand the clear message.
    */
   @Override
   void clear();
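
The retry obligation described in the new @throws clause is the same pattern the
doClear() helper uses in the dunit tests earlier in this patch. A minimal sketch,
assuming a Region<String, String> named region:

    // Retry until the clear fully succeeds. A partial clear leaves the region
    // usable but only partially emptied, so retrying the operation is safe.
    boolean retry;
    do {
      retry = false;
      try {
        region.clear();
      } catch (PartitionedRegionPartialClearException e) {
        retry = true;
      }
    } while (retry);
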
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java
index 2930a3a..2895aaf 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQueryService.java
@@ -213,7 +213,7 @@ public class DefaultQueryService implements InternalQueryService {
       throw new UnsupportedOperationException(
           "Index creation on the server is not supported from the client.");
     }
-    PartitionedIndex parIndex = null;
+
     if (region == null) {
       region = getRegionFromPath(imports, fromClause);
     }
@@ -241,6 +241,7 @@ public class DefaultQueryService implements InternalQueryService {
       }
     }
     if (region instanceof PartitionedRegion) {
+      PartitionedIndex parIndex = null;
       try {
         parIndex = (PartitionedIndex) ((PartitionedRegion) region).createIndex(false, indexType,
             indexName, indexedExpression, fromClause, imports, loadEntries);
@@ -256,7 +257,6 @@ public class DefaultQueryService implements InternalQueryService {
       return parIndex;
 
     } else {
-
       IndexManager indexManager = IndexUtils.getIndexManager(this.cache, region, true);
       Index index = indexManager.createIndex(indexName, indexType, indexedExpression, fromClause,
           imports, null, null, loadEntries);
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java
index 5b2867b..0501603 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexManager.java
@@ -275,6 +275,8 @@ public class IndexManager {
     }
 
     try {
+      ((LocalRegion) this.region).lockRVVForBulkOp();
+
       String projectionAttributes = "*"; // for now this is the only option
 
       if (getIndex(indexName) != null) {
@@ -425,7 +427,7 @@ public class IndexManager {
     } finally {
       this.cache.setPdxReadSerializedOverride(oldReadSerialized);
       ((TXManagerImpl) this.cache.getCacheTransactionManager()).unpauseTransaction(tx);
-
+      ((LocalRegion) this.region).unlockRVVForBulkOp();
     }
   }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
index 504e7d1..f0658a6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
@@ -235,6 +235,7 @@ import org.apache.geode.internal.cache.MemberFunctionStreamingMessage;
 import org.apache.geode.internal.cache.Node;
 import org.apache.geode.internal.cache.PRQueryProcessor;
 import org.apache.geode.internal.cache.PartitionRegionConfig;
+import org.apache.geode.internal.cache.PartitionedRegionClearMessage;
 import org.apache.geode.internal.cache.PreferBytesCachedDeserializable;
 import org.apache.geode.internal.cache.RegionEventImpl;
 import org.apache.geode.internal.cache.ReleaseClearLockMessage;
@@ -289,6 +290,7 @@ import org.apache.geode.internal.cache.partitioned.BucketCountLoadProbe;
 import org.apache.geode.internal.cache.partitioned.BucketProfileUpdateMessage;
 import org.apache.geode.internal.cache.partitioned.BucketSizeMessage;
 import org.apache.geode.internal.cache.partitioned.BucketSizeMessage.BucketSizeReplyMessage;
+import org.apache.geode.internal.cache.partitioned.ClearPRMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage.ContainsKeyValueReplyMessage;
 import org.apache.geode.internal.cache.partitioned.CreateBucketMessage;
@@ -685,6 +687,10 @@ public class DSFIDFactory implements DataSerializableFixedID {
     serializer.registerDSFID(PR_DUMP_B2N_REPLY_MESSAGE, DumpB2NReplyMessage.class);
     serializer.registerDSFID(DESTROY_PARTITIONED_REGION_MESSAGE,
         DestroyPartitionedRegionMessage.class);
+    serializer.registerDSFID(CLEAR_PARTITIONED_REGION_MESSAGE,
+        PartitionedRegionClearMessage.class);
+    serializer.registerDSFID(CLEAR_PARTITIONED_REGION_REPLY_MESSAGE,
+        PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage.class);
     serializer.registerDSFID(INVALIDATE_PARTITIONED_REGION_MESSAGE,
         InvalidatePartitionedRegionMessage.class);
     serializer.registerDSFID(COMMIT_PROCESS_QUERY_MESSAGE, CommitProcessQueryMessage.class);
@@ -985,6 +991,8 @@ public class DSFIDFactory implements DataSerializableFixedID {
     serializer.registerDSFID(GATEWAY_SENDER_QUEUE_ENTRY_SYNCHRONIZATION_ENTRY,
         GatewaySenderQueueEntrySynchronizationOperation.GatewaySenderQueueEntrySynchronizationEntry.class);
     serializer.registerDSFID(ABORT_BACKUP_REQUEST, AbortBackupRequest.class);
+    serializer.registerDSFID(PR_CLEAR_MESSAGE, ClearPRMessage.class);
+    serializer.registerDSFID(PR_CLEAR_REPLY_MESSAGE, ClearPRMessage.ClearReplyMessage.class);
     serializer.registerDSFID(HOST_AND_PORT, HostAndPort.class);
     serializer.registerDSFID(DISTRIBUTED_PING_MESSAGE, DistributedPingMessage.class);
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java
index 3f09662..10055a4 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java
@@ -113,6 +113,7 @@ public abstract class AbstractRegion implements InternalRegion, AttributesMutato
   private static final Logger logger = LogService.getLogger();
   private final ReentrantReadWriteLock readWriteLockForCacheLoader = new ReentrantReadWriteLock();
   private final ReentrantReadWriteLock readWriteLockForCacheWriter = new ReentrantReadWriteLock();
+  @VisibleForTesting
   protected final ConcurrentHashMap<RegionEntry, EntryExpiryTask> entryExpiryTasks =
       new ConcurrentHashMap<>();
   /**
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
index 1993b85..4de9dcd 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
@@ -344,7 +344,12 @@ public abstract class AbstractRegionMap extends BaseRegionMap
     if (lr != null && !(lr instanceof HARegion)) {
       CachePerfStats stats = lr.getCachePerfStats();
       if (stats != null) {
-        stats.incClearCount();
+        if (lr.isUsedForPartitionedRegionBucket()) {
+          stats.incBucketClearCount();
+        } else {
+          stats.incRegionClearCount();
+        }
       }
     }
   }
@@ -1278,16 +1283,19 @@ public abstract class AbstractRegionMap extends BaseRegionMap
     DiskRegion dr = owner.getDiskRegion();
     boolean ownerIsInitialized = owner.isInitialized();
 
-    // Fix for Bug #44431. We do NOT want to update the region and wait
-    // later for index INIT as region.clear() can cause inconsistency if
-    // happened in parallel as it also does index INIT.
-    IndexManager oqlIndexManager = owner.getIndexManager();
-    if (oqlIndexManager != null) {
-      oqlIndexManager.waitForIndexInit();
-    }
+    // Lock before waitForIndexInit so that we wait for a concurrent clear to finish.
     lockForCacheModification(owner, event);
-    final boolean locked = owner.lockWhenRegionIsInitializing();
+    boolean locked = false;
     try {
+      // Fix for Bug #44431. We do NOT want to update the region and wait
+      // later for index INIT as region.clear() can cause inconsistency if
+      // happened in parallel as it also does index INIT.
+      IndexManager oqlIndexManager = owner.getIndexManager();
+      if (oqlIndexManager != null) {
+        oqlIndexManager.waitForIndexInit();
+      }
+      locked = owner.lockWhenRegionIsInitializing();
       try {
         try {
           if (forceNewEntry || forceCallbacks) {
@@ -1656,7 +1664,6 @@ public abstract class AbstractRegionMap extends BaseRegionMap
       }
       releaseCacheModificationLock(owner, event);
     }
-
   }
 
   /**
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
index e4045c3..6cba754 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
@@ -1622,7 +1622,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
   /**
   * Returns true if a primary is known.
    */
-  private boolean hasPrimary() {
+  protected boolean hasPrimary() {
     final byte primaryState = this.primaryState;
     return primaryState == OTHER_PRIMARY_NOT_HOSTING || primaryState == OTHER_PRIMARY_HOSTING
         || primaryState == IS_PRIMARY_HOSTING;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
index af5ebd0..49f6aad 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
@@ -560,6 +560,38 @@ public class BucketRegion extends DistributedRegion implements Bucket {
     }
   }
 
+  /**
+   * Starts with a primary bucket, clears it, and distributes a
+   * DistributedClearOperation.OperationType.OP_CLEAR operation to the other members.
+   * If this member is not locked yet, locks it and sends OP_LOCK_FOR_CLEAR to the others first.
+   */
+  @Override
+  public void cmnClearRegion(RegionEventImpl regionEvent, boolean cacheWrite, boolean useRVV) {
+    if (!getBucketAdvisor().isPrimary()) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("Not primary bucket when doing clear, do nothing");
+      }
+      return;
+    }
+
+    // determine the members whose RVVs must be locked for the clear
+    Set<InternalDistributedMember> participants =
+        getCacheDistributionAdvisor().adviseInvalidateRegion();
+    boolean isLockedAlready = this.partitionedRegion.getPartitionedRegionClear()
+        .isLockedForListenerAndClientNotification();
+
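+    // if the PR-level clear already holds the listener/client notification lock,
+    // obtainWriteLocksForClear will skip taking the local lock again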
+    try {
+      obtainWriteLocksForClear(regionEvent, participants, isLockedAlready);
+      // no need to dominate my own RVV:
+      // a clear is ongoing here, so there won't be a GII for this member
+      clearRegionLocally(regionEvent, cacheWrite, null);
+      distributeClearOperation(regionEvent, null, participants);
+
+      // TODO: call reindexUserDataRegion if there are lucene indexes
+    } finally {
+      releaseWriteLocksForClear(regionEvent, participants, isLockedAlready);
+    }
+  }
 
   long generateTailKey() {
     long key = eventSeqNum.addAndGet(partitionedRegion.getTotalNumberOfBuckets());
@@ -2110,8 +2142,8 @@ public class BucketRegion extends DistributedRegion implements Bucket {
       // counters to 0.
       oldMemValue = bytesInMemory.getAndSet(0);
     } else {
-      throw new InternalGemFireError(
-          "Trying to clear a bucket region that was not destroyed or in initialization.");
+      // clear is now supported on bucket regions, so just reset the in-memory counter
+      oldMemValue = bytesInMemory.getAndSet(0);
     }
     if (oldMemValue != BUCKET_DESTROYED) {
       partitionedRegion.getPrStats().incDataStoreEntryCount(-sizeBeforeClear);
@@ -2483,4 +2515,10 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   void checkSameSenderIdsAvailableOnAllNodes() {
     // nothing needed on a bucket region
   }
+
+  @Override
+  protected void basicClear(RegionEventImpl regionEvent) {
+    basicClear(regionEvent, false);
+  }
+
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
index 5bbca52..25fdca2 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
@@ -118,7 +118,11 @@ public class CachePerfStats {
   static final int indexUpdateInProgressId;
   static final int indexUpdateCompletedId;
   static final int indexUpdateTimeId;
-  static final int clearsId;
+  static final int bucketClearsId;
+  static final int regionClearsId;
+  static final int partitionedRegionClearLocalDurationId;
+  static final int partitionedRegionClearTotalDurationId;
+
   private static final int indexInitializationInProgressId;
   private static final int indexInitializationCompletedId;
   private static final int indexInitializationTimeId;
@@ -286,7 +290,14 @@ public class CachePerfStats {
         "Current number of regions configured for reliablity that are missing required roles with Limited access";
     final String reliableRegionsMissingNoAccessDesc =
         "Current number of regions configured for reliablity that are missing required roles with No access";
-    final String clearsDesc = "The total number of times a clear has been done on this cache.";
+    final String regionClearsDesc =
+        "The total number of times a clear has been done on this cache.";
+    final String bucketClearsDesc =
+        "The total number of times a clear has been done on this region and its bucket regions.";
+    final String partitionedRegionClearLocalDurationDesc =
+        "The time in nanoseconds a partitioned region clear has been running for the region on this member.";
+    final String partitionedRegionClearTotalDurationDesc =
+        "The time in nanoseconds a partitioned region clear has been running for the region with this member as coordinator.";
     final String metaDataRefreshCountDesc =
         "Total number of times the meta data is refreshed due to hopping observed.";
     final String conflatedEventsDesc =
@@ -465,7 +476,12 @@ public class CachePerfStats {
             f.createIntCounter("retries",
                 "Number of times a concurrent destroy followed by a create has caused an entry operation to need to retry.",
                 "operations"),
-            f.createLongCounter("clears", clearsDesc, "operations"),
+            f.createLongCounter("regionClears", regionClearsDesc, "operations"),
+            f.createLongCounter("bucketClears", bucketClearsDesc, "operations"),
+            f.createLongCounter("partitionedRegionClearLocalDuration",
+                partitionedRegionClearLocalDurationDesc, "nanoseconds"),
+            f.createLongCounter("partitionedRegionClearTotalDuration",
+                partitionedRegionClearTotalDurationDesc, "nanoseconds"),
             f.createIntGauge("diskTasksWaiting",
                 "Current number of disk tasks (oplog compactions, asynchronous recoveries, etc) that are waiting for a thread to run the operation",
                 "operations"),
@@ -608,7 +624,10 @@ public class CachePerfStats {
     eventsQueuedId = type.nameToId("eventsQueued");
 
     retriesId = type.nameToId("retries");
-    clearsId = type.nameToId("clears");
+    regionClearsId = type.nameToId("regionClears");
+    bucketClearsId = type.nameToId("bucketClears");
+    partitionedRegionClearLocalDurationId = type.nameToId("partitionedRegionClearLocalDuration");
+    partitionedRegionClearTotalDurationId = type.nameToId("partitionedRegionClearTotalDuration");
 
     diskTasksWaitingId = type.nameToId("diskTasksWaiting");
     evictorJobsStartedId = type.nameToId("evictorJobsStarted");
@@ -1394,12 +1413,36 @@ public class CachePerfStats {
     };
   }
 
-  public long getClearCount() {
-    return stats.getLong(clearsId);
+  public long getRegionClearCount() {
+    return stats.getLong(regionClearsId);
+  }
+
+  public long getBucketClearCount() {
+    return stats.getLong(bucketClearsId);
+  }
+
+  public long getPartitionedRegionClearLocalDuration() {
+    return stats.getLong(partitionedRegionClearLocalDurationId);
+  }
+
+  public long getPartitionedRegionClearTotalDuration() {
+    return stats.getLong(partitionedRegionClearTotalDurationId);
+  }
+
+  public void incRegionClearCount() {
+    stats.incLong(regionClearsId, 1L);
+  }
+
+  public void incBucketClearCount() {
+    stats.incLong(bucketClearsId, 1L);
+  }
+
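+  /**
+   * Adds the given duration, in nanoseconds, to the local partitioned region clear time.
+   */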
+  public void incPartitionedRegionClearLocalDuration(long durationNanos) {
+    stats.incLong(partitionedRegionClearLocalDurationId, durationNanos);
   }
 
-  public void incClearCount() {
-    stats.incLong(clearsId, 1L);
+  public void incPartitionedRegionClearTotalDuration(long durationNanos) {
+    stats.incLong(partitionedRegionClearTotalDurationId, durationNanos);
   }
 
   public long getConflatedEventsCount() {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
index 4e30d64..f7c5c7f 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
@@ -279,15 +279,11 @@ public class ColocationHelper {
   }
 
   /**
-   * An utility method to retrieve all partitioned regions(excluding self) in a colocation chain<br>
+   * A utility method to retrieve all partitioned regions(excluding self) in a colocation chain<br>
    * <p>
-   * For example, shipmentPR is colocated with orderPR and orderPR is colocated with customerPR <br>
-   * <br>
-   * getAllColocationRegions(customerPR) --> List{orderPR, shipmentPR}<br>
-   * getAllColocationRegions(orderPR) --> List{customerPR, shipmentPR}<br>
-   * getAllColocationRegions(shipmentPR) --> List{customerPR, orderPR}<br>
    *
-   * @return List of all partitioned regions (excluding self) in a colocated chain
+   * @return Map<String, PartitionedRegion> of all partitioned regions (excluding self) in a
+   *         colocated chain. Keys are the full paths of the PartitionedRegion values.
    * @since GemFire 5.8Beta
    */
   public static Map<String, PartitionedRegion> getAllColocationRegions(
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedClearOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedClearOperation.java
index 4396581..4809291 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedClearOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedClearOperation.java
@@ -163,6 +163,10 @@ public class DistributedClearOperation extends DistributedCacheOperation {
   }
 
 
+  /**
+   * This message operates at the BucketRegion level and is used by the primary member to
+   * distribute the clear operation to secondary buckets.
+   */
   public static class ClearRegionMessage extends CacheOperationMessage {
 
     protected EventID eventID;
@@ -186,6 +190,10 @@ public class DistributedClearOperation extends DistributedCacheOperation {
       return OperationExecutors.HIGH_PRIORITY_EXECUTOR;
     }
 
+    public OperationType getOperationType() {
+      return clearOp;
+    }
+
     @Override
     protected InternalCacheEvent createEvent(DistributedRegion rgn) throws EntryNotFoundException {
       RegionEventImpl event = createRegionEvent(rgn);
@@ -207,17 +215,19 @@ public class DistributedClearOperation extends DistributedCacheOperation {
     protected boolean operateOnRegion(CacheEvent event, ClusterDistributionManager dm)
         throws EntryNotFoundException {
 
-      DistributedRegion region = (DistributedRegion) event.getRegion();
+      LocalRegion region = (LocalRegion) event.getRegion();
       switch (this.clearOp) {
         case OP_CLEAR:
           region.clearRegionLocally((RegionEventImpl) event, false, this.rvv);
-          region.notifyBridgeClients((RegionEventImpl) event);
+          region.notifyBridgeClients(event);
           this.appliedOperation = true;
           break;
         case OP_LOCK_FOR_CLEAR:
-          if (region.getDataPolicy().withStorage()) {
-            DistributedClearOperation.regionLocked(this.getSender(), region.getFullPath(), region);
-            region.lockLocallyForClear(dm, this.getSender(), event);
+          if (region.getDataPolicy().withStorage() && region instanceof DistributedRegion) {
+            DistributedRegion distributedRegion = (DistributedRegion) region;
+            DistributedClearOperation.regionLocked(this.getSender(), region.getFullPath(),
+                distributedRegion);
+            distributedRegion.lockLocallyForClear(dm, this.getSender(), event);
           }
           this.appliedOperation = true;
           break;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
index e7050e6..3d6df11 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
@@ -192,10 +192,6 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
   @MutableForTesting
   public static boolean ignoreReconnect = false;
 
-  /**
-   * Lock to prevent multiple threads on this member from performing a clear at the same time.
-   */
-  private final Object clearLock = new Object();
   private final ReentrantReadWriteLock failedInitialImageLock = new ReentrantReadWriteLock(true);
 
   @MakeNotStatic
@@ -933,11 +929,6 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
     }
   }
 
-  private void lockCheckReadiness() {
-    cache.getCancelCriterion().checkCancelInProgress(null);
-    checkReadiness();
-  }
-
   @Override
   Object validatedDestroy(Object key, EntryEventImpl event)
       throws TimeoutException, EntryNotFoundException, CacheWriterException {
@@ -2013,6 +2004,10 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
     super.basicClear(regionEvent, cacheWrite);
   }
 
+  void distributeClearOperation(RegionEventImpl regionEvent, RegionVersionVector rvv,
+      Set<InternalDistributedMember> participants) {
+    DistributedClearOperation.clear(regionEvent, rvv, participants);
+  }
 
   @Override
   void cmnClearRegion(RegionEventImpl regionEvent, boolean cacheWrite, boolean useRVV) {
@@ -2032,13 +2027,13 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
               getCacheDistributionAdvisor().adviseInvalidateRegion();
           // pause all generation of versions and flush from the other members to this one
           try {
-            obtainWriteLocksForClear(regionEvent, participants);
+            obtainWriteLocksForClear(regionEvent, participants, false);
             clearRegionLocally(regionEvent, cacheWrite, null);
             if (!regionEvent.isOriginRemote() && regionEvent.getOperation().isDistributed()) {
-              DistributedClearOperation.clear(regionEvent, null, participants);
+              distributeClearOperation(regionEvent, null, participants);
             }
           } finally {
-            releaseWriteLocksForClear(regionEvent, participants);
+            releaseWriteLocksForClear(regionEvent, participants, false);
           }
         } finally {
           distributedUnlockForClear();
@@ -2048,7 +2043,7 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
             getCacheDistributionAdvisor().adviseInvalidateRegion();
         clearRegionLocally(regionEvent, cacheWrite, null);
         if (!regionEvent.isOriginRemote() && regionEvent.getOperation().isDistributed()) {
-          DistributedClearOperation.clear(regionEvent, null, participants);
+          distributeClearOperation(regionEvent, null, participants);
         }
       }
     }
@@ -2091,9 +2086,28 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
   /**
    * obtain locks preventing generation of new versions in other members
    */
-  private void obtainWriteLocksForClear(RegionEventImpl regionEvent,
+  protected void obtainWriteLocksForClear(RegionEventImpl regionEvent,
+      Set<InternalDistributedMember> participants, boolean localLockedAlready) {
+    if (!localLockedAlready) {
+      lockLocallyForClear(getDistributionManager(), getMyId(), regionEvent);
+    }
+    lockAndFlushClearToOthers(regionEvent, participants);
+  }
+
+  /**
+   * releases the locks obtained in obtainWriteLocksForClear
+   */
+  protected void releaseWriteLocksForClear(RegionEventImpl regionEvent,
+      Set<InternalDistributedMember> participants,
+      boolean localLockedAlready) {
+    if (!localLockedAlready) {
+      releaseLockLocallyForClear(regionEvent);
+    }
+    DistributedClearOperation.releaseLocks(regionEvent, participants);
+  }
+
+  void lockAndFlushClearToOthers(RegionEventImpl regionEvent,
       Set<InternalDistributedMember> participants) {
-    lockLocallyForClear(getDistributionManager(), getMyId(), regionEvent);
     DistributedClearOperation.lockAndFlushToOthers(regionEvent, participants);
   }
 
@@ -2128,19 +2142,16 @@ public class DistributedRegion extends LocalRegion implements InternalDistribute
     }
   }
 
-  /**
-   * releases the locks obtained in obtainWriteLocksForClear
-   */
-  private void releaseWriteLocksForClear(RegionEventImpl regionEvent,
-      Set<InternalDistributedMember> participants) {
-
+  protected void releaseLockLocallyForClear(RegionEventImpl regionEvent) {
     ARMLockTestHook armLockTestHook = getRegionMap().getARMLockTestHook();
     if (armLockTestHook != null) {
       armLockTestHook.beforeRelease(this, regionEvent);
     }
 
-    getVersionVector().unlockForClear(getMyId());
-    DistributedClearOperation.releaseLocks(regionEvent, participants);
+    RegionVersionVector rvv = getVersionVector();
+    if (rvv != null) {
+      rvv.unlockForClear(getMyId());
+    }
 
     if (armLockTestHook != null) {
       armLockTestHook.afterRelease(this, regionEvent);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java
index 432ecca..8d59aa1 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java
@@ -469,4 +469,7 @@ public interface InternalRegion extends Region, HasCachePerfStats, RegionEntryCo
   boolean isRegionCreateNotified();
 
   void setRegionCreateNotified(boolean notified);
+
+  void clearRegionLocally(RegionEventImpl regionEvent, boolean cacheWrite,
+      RegionVersionVector vector);
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index 0842e2b..763314d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -324,6 +324,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
    */
   private int txRefCount;
 
+
   private volatile boolean regionInvalid;
 
   /**
@@ -470,6 +471,11 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
   private final Lock clientMetaDataLock = new ReentrantLock();
 
   /**
+   * Lock to prevent multiple threads on this member from performing a clear at the same time.
+   */
+  protected final Object clearLock = new Object();
+
+  /**
    * Lock for updating the cache service profile for the region.
    */
   private final Lock cacheServiceProfileUpdateLock = new ReentrantLock();
@@ -2757,6 +2763,11 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     checkRegionDestroyed(true);
   }
 
+  protected void lockCheckReadiness() {
+    cache.getCancelCriterion().checkCancelInProgress(null);
+    checkReadiness();
+  }
+
   /**
    * This method should be called when the caller cannot locate an entry and that condition is
    * unexpected. This will first double check the cache and region state before throwing an
@@ -2999,7 +3010,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
   /**
    * @since GemFire 5.7
    */
-  private void serverRegionClear(RegionEventImpl regionEvent) {
+  protected void serverRegionClear(RegionEventImpl regionEvent) {
     if (regionEvent.getOperation().isDistributed()) {
       ServerRegionProxy mySRP = getServerProxy();
       if (mySRP != null) {
@@ -3118,7 +3129,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     return result;
   }
 
-  private void cacheWriteBeforeRegionClear(RegionEventImpl event)
+  void cacheWriteBeforeRegionClear(RegionEventImpl event)
       throws CacheWriterException, TimeoutException {
     // copy into local var to prevent race condition
     CacheWriter writer = basicGetWriter();
@@ -7966,7 +7977,8 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     }
   }
 
-  private void cancelAllEntryExpiryTasks() {
+  @VisibleForTesting
+  void cancelAllEntryExpiryTasks() {
     // This method gets called during LocalRegion construction
     // in which case the final entryExpiryTasks field can still be null
     if (entryExpiryTasks.isEmpty()) {
@@ -7981,6 +7993,10 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
       task.cancel();
       doPurge = true;
     }
+
+    // Clear the map after canceling each expiry task.
+    entryExpiryTasks.clear();
+
     if (doPurge) {
       // do a force to not leave any refs to this region
       cache.getExpirationScheduler().forcePurge();
@@ -8452,7 +8468,8 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
    * will not take distributedLock. The clear operation will also clear the local transactional
    * entries. The clear operation will have immediate committed state.
    */
-  void clearRegionLocally(RegionEventImpl regionEvent, boolean cacheWrite,
+  @Override
+  public void clearRegionLocally(RegionEventImpl regionEvent, boolean cacheWrite,
       RegionVersionVector vector) {
     final boolean isRvvDebugEnabled = logger.isTraceEnabled(LogMarker.RVV_VERBOSE);
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index eb7fed7..a62b2b5 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -180,6 +180,7 @@ import org.apache.geode.internal.cache.execute.PartitionedRegionFunctionResultWa
 import org.apache.geode.internal.cache.execute.RegionFunctionContextImpl;
 import org.apache.geode.internal.cache.execute.ServerToClientFunctionResultSender;
 import org.apache.geode.internal.cache.ha.ThreadIdentifier;
+import org.apache.geode.internal.cache.partitioned.ClearPRMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage.ContainsKeyValueResponse;
 import org.apache.geode.internal.cache.partitioned.DestroyMessage;
@@ -319,6 +320,8 @@ public class PartitionedRegion extends LocalRegion
     }
   };
 
+  private final PartitionedRegionClear partitionedRegionClear = new PartitionedRegionClear(this);
+
   /**
    * Global Region for storing PR config ( PRName->PRConfig). This region would be used to resolve
    * PR name conflict.*
@@ -569,6 +572,14 @@ public class PartitionedRegion extends LocalRegion
     return this.partitionListeners;
   }
 
+  public CachePerfStats getRegionCachePerfStats() {
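+    // return the CachePerfStats of any local bucket region, or null if this member hosts none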
+    if (dataStore != null && dataStore.getAllLocalBucketRegions().size() > 0) {
+      BucketRegion bucket = dataStore.getAllLocalBucketRegions().iterator().next();
+      return bucket.getCachePerfStats();
+    }
+    return null;
+  }
+
   /**
    * Return canonical representation for a bucket (for logging)
    *
@@ -2171,18 +2182,13 @@ public class PartitionedRegion extends LocalRegion
     throw new UnsupportedOperationException();
   }
 
-  /**
-   * @since GemFire 5.0
-   * @throws UnsupportedOperationException OVERRIDES
-   */
-  @Override
-  public void clear() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  void basicClear(RegionEventImpl regionEvent, boolean cacheWrite) {
-    throw new UnsupportedOperationException();
+  List<ClearPRMessage> createClearPRMessages(EventID eventID) {
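+    // build one ClearPRMessage per bucket id, covering every bucket of this region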
+    ArrayList<ClearPRMessage> clearMsgList = new ArrayList<>();
+    for (int bucketId = 0; bucketId < getTotalNumberOfBuckets(); bucketId++) {
+      ClearPRMessage clearPRMessage = new ClearPRMessage(bucketId, eventID);
+      clearMsgList.add(clearPRMessage);
+    }
+    return clearMsgList;
   }
 
   @Override
@@ -2601,7 +2607,7 @@ public class PartitionedRegion extends LocalRegion
             retryTime = new RetryTimeKeeper(this.retryTimeout);
           }
 
-          currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId);
+          currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId, true);
           if (isDebugEnabled) {
             logger.debug("PR.sendMsgByBucket: event size is {}, new currentTarget is {}",
                 getEntrySize(event), currentTarget);
@@ -2740,7 +2746,7 @@ public class PartitionedRegion extends LocalRegion
             retryTime = new RetryTimeKeeper(this.retryTimeout);
           }
 
-          currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId);
+          currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId, true);
           if (logger.isDebugEnabled()) {
             logger.debug("PR.sendMsgByBucket: event size is {}, new currentTarget is {}",
                 getEntrySize(event), currentTarget);
@@ -2985,7 +2991,7 @@ public class PartitionedRegion extends LocalRegion
         if (retryTime == null) {
           retryTime = new RetryTimeKeeper(this.retryTimeout);
         }
-        currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId);
+        currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId, true);
 
         // It's possible this is a GemFire thread e.g. ServerConnection
         // which got to this point because of a distributed system shutdown or
@@ -3144,10 +3150,11 @@ public class PartitionedRegion extends LocalRegion
    * @param retryTime the RetryTimeKeeper to track retry times
    * @param event the event used to get the entry size in the event a new bucket should be created
    * @param bucketId the identity of the bucket should it be created
+   * @param createIfNotExist whether to create the bucket if it does not exist
    * @return a Node which contains the bucket, potentially null
    */
   private InternalDistributedMember waitForNodeOrCreateBucket(RetryTimeKeeper retryTime,
-      EntryEventImpl event, Integer bucketId) {
+      EntryEventImpl event, Integer bucketId, boolean createIfNotExist) {
     InternalDistributedMember newNode;
     if (retryTime.overMaximum()) {
       PRHARedundancyProvider.timedOut(this, null, null, "allocate a bucket",
@@ -3157,7 +3164,7 @@ public class PartitionedRegion extends LocalRegion
 
     retryTime.waitForBucketsRecovery();
     newNode = getNodeForBucketWrite(bucketId, retryTime);
-    if (newNode == null) {
+    if (newNode == null && createIfNotExist) {
       newNode = createBucket(bucketId, getEntrySize(event), retryTime);
     }
 
@@ -4271,6 +4278,26 @@ public class PartitionedRegion extends LocalRegion
     return null;
   }
 
+  boolean triggerWriter(RegionEventImpl event, SearchLoadAndWriteProcessor processor, int paction,
+      String theKey) {
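+    // find the local cache writer, if any; otherwise the peers that can run a net write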
+    CacheWriter localWriter = basicGetWriter();
+    Set netWriteRecipients = localWriter == null ? this.distAdvisor.adviseNetWrite() : null;
+
+    if (localWriter == null && (netWriteRecipients == null || netWriteRecipients.isEmpty())) {
+      return false;
+    }
+
+    final long start = getCachePerfStats().startCacheWriterCall();
+    try {
+      processor.initialize(this, theKey, null);
+      processor.doNetWrite(event, netWriteRecipients, localWriter, paction);
+      processor.release();
+    } finally {
+      getCachePerfStats().endCacheWriterCall(start);
+    }
+    return true;
+  }
+
   /**
    * This invokes a cache writer before a destroy operation. Although it has the same method
    * signature as the method in LocalRegion, it is invoked in a different code path. LocalRegion
@@ -4280,31 +4307,26 @@ public class PartitionedRegion extends LocalRegion
   @Override
   boolean cacheWriteBeforeRegionDestroy(RegionEventImpl event)
       throws CacheWriterException, TimeoutException {
-
     if (event.getOperation().isDistributed()) {
       serverRegionDestroy(event);
-      CacheWriter localWriter = basicGetWriter();
-      Set netWriteRecipients = localWriter == null ? this.distAdvisor.adviseNetWrite() : null;
-
-      if (localWriter == null && (netWriteRecipients == null || netWriteRecipients.isEmpty())) {
-        return false;
-      }
-
-      final long start = getCachePerfStats().startCacheWriterCall();
-      try {
-        SearchLoadAndWriteProcessor processor = SearchLoadAndWriteProcessor.getProcessor();
-        processor.initialize(this, "preDestroyRegion", null);
-        processor.doNetWrite(event, netWriteRecipients, localWriter,
-            SearchLoadAndWriteProcessor.BEFOREREGIONDESTROY);
-        processor.release();
-      } finally {
-        getCachePerfStats().endCacheWriterCall(start);
-      }
-      return true;
+      SearchLoadAndWriteProcessor processor = SearchLoadAndWriteProcessor.getProcessor();
+      return triggerWriter(event, processor, SearchLoadAndWriteProcessor.BEFOREREGIONDESTROY,
+          "preDestroyRegion");
     }
     return false;
   }
 
+  @Override
+  void cacheWriteBeforeRegionClear(RegionEventImpl event)
+      throws CacheWriterException, TimeoutException {
+    if (event.getOperation().isDistributed()) {
+      serverRegionClear(event);
+      SearchLoadAndWriteProcessor processor = SearchLoadAndWriteProcessor.getProcessor();
+      triggerWriter(event, processor, SearchLoadAndWriteProcessor.BEFOREREGIONCLEAR,
+          "preClearRegion");
+    }
+  }
+
   /**
    * Test Method: Get the DistributedMember identifier for the vm containing a key
    *
@@ -5188,6 +5210,7 @@ public class PartitionedRegion extends LocalRegion
     return this.totalNumberOfBuckets;
   }
 
+
   @Override
   public void basicDestroy(final EntryEventImpl event, final boolean cacheWrite,
       final Object expectedOldValue)
@@ -10122,4 +10145,27 @@ public class PartitionedRegion extends LocalRegion
     this.getSystem().handleResourceEvent(ResourceEvent.REGION_CREATE, this);
     this.regionCreationNotified = true;
   }
+
+  protected PartitionedRegionClear getPartitionedRegionClear() {
+    return partitionedRegionClear;
+  }
+
+  @Override
+  void cmnClearRegion(RegionEventImpl regionEvent, boolean cacheWrite, boolean useRVV) {
+    // Synchronized to avoid other threads invoking clear on this vm/node.
+    synchronized (clearLock) {
+      partitionedRegionClear.doClear(regionEvent, cacheWrite);
+    }
+  }
+
+  boolean hasAnyClientsInterested() {
+    // Check local filter
+    if (getFilterProfile() != null && (getFilterProfile().hasInterest() || getFilterProfile()
+        .hasCQs())) {
+      return true;
+    }
+    // check peer server filters
+    return (getRegionAdvisor().hasPRServerWithInterest()
+        || getRegionAdvisor().hasPRServerWithCQs());
+  }
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
new file mode 100644
index 0000000..539f682
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
@@ -0,0 +1,506 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.logging.log4j.Logger;
+
+import org.apache.geode.CancelException;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.Operation;
+import org.apache.geode.cache.OperationAbortedException;
+import org.apache.geode.cache.PartitionedRegionPartialClearException;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.distributed.internal.MembershipListener;
+import org.apache.geode.distributed.internal.ReplyException;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.serialization.KnownVersion;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+
+public class PartitionedRegionClear {
+
+  private static final Logger logger = LogService.getLogger();
+
+  protected static final String CLEAR_OPERATION = "_clearOperation";
+
+  private final int retryTime = 2 * 60 * 1000;
+
+  private final PartitionedRegion partitionedRegion;
+
+  protected final LockForListenerAndClientNotification lockForListenerAndClientNotification =
+      new LockForListenerAndClientNotification();
+
+  private volatile boolean membershipChange = false;
+
+  protected final PartitionedRegionClearListener partitionedRegionClearListener =
+      new PartitionedRegionClearListener();
+
+  public PartitionedRegionClear(PartitionedRegion partitionedRegion) {
+    this.partitionedRegion = partitionedRegion;
+    partitionedRegion.getDistributionManager()
+        .addMembershipListener(partitionedRegionClearListener);
+  }
+
+  public boolean isLockedForListenerAndClientNotification() {
+    return lockForListenerAndClientNotification.isLocked();
+  }
+
+  void acquireDistributedClearLock(String clearLock) {
+    try {
+      partitionedRegion.getPartitionedRegionLockService().lock(clearLock, -1, -1);
+    } catch (IllegalStateException e) {
+      partitionedRegion.lockCheckReadiness();
+      throw e;
+    }
+  }
+
+  void releaseDistributedClearLock(String clearLock) {
+    try {
+      partitionedRegion.getPartitionedRegionLockService().unlock(clearLock);
+    } catch (IllegalStateException e) {
+      partitionedRegion.lockCheckReadiness();
+    } catch (Exception ex) {
+      logger.warn("Caught exception while unlocking clear distributed lock. " + ex.getMessage());
+    }
+  }
+
+  protected PartitionedRegionClearListener getPartitionedRegionClearListener() {
+    return partitionedRegionClearListener;
+  }
+
+  /**
+   * only called if there are any listeners or clients interested.
+   */
+  void obtainLockForClear(RegionEventImpl event) {
+    obtainClearLockLocal(partitionedRegion.getDistributionManager().getId());
+    sendPartitionedRegionClearMessage(event,
+        PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR);
+  }
+
+  /**
+   * only called if there are any listeners or clients interested.
+   */
+  void releaseLockForClear(RegionEventImpl event) {
+    releaseClearLockLocal();
+    sendPartitionedRegionClearMessage(event,
+        PartitionedRegionClearMessage.OperationType.OP_UNLOCK_FOR_PR_CLEAR);
+  }
+
+  /**
+   * clears local primaries and sends a message to remote primaries to clear
+   */
+  Set<Integer> clearRegion(RegionEventImpl regionEvent) {
+    // this includes all local primary buckets and their remote secondaries
+    Set<Integer> localPrimaryBuckets = clearRegionLocal(regionEvent);
+    // this includes all remote primary buckets and their secondaries
+    Set<Integer> remotePrimaryBuckets = sendPartitionedRegionClearMessage(regionEvent,
+        PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR);
+
+    Set<Integer> allBucketsCleared = new HashSet<>();
+    allBucketsCleared.addAll(localPrimaryBuckets);
+    allBucketsCleared.addAll(remotePrimaryBuckets);
+    return allBucketsCleared;
+  }
+
+  protected void waitForPrimary(PartitionedRegion.RetryTimeKeeper retryTimer) {
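+    // retry until every local bucket has a known primary, or fail once the timer expires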
+    boolean retry;
+    do {
+      retry = false;
+      for (BucketRegion bucketRegion : partitionedRegion.getDataStore()
+          .getAllLocalBucketRegions()) {
+        if (!bucketRegion.getBucketAdvisor().hasPrimary()) {
+          if (retryTimer.overMaximum()) {
+            throw new PartitionedRegionPartialClearException(
+                "Unable to find primary bucket region during clear operation on "
+                    + partitionedRegion.getName() + " region.");
+          }
+          retryTimer.waitForBucketsRecovery();
+          retry = true;
+        }
+      }
+    } while (retry);
+  }
+
+  /**
+   * this clears all local primary buckets (each will distribute the clear operation to its
+   * secondary members) and all of their remote secondaries
+   */
+  public Set<Integer> clearRegionLocal(RegionEventImpl regionEvent) {
+    Set<Integer> clearedBuckets = new HashSet<>();
+    long clearStartTime = System.nanoTime();
+    setMembershipChange(false);
+    // Synchronized to handle the requester departure.
+    synchronized (lockForListenerAndClientNotification) {
+      if (partitionedRegion.getDataStore() != null) {
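+        // block bucket creation while the clear runs; released in the finally below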
+        partitionedRegion.getDataStore().lockBucketCreationForRegionClear();
+        try {
+          boolean retry;
+          do {
+            waitForPrimary(new PartitionedRegion.RetryTimeKeeper(retryTime));
+            RegionEventImpl bucketRegionEvent;
+            for (BucketRegion localPrimaryBucketRegion : partitionedRegion.getDataStore()
+                .getAllLocalPrimaryBucketRegions()) {
+              if (localPrimaryBucketRegion.size() > 0) {
+                bucketRegionEvent =
+                    new RegionEventImpl(localPrimaryBucketRegion, Operation.REGION_CLEAR, null,
+                        false, partitionedRegion.getMyId(), regionEvent.getEventId());
+                localPrimaryBucketRegion.cmnClearRegion(bucketRegionEvent, false, true);
+              }
+              clearedBuckets.add(localPrimaryBucketRegion.getId());
+            }
+
+            if (getMembershipChange()) {
+              // Retry and reset the membership change status.
+              setMembershipChange(false);
+              retry = true;
+            } else {
+              retry = false;
+            }
+
+          } while (retry);
+          doAfterClear(regionEvent);
+        } finally {
+          partitionedRegion.getDataStore().unlockBucketCreationForRegionClear();
+          if (clearedBuckets.size() != 0 && partitionedRegion.getRegionCachePerfStats() != null) {
+            partitionedRegion.getRegionCachePerfStats().incRegionClearCount();
+            partitionedRegion.getRegionCachePerfStats()
+                .incPartitionedRegionClearLocalDuration(System.nanoTime() - clearStartTime);
+          }
+        }
+      } else {
+        // Non data-store with client queue and listener
+        doAfterClear(regionEvent);
+      }
+    }
+    return clearedBuckets;
+  }
+
+  protected void doAfterClear(RegionEventImpl regionEvent) {
+    if (partitionedRegion.hasAnyClientsInterested()) {
+      notifyClients(regionEvent);
+    }
+
+    if (partitionedRegion.hasListener()) {
+      partitionedRegion.dispatchListenerEvent(EnumListenerEvent.AFTER_REGION_CLEAR, regionEvent);
+    }
+  }
+
+  void notifyClients(RegionEventImpl event) {
+    // Set client routing information into the event
+    // The clear operation in case of PR is distributed differently
+    // hence the FilterRoutingInfo is set here instead of
+    // DistributedCacheOperation.distribute().
+    event.setEventType(EnumListenerEvent.AFTER_REGION_CLEAR);
+    if (!partitionedRegion.isUsedForMetaRegion() && !partitionedRegion
+        .isUsedForPartitionedRegionAdmin()
+        && !partitionedRegion.isUsedForPartitionedRegionBucket() && !partitionedRegion
+            .isUsedForParallelGatewaySenderQueue()) {
+
+      FilterRoutingInfo localCqFrInfo =
+          partitionedRegion.getFilterProfile().getFilterRoutingInfoPart1(event,
+              FilterProfile.NO_PROFILES, Collections.emptySet());
+
+      FilterRoutingInfo localCqInterestFrInfo =
+          partitionedRegion.getFilterProfile().getFilterRoutingInfoPart2(localCqFrInfo, event);
+
+      if (localCqInterestFrInfo != null) {
+        event.setLocalFilterInfo(localCqInterestFrInfo.getLocalFilterInfo());
+      }
+    }
+    partitionedRegion.notifyBridgeClients(event);
+  }
+
+  /**
+   * obtain locks for all local buckets
+   */
+  protected void obtainClearLockLocal(InternalDistributedMember requester) {
+    synchronized (lockForListenerAndClientNotification) {
+      // Check if the member is still part of the distributed system
+      if (!partitionedRegion.getDistributionManager().isCurrentMember(requester)) {
+        return;
+      }
+
+      lockForListenerAndClientNotification.setLocked(requester);
+      if (partitionedRegion.getDataStore() != null) {
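+        // write-lock each local primary bucket so no new versions are generated while locked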
+        for (BucketRegion localPrimaryBucketRegion : partitionedRegion.getDataStore()
+            .getAllLocalPrimaryBucketRegions()) {
+          try {
+            localPrimaryBucketRegion.lockLocallyForClear(partitionedRegion.getDistributionManager(),
+                partitionedRegion.getMyId(), null);
+          } catch (Exception ex) {
+            partitionedRegion.checkClosed();
+          }
+        }
+      }
+    }
+  }
+
+  protected void releaseClearLockLocal() {
+    synchronized (lockForListenerAndClientNotification) {
+      if (lockForListenerAndClientNotification.getLockRequester() == null) {
+        // The member has left.
+        return;
+      }
+      try {
+        if (partitionedRegion.getDataStore() != null) {
+
+          for (BucketRegion localPrimaryBucketRegion : partitionedRegion.getDataStore()
+              .getAllLocalPrimaryBucketRegions()) {
+            try {
+              localPrimaryBucketRegion.releaseLockLocallyForClear(null);
+            } catch (Exception ex) {
+              logger.debug(
+                  "Unable to acquire clear lock for bucket region " + localPrimaryBucketRegion
+                      .getName(),
+                  ex.getMessage());
+              partitionedRegion.checkClosed();
+            }
+          }
+        }
+      } finally {
+        lockForListenerAndClientNotification.setUnLocked();
+      }
+    }
+  }
+
+  protected Set<Integer> sendPartitionedRegionClearMessage(RegionEventImpl event,
+      PartitionedRegionClearMessage.OperationType op) {
+    RegionEventImpl eventForLocalClear = (RegionEventImpl) event.clone();
+    eventForLocalClear.setOperation(Operation.REGION_LOCAL_CLEAR);
+
+    do {
+      try {
+        return attemptToSendPartitionedRegionClearMessage(event, op);
+      } catch (ForceReattemptException reattemptException) {
+        // retry
+      }
+    } while (true);
+  }
+
+  /**
+   * @return the buckets that were cleared; an empty set if an exception occurred
+   */
+  protected Set<Integer> attemptToSendPartitionedRegionClearMessage(RegionEventImpl event,
+      PartitionedRegionClearMessage.OperationType op)
+      throws ForceReattemptException {
+    Set<Integer> bucketsOperated = new HashSet<>();
+
+    if (partitionedRegion.getPRRoot() == null) {
+      if (logger.isDebugEnabled()) {
+        logger.debug(
+            "Partition region {} failed to initialize. Remove its profile from remote members.",
+            this.partitionedRegion);
+      }
+      new UpdateAttributesProcessor(partitionedRegion, true).distribute(false);
+      return bucketsOperated;
+    }
+
+    final Set<InternalDistributedMember> configRecipients =
+        new HashSet<>(partitionedRegion.getRegionAdvisor()
+            .adviseAllPRNodes());
+
+    try {
+      final PartitionRegionConfig prConfig =
+          partitionedRegion.getPRRoot().get(partitionedRegion.getRegionIdentifier());
+
+      if (prConfig != null) {
+        for (Node node : prConfig.getNodes()) {
+          InternalDistributedMember idm = node.getMemberId();
+          if (!idm.equals(partitionedRegion.getMyId())) {
+            configRecipients.add(idm);
+          }
+        }
+      }
+    } catch (CancelException ignore) {
+      // ignore
+    }
+
+    try {
+      PartitionedRegionClearMessage.PartitionedRegionClearResponse resp =
+          new PartitionedRegionClearMessage.PartitionedRegionClearResponse(
+              partitionedRegion.getSystem(), configRecipients);
+      PartitionedRegionClearMessage partitionedRegionClearMessage =
+          new PartitionedRegionClearMessage(configRecipients, partitionedRegion, resp, op, event);
+      partitionedRegionClearMessage.send();
+
+      resp.waitForRepliesUninterruptibly();
+      bucketsOperated = resp.bucketsCleared;
+
+    } catch (ReplyException e) {
+      Throwable t = e.getCause();
+      if (t instanceof ForceReattemptException) {
+        throw (ForceReattemptException) t;
+      }
+      if (t instanceof PartitionedRegionPartialClearException) {
+        throw new PartitionedRegionPartialClearException(t.getMessage(), t);
+      }
+      logger.warn(
+          "PartitionedRegionClear#sendPartitionedRegionClearMessage: Caught exception during ClearRegionMessage send and waiting for response",
+          e);
+    }
+    return bucketsOperated;
+  }
+
+  /**
+   * Verifies that every member hosting this partitioned region supports partitioned region
+   * clear, and throws UnsupportedOperationException if any member's version is too old.
+   */
+  public void allServerVersionsSupportPartitionRegionClear() {
+    List<String> memberNames = new ArrayList<>();
+    for (int i = 0; i < partitionedRegion.getTotalNumberOfBuckets(); i++) {
+      InternalDistributedMember internalDistributedMember = partitionedRegion.getBucketPrimary(i);
+      if ((internalDistributedMember != null)
+          && (internalDistributedMember.getVersion().isOlderThan(KnownVersion.GEODE_1_14_0))) {
+        if (!memberNames.contains(internalDistributedMember.getName())) {
+          memberNames.add(internalDistributedMember.getName());
+        }
+      }
+    }
+    if (!memberNames.isEmpty()) {
+      throw new UnsupportedOperationException(
+          "A server's " + memberNames + " version was too old (< "
+              + KnownVersion.GEODE_1_14_0 + ") for : Partitioned Region Clear");
+
+    }
+  }
+
+
+  void doClear(RegionEventImpl regionEvent, boolean cacheWrite) {
+    String lockName = CLEAR_OPERATION + partitionedRegion.getName();
+    long clearStartTime = 0;
+
+    allServerVersionsSupportPartitionRegionClear();
+
+    try {
+      // distributed lock to make sure only one clear op is in progress in the cluster.
+      acquireDistributedClearLock(lockName);
+      clearStartTime = System.nanoTime();
+
+      // Force all primary buckets to be created before clear.
+      assignAllPrimaryBuckets();
+
+      // do cacheWrite
+      if (cacheWrite) {
+        invokeCacheWriter(regionEvent);
+      }
+
+      // Check if there are any listeners or clients interested. If so, then clear write
+      // locks need to be taken on all local and remote primary buckets in order to
+      // preserve the ordering of client events (for concurrent operations on the region).
+      boolean acquireClearLockForNotification =
+          (partitionedRegion.hasAnyClientsInterested() || partitionedRegion.hasListener());
+      if (acquireClearLockForNotification) {
+        obtainLockForClear(regionEvent);
+      }
+      try {
+        Set<Integer> bucketsCleared = clearRegion(regionEvent);
+
+        if (partitionedRegion.getTotalNumberOfBuckets() != bucketsCleared.size()) {
+          String message = "Unable to clear all the buckets from the partitioned region "
+              + partitionedRegion.getName()
+              + ", either data (buckets) moved or member departed.";
+
+          logger.warn(message + " expected to clear number of buckets: "
+              + partitionedRegion.getTotalNumberOfBuckets() +
+              " actual cleared: " + bucketsCleared.size());
+
+          throw new PartitionedRegionPartialClearException(message);
+        }
+      } finally {
+        if (acquireClearLockForNotification) {
+          releaseLockForClear(regionEvent);
+        }
+      }
+    } finally {
+      releaseDistributedClearLock(lockName);
+      CachePerfStats stats = partitionedRegion.getRegionCachePerfStats();
+      if (stats != null) {
+        stats.incPartitionedRegionClearTotalDuration(System.nanoTime() - clearStartTime);
+      }
+    }
+  }
+
+  protected void invokeCacheWriter(RegionEventImpl regionEvent) {
+    try {
+      partitionedRegion.cacheWriteBeforeRegionClear(regionEvent);
+    } catch (OperationAbortedException operationAbortedException) {
+      throw new CacheWriterException(operationAbortedException);
+    }
+  }
+
+  protected void assignAllPrimaryBuckets() {
+    PartitionedRegion leader = ColocationHelper.getLeaderRegion(partitionedRegion);
+    PartitionRegionHelper.assignBucketsToPartitions(leader);
+  }
+
+  protected void handleClearFromDepartedMember(InternalDistributedMember departedMember) {
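+    // if the departed member is the one that requested the clear lock, release the local locks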
+    if (departedMember.equals(lockForListenerAndClientNotification.getLockRequester())) {
+      synchronized (lockForListenerAndClientNotification) {
+        if (lockForListenerAndClientNotification.getLockRequester() != null) {
+          releaseClearLockLocal();
+        }
+      }
+    }
+  }
+
+  class LockForListenerAndClientNotification {
+
+    private boolean locked = false;
+
+    private InternalDistributedMember lockRequester;
+
+    synchronized void setLocked(InternalDistributedMember member) {
+      locked = true;
+      lockRequester = member;
+    }
+
+    synchronized void setUnLocked() {
+      locked = false;
+      lockRequester = null;
+    }
+
+    synchronized boolean isLocked() {
+      return locked;
+    }
+
+    synchronized InternalDistributedMember getLockRequester() {
+      return lockRequester;
+    }
+  }
+
+  protected void setMembershipChange(boolean membershipChange) {
+    this.membershipChange = membershipChange;
+  }
+
+  protected boolean getMembershipChange() {
+    return membershipChange;
+  }
+
+  protected class PartitionedRegionClearListener implements MembershipListener {
+
+    @Override
+    public synchronized void memberDeparted(DistributionManager distributionManager,
+        InternalDistributedMember id, boolean crashed) {
+      setMembershipChange(true);
+      handleClearFromDepartedMember(id);
+    }
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
new file mode 100755
index 0000000..724256b
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
@@ -0,0 +1,289 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.internal.cache;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.geode.DataSerializer;
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.cache.Operation;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.ReplyException;
+import org.apache.geode.distributed.internal.ReplyMessage;
+import org.apache.geode.distributed.internal.ReplyProcessor21;
+import org.apache.geode.distributed.internal.ReplySender;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.Assert;
+import org.apache.geode.internal.CopyOnWriteHashSet;
+import org.apache.geode.internal.NanoTimer;
+import org.apache.geode.internal.cache.partitioned.PartitionMessage;
+import org.apache.geode.internal.logging.log4j.LogMarker;
+import org.apache.geode.internal.serialization.DeserializationContext;
+import org.apache.geode.internal.serialization.SerializationContext;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+
+/**
+ * This message is for operations at the partitioned region level; it can be sent by any
+ * originating member to the other members hosting this partitioned region.
+ */
+public class PartitionedRegionClearMessage extends PartitionMessage {
+
+  public enum OperationType {
+    OP_LOCK_FOR_PR_CLEAR, OP_UNLOCK_FOR_PR_CLEAR, OP_PR_CLEAR,
+  }
+
+  private Object cbArg;
+
+  private OperationType op;
+
+  private EventID eventID;
+
+  private PartitionedRegion partitionedRegion;
+
+  private Set<Integer> bucketsCleared;
+
+  @Override
+  public EventID getEventID() {
+    return eventID;
+  }
+
+  public PartitionedRegionClearMessage() {}
+
+  PartitionedRegionClearMessage(Set<InternalDistributedMember> recipients, PartitionedRegion region,
+      ReplyProcessor21 processor, PartitionedRegionClearMessage.OperationType operationType,
+      final RegionEventImpl event) {
+    super(recipients, region.getPRId(), processor);
+    partitionedRegion = region;
+    op = operationType;
+    cbArg = event.getRawCallbackArgument();
+    eventID = event.getEventId();
+  }
+
+  public OperationType getOp() {
+    return op;
+  }
+
+  public void send() {
+    Assert.assertTrue(getRecipients() != null, "ClearMessage NULL recipients set");
+    setTransactionDistributed(partitionedRegion.getCache().getTxManager().isDistributed());
+    partitionedRegion.getDistributionManager().putOutgoing(this);
+  }
+
+  @Override
+  protected Throwable processCheckForPR(PartitionedRegion pr,
+      DistributionManager distributionManager) {
+    if (pr != null && !pr.getDistributionAdvisor().isInitialized()) {
+      return new ForceReattemptException(
+          String.format("%s : could not find partitioned region with Id %s",
+              distributionManager.getDistributionManagerId(),
+              pr.getRegionIdentifier()));
+    }
+    return null;
+  }
+
+  @Override
+  protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm,
+      PartitionedRegion partitionedRegion,
+      long startTime) throws CacheException {
+
+    if (partitionedRegion == null) {
+      return true;
+    }
+
+    if (partitionedRegion.isDestroyed()) {
+      return true;
+    }
+
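+    // dispatch on the operation type: lock or unlock local primaries, or clear them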
+    if (op == OperationType.OP_LOCK_FOR_PR_CLEAR) {
+      partitionedRegion.getPartitionedRegionClear().obtainClearLockLocal(getSender());
+    } else if (op == OperationType.OP_UNLOCK_FOR_PR_CLEAR) {
+      partitionedRegion.getPartitionedRegionClear().releaseClearLockLocal();
+    } else {
+      RegionEventImpl event =
+          new RegionEventImpl(partitionedRegion, Operation.REGION_CLEAR, this.cbArg, true,
+              partitionedRegion.getMyId(),
+              getEventID());
+      bucketsCleared = partitionedRegion.getPartitionedRegionClear().clearRegionLocal(event);
+    }
+    return true;
+  }
+
+  @Override
+  protected void appendFields(StringBuilder buff) {
+    super.appendFields(buff);
+    buff.append(" cbArg=").append(this.cbArg).append(" op=").append(this.op);
+  }
+
+  @Override
+  public int getDSFID() {
+    return CLEAR_PARTITIONED_REGION_MESSAGE;
+  }
+
+  @Override
+  public void fromData(DataInput in,
+      DeserializationContext context) throws IOException, ClassNotFoundException {
+    super.fromData(in, context);
+    this.cbArg = DataSerializer.readObject(in);
+    op = PartitionedRegionClearMessage.OperationType.values()[in.readByte()];
+    eventID = DataSerializer.readObject(in);
+  }
+
+  @Override
+  public void toData(DataOutput out,
+      SerializationContext context) throws IOException {
+    super.toData(out, context);
+    DataSerializer.writeObject(this.cbArg, out);
+    out.writeByte(op.ordinal());
+    DataSerializer.writeObject(eventID, out);
+  }
+
+  /**
+   * The response on which to wait for all the replies. This response ignores any exceptions
+   * received from the "far side".
+   */
+  public static class PartitionedRegionClearResponse extends ReplyProcessor21 {
+    CopyOnWriteHashSet<Integer> bucketsCleared = new CopyOnWriteHashSet<>();
+
+    public PartitionedRegionClearResponse(InternalDistributedSystem system,
+        Set<InternalDistributedMember> initMembers) {
+      super(system, initMembers);
+    }
+
+    @Override
+    public void process(DistributionMessage msg) {
+      if (msg instanceof PartitionedRegionClearReplyMessage) {
+        Set<Integer> buckets = ((PartitionedRegionClearReplyMessage) msg).bucketsCleared;
+        if (buckets != null) {
+          bucketsCleared.addAll(buckets);
+        }
+      }
+      super.process(msg, true);
+    }
+  }
+
+  @Override
+  protected void sendReply(InternalDistributedMember member, int processorId,
+      DistributionManager distributionManager, ReplyException ex,
+      PartitionedRegion partitionedRegion, long startTime) {
+    if (partitionedRegion != null) {
+      if (startTime > 0) {
+        partitionedRegion.getPrStats().endPartitionMessagesProcessing(startTime);
+      }
+    }
+    PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage
+        .send(member, processorId, getReplySender(distributionManager), op, bucketsCleared,
+            ex);
+  }
+
+  public static class PartitionedRegionClearReplyMessage extends ReplyMessage {
+
+    private Set<Integer> bucketsCleared;
+
+    private OperationType op;
+
+    @Override
+    public boolean getInlineProcess() {
+      return true;
+    }
+
+    /**
+     * Empty constructor to conform to DataSerializable interface
+     */
+    public PartitionedRegionClearReplyMessage() {}
+
+    private PartitionedRegionClearReplyMessage(int processorId, OperationType op,
+        Set<Integer> bucketsCleared, ReplyException ex) {
+      super();
+      this.bucketsCleared = bucketsCleared;
+      this.op = op;
+      setProcessorId(processorId);
+      setException(ex);
+    }
+
+    /** Send an ack */
+    public static void send(InternalDistributedMember recipient, int processorId, ReplySender dm,
+        OperationType op, Set<Integer> bucketsCleared, ReplyException ex) {
+
+      Assert.assertTrue(recipient != null, "PartitionedRegionClearReplyMessage recipient was NULL");
+
+      PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage m =
+          new PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage(processorId, op,
+              bucketsCleared, ex);
+
+      m.setRecipient(recipient);
+      dm.putOutgoing(m);
+    }
+
+    /**
+     * Processes this message. This method is invoked by the receiver of the message.
+     *
+     * @param dm the distribution manager that is processing the message.
+     */
+    @Override
+    public void process(final DistributionManager dm, final ReplyProcessor21 rp) {
+      final long startTime = getTimestamp();
+
+      if (rp == null) {
+        if (LogService.getLogger().isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          LogService.getLogger().trace(LogMarker.DM_VERBOSE, "{}: processor not found", this);
+        }
+        return;
+      }
+
+      rp.process(this);
+
+      dm.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
+    }
+
+    @Override
+    public int getDSFID() {
+      return CLEAR_PARTITIONED_REGION_REPLY_MESSAGE;
+    }
+
+    @Override
+    public void fromData(DataInput in,
+        DeserializationContext context) throws IOException, ClassNotFoundException {
+      super.fromData(in, context);
+      op = PartitionedRegionClearMessage.OperationType.values()[in.readByte()];
+      bucketsCleared = DataSerializer.readObject(in);
+    }
+
+    @Override
+    public void toData(DataOutput out,
+        SerializationContext context) throws IOException {
+      super.toData(out, context);
+      out.writeByte(op.ordinal());
+      DataSerializer.writeObject(bucketsCleared, out);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder();
+      sb.append("PartitionedRegionClearReplyMessage ")
+          .append("processorId=").append(this.processorId)
+          .append(" sender=").append(sender)
+          .append(" bucketsCleared=").append(this.bucketsCleared)
+          .append(" exception=").append(getException());
+      return sb.toString();
+    }
+  }
+}
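
The message above drives a three-phase protocol: the coordinator first sends
OP_LOCK_FOR_PR_CLEAR to every member hosting the region, then OP_PR_CLEAR to run the
local clears, then OP_UNLOCK_FOR_PR_CLEAR. A minimal sketch of driving one phase from
code in the same package, using only the constructor, send(), and response class shown
in this diff; the recipients, region, and event variables are assumed to be in scope:

    PartitionedRegionClearMessage.PartitionedRegionClearResponse response =
        new PartitionedRegionClearMessage.PartitionedRegionClearResponse(
            region.getSystem(), recipients);
    // Phase 1 of the clear protocol: ask every host to take its local clear lock.
    PartitionedRegionClearMessage message = new PartitionedRegionClearMessage(
        recipients, region, response,
        PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR, event);
    message.send();
    response.waitForRepliesUninterruptibly(); // inherited from ReplyProcessor21
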
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
index d1cb4e6..6923732 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataStore.java
@@ -980,6 +980,14 @@ public class PartitionedRegionDataStore implements HasCachePerfStats {
     }
   }
 
+  protected void lockBucketCreationForRegionClear() {
+    bucketCreationLock.writeLock().lock();
+  }
+
+  protected void unlockBucketCreationForRegionClear() {
+    bucketCreationLock.writeLock().unlock();
+  }
+
   /**
    * Gets the total amount of memory in bytes allocated for all values for this PR in this VM. This
    * is the current memory (MB) watermark for data in this PR.
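
The new lock pair above lets a region clear exclude concurrent bucket creation. A
hedged sketch of the intended call pattern from within the same package; the dataStore
variable and the clear body are assumptions, not taken from this diff:

    // Hold the bucket-creation write lock for the duration of the local clear.
    dataStore.lockBucketCreationForRegionClear();
    try {
      // ... clear the local primary bucket regions here ...
    } finally {
      dataStore.unlockBucketCreationForRegionClear();
    }
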
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEventImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEventImpl.java
index fba513d..49dc932 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEventImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEventImpl.java
@@ -119,6 +119,11 @@ public class RegionEventImpl
     return region;
   }
 
+  public void setRegion(LocalRegion region) {
+    this.region = region;
+    this.distributedMember = region.getMyId();
+  }
+
   @Override
   public Operation getOperation() {
     return this.op;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionPerfStats.java b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionPerfStats.java
index d3c9891..30d60bf 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionPerfStats.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionPerfStats.java
@@ -520,9 +520,15 @@ class RegionPerfStats extends CachePerfStats implements RegionStats {
   }
 
   @Override
-  public void incClearCount() {
-    stats.incLong(clearsId, 1L);
-    cachePerfStats.incClearCount();
+  public void incRegionClearCount() {
+    stats.incLong(regionClearsId, 1L);
+    cachePerfStats.incRegionClearCount();
+  }
+
+  @Override
+  public void incBucketClearCount() {
+    stats.incLong(bucketClearsId, 1L);
+    cachePerfStats.incBucketClearCount();
   }
 
   @Override
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionStats.java b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionStats.java
index 2fe6cc1..4c0e446 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionStats.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionStats.java
@@ -135,7 +135,9 @@ public interface RegionStats {
 
   void incEvictWorkTime(long delta);
 
-  void incClearCount();
+  void incBucketClearCount();
+
+  void incRegionClearCount();
 
   void incPRQueryRetries();
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
new file mode 100644
index 0000000..2603b78
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
@@ -0,0 +1,320 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.internal.cache.partitioned;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Set;
+
+import org.apache.logging.log4j.Logger;
+
+import org.apache.geode.DataSerializer;
+import org.apache.geode.annotations.VisibleForTesting;
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.cache.Operation;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DirectReplyProcessor;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.ReplyException;
+import org.apache.geode.distributed.internal.ReplyMessage;
+import org.apache.geode.distributed.internal.ReplyProcessor21;
+import org.apache.geode.distributed.internal.ReplySender;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.Assert;
+import org.apache.geode.internal.InternalDataSerializer;
+import org.apache.geode.internal.NanoTimer;
+import org.apache.geode.internal.cache.BucketRegion;
+import org.apache.geode.internal.cache.EventID;
+import org.apache.geode.internal.cache.ForceReattemptException;
+import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.internal.cache.RegionEventImpl;
+import org.apache.geode.internal.logging.log4j.LogMarker;
+import org.apache.geode.internal.serialization.DeserializationContext;
+import org.apache.geode.internal.serialization.SerializationContext;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+
+public class ClearPRMessage extends PartitionMessageWithDirectReply {
+  private static final Logger logger = LogService.getLogger();
+
+  private Integer bucketId;
+
+  private EventID eventID;
+
+  public static final String BUCKET_NON_PRIMARY_MESSAGE =
+      "The bucket region on target member is no longer primary";
+  public static final String EXCEPTION_THROWN_DURING_CLEAR_OPERATION =
+      "An exception was thrown during the local clear operation: ";
+
+  /**
+   * state from operateOnRegion that must be preserved for transmission from the waiting pool
+   */
+  transient boolean result = false;
+
+  /**
+   * Empty constructor to satisfy {@link DataSerializer} requirements
+   */
+  public ClearPRMessage() {}
+
+  public ClearPRMessage(int bucketId, EventID eventID) {
+    this.bucketId = bucketId;
+    this.eventID = eventID;
+  }
+
+  public void initMessage(PartitionedRegion region, Set<InternalDistributedMember> recipients,
+      DirectReplyProcessor replyProcessor) {
+    this.resetRecipients();
+    if (recipients != null) {
+      setRecipients(recipients);
+    }
+    this.regionId = region.getPRId();
+    this.processor = replyProcessor;
+    this.processorId = replyProcessor == null ? 0 : replyProcessor.getProcessorId();
+    if (replyProcessor != null) {
+      replyProcessor.enableSevereAlertProcessing();
+    }
+  }
+
+  public ClearResponse send(DistributedMember recipient, PartitionedRegion region)
+      throws ForceReattemptException {
+    Set<InternalDistributedMember> recipients =
+        Collections.singleton((InternalDistributedMember) recipient);
+    ClearResponse clearResponse = new ClearResponse(region.getSystem(), recipients);
+    initMessage(region, recipients, clearResponse);
+    if (logger.isDebugEnabled()) {
+      logger.debug("ClearPRMessage.send: recipient is {}, msg is {}", recipient, this);
+    }
+
+    Set<InternalDistributedMember> failures = region.getDistributionManager().putOutgoing(this);
+    if (failures != null && failures.size() > 0) {
+      throw new ForceReattemptException("Failed sending <" + this + "> due to " + failures);
+    }
+    return clearResponse;
+  }
+
+  @Override
+  public int getDSFID() {
+    return PR_CLEAR_MESSAGE;
+  }
+
+  @Override
+  public void toData(DataOutput out, SerializationContext context) throws IOException {
+    super.toData(out, context);
+    if (bucketId == null) {
+      InternalDataSerializer.writeSignedVL(-1, out);
+    } else {
+      InternalDataSerializer.writeSignedVL(bucketId, out);
+    }
+    DataSerializer.writeObject(this.eventID, out);
+  }
+
+  @Override
+  public void fromData(DataInput in, DeserializationContext context)
+      throws IOException, ClassNotFoundException {
+    super.fromData(in, context);
+    this.bucketId = (int) InternalDataSerializer.readSignedVL(in);
+    this.eventID = (EventID) DataSerializer.readObject(in);
+  }
+
+  @Override
+  public EventID getEventID() {
+    return null;
+  }
+
+  /**
+   * This method is called upon receipt and makes the desired changes to the PartitionedRegion.
+   * Note: it is very important that this message does NOT cause any deadlocks, as the sender
+   * will wait indefinitely for the acknowledgement.
+   */
+  @Override
+  @VisibleForTesting
+  protected boolean operateOnPartitionedRegion(ClusterDistributionManager distributionManager,
+      PartitionedRegion region, long startTime) {
+    try {
+      this.result = doLocalClear(region);
+    } catch (ForceReattemptException ex) {
+      sendReply(getSender(), getProcessorId(), distributionManager, new ReplyException(ex), region,
+          startTime);
+      return false;
+    }
+    return this.result;
+  }
+
+  public Integer getBucketId() {
+    return this.bucketId;
+  }
+
+  public boolean doLocalClear(PartitionedRegion region)
+      throws ForceReattemptException {
+    // Retrieve local bucket region which matches target bucketId
+    BucketRegion bucketRegion =
+        region.getDataStore().getInitializedBucketForId(null, this.bucketId);
+
+    boolean lockedForPrimary = bucketRegion.doLockForPrimary(false);
+    // Check if we obtained primary lock, throw exception if not
+    if (!lockedForPrimary) {
+      throw new ForceReattemptException(BUCKET_NON_PRIMARY_MESSAGE);
+    }
+    try {
+      RegionEventImpl regionEvent = new RegionEventImpl(bucketRegion, Operation.REGION_CLEAR, null,
+          false, region.getMyId(), eventID);
+      bucketRegion.cmnClearRegion(regionEvent, false, true);
+    } catch (PartitionOfflineException poe) {
+      logger.info(
+          "All members holding data for bucket {} are offline, no more retries will be attempted",
+          this.bucketId,
+          poe);
+      throw poe;
+    } catch (Exception ex) {
+      throw new ForceReattemptException(
+          EXCEPTION_THROWN_DURING_CLEAR_OPERATION + ex.getClass().getName(), ex);
+    } finally {
+      bucketRegion.doUnlockForPrimary();
+    }
+
+    return true;
+  }
+
+  @Override
+  public boolean canStartRemoteTransaction() {
+    return false;
+  }
+
+  @Override
+  protected void sendReply(InternalDistributedMember member, int processorId,
+      DistributionManager distributionManager, ReplyException ex,
+      PartitionedRegion partitionedRegion, long startTime) {
+    if (partitionedRegion != null) {
+      if (startTime > 0) {
+        partitionedRegion.getPrStats().endPartitionMessagesProcessing(startTime);
+      }
+    }
+    ClearReplyMessage.send(member, processorId, getReplySender(distributionManager), this.result,
+        ex);
+  }
+
+  @Override
+  protected void appendFields(StringBuilder buff) {
+    super.appendFields(buff);
+    buff.append("; bucketId=").append(this.bucketId);
+  }
+
+  public static class ClearReplyMessage extends ReplyMessage {
+    @Override
+    public boolean getInlineProcess() {
+      return true;
+    }
+
+    /**
+     * Empty constructor to conform to DataSerializable interface
+     */
+    @SuppressWarnings("unused")
+    public ClearReplyMessage() {}
+
+    private ClearReplyMessage(int processorId, boolean result, ReplyException ex) {
+      super();
+      setProcessorId(processorId);
+      if (ex != null) {
+        setException(ex);
+      } else {
+        setReturnValue(result);
+      }
+    }
+
+    /**
+     * Send an ack
+     */
+    public static void send(InternalDistributedMember recipient, int processorId,
+        ReplySender replySender,
+        boolean result, ReplyException ex) {
+      Assert.assertNotNull(recipient, "ClearReplyMessage recipient was NULL.");
+      ClearReplyMessage message = new ClearReplyMessage(processorId, result, ex);
+      message.setRecipient(recipient);
+      replySender.putOutgoing(message);
+    }
+
+    /**
+     * Processes this message. This method is invoked by the receiver of the message.
+     *
+     * @param distributionManager the distribution manager that is processing the message.
+     */
+    @Override
+    public void process(final DistributionManager distributionManager,
+        final ReplyProcessor21 replyProcessor) {
+      final long startTime = getTimestamp();
+      if (replyProcessor == null) {
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "{}: processor not found", this);
+        }
+        return;
+      }
+      if (replyProcessor instanceof ClearResponse) {
+        ((ClearResponse) replyProcessor).setResponse(this);
+      }
+      replyProcessor.process(this);
+
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", replyProcessor, this);
+      }
+      distributionManager.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
+    }
+
+    @Override
+    public int getDSFID() {
+      return PR_CLEAR_REPLY_MESSAGE;
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder stringBuilder = new StringBuilder(super.toString());
+      stringBuilder.append(" returnValue=");
+      stringBuilder.append(getReturnValue());
+      return stringBuilder.toString();
+    }
+  }
+
+  /**
+   * A processor to capture the value returned by {@link ClearPRMessage}
+   */
+  public static class ClearResponse extends PartitionResponse {
+    private volatile boolean returnValue;
+
+    public ClearResponse(InternalDistributedSystem distributedSystem,
+        Set<InternalDistributedMember> recipients) {
+      super(distributedSystem, recipients, false);
+    }
+
+    public void setResponse(ClearReplyMessage response) {
+      if (response.getException() == null) {
+        this.returnValue = (boolean) response.getReturnValue();
+      }
+    }
+
+    /**
+     * @return the result of the remote clear operation
+     * @throws ForceReattemptException if the peer is no longer available
+     * @throws CacheException if the peer generates an error
+     */
+    public boolean waitForResult() throws CacheException, ForceReattemptException {
+      waitForCacheException();
+      return this.returnValue;
+    }
+  }
+}
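
As a usage sketch, a coordinator clearing a single bucket pairs send() with the
ClearResponse above. Every call here appears in this diff; recipient, region, bucketId,
and eventID are assumed to be in scope, inside a method that declares the checked
ForceReattemptException:

    // Clear one bucket on its primary member and wait for the boolean result.
    ClearPRMessage message = new ClearPRMessage(bucketId, eventID);
    ClearPRMessage.ClearResponse response = message.send(recipient, region);
    boolean cleared = response.waitForResult(); // throws ForceReattemptException
                                                // if the target is no longer primary
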
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java
index 0c690c5..6bb666c 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java
@@ -413,6 +413,7 @@ public class PutAllPRMessage extends PartitionMessageWithDirectReply {
       Object[] keys = getKeysToBeLocked();
       if (!notificationOnly) {
         boolean locked = false;
+        boolean rvvLocked = false;
         try {
           if (putAllPRData.length > 0) {
             if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
@@ -438,6 +439,10 @@ public class PutAllPRMessage extends PartitionMessageWithDirectReply {
             bucketRegion.recordBulkOpStart(membershipID, eventID);
           }
           locked = bucketRegion.waitUntilLocked(keys);
+          if (!rvvLocked) {
+            bucketRegion.lockRVVForBulkOp();
+            rvvLocked = true;
+          }
           boolean lockedForPrimary = false;
           final HashMap succeeded = new HashMap();
           PutAllPartialResult partialKeys = new PutAllPartialResult(putAllPRDataSize);
@@ -518,6 +523,10 @@ public class PutAllPRMessage extends PartitionMessageWithDirectReply {
         } catch (RegionDestroyedException e) {
           ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
         } finally {
+          if (rvvLocked) {
+            bucketRegion.unlockRVVForBulkOp();
+            rvvLocked = false;
+          }
           if (locked) {
             bucketRegion.removeAndNotifyKeys(keys);
           }
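
RemoveAllPRMessage below gets the same bracket. Condensed, the lock ordering these
hunks establish is: take the per-key locks, then the RVV lock, apply the bulk
operation, and release in reverse order in the finally block. A minimal sketch of that
ordering, with bucketRegion and keys assumed to be in scope:

    boolean locked = false;
    boolean rvvLocked = false;
    try {
      locked = bucketRegion.waitUntilLocked(keys); // per-key locks first
      bucketRegion.lockRVVForBulkOp();             // then the RVV lock
      rvvLocked = true;
      // ... apply the putAll/removeAll here ...
    } finally {
      if (rvvLocked) {
        bucketRegion.unlockRVVForBulkOp();         // release the RVV lock first
      }
      if (locked) {
        bucketRegion.removeAndNotifyKeys(keys);    // then the per-key locks
      }
    }
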
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java
index b3f4189..af03df3 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java
@@ -854,10 +854,21 @@ public class RegionAdvisor extends CacheDistributionAdvisor {
         && prof.filterProfile.hasInterest();
   };
 
+  @Immutable
+  private static final Filter prServerWithCqFilter = profile -> {
+    CacheProfile prof = (CacheProfile) profile;
+    return prof.isPartitioned && prof.hasCacheServer && prof.filterProfile != null
+        && prof.filterProfile.hasCQs();
+  };
+
   public boolean hasPRServerWithInterest() {
     return satisfiesFilter(prServerWithInterestFilter);
   }
 
+  public boolean hasPRServerWithCQs() {
+    return satisfiesFilter(prServerWithCqFilter);
+  }
+
   /**
    * return the set of all members who must receive operation notifications
    *
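
A hedged sketch of how the new predicate might be consumed; this usage is an
assumption mirroring hasPRServerWithInterest(), not something shown in the diff:

    // Decide whether clear notifications must reach servers holding CQs.
    if (region.getRegionAdvisor().hasPRServerWithCQs()) {
      // route the event to members whose cache servers registered CQs
    }
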
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java
index 6f355d6..f295136 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java
@@ -406,6 +406,7 @@ public class RemoveAllPRMessage extends PartitionMessageWithDirectReply {
 
       if (!notificationOnly) {
         boolean locked = false;
+        boolean rvvLocked = false;
         try {
           if (removeAllPRData.length > 0) {
             if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
@@ -431,6 +432,10 @@ public class RemoveAllPRMessage extends PartitionMessageWithDirectReply {
             bucketRegion.recordBulkOpStart(membershipID, eventID);
           }
           locked = bucketRegion.waitUntilLocked(keys);
+          if (!rvvLocked) {
+            bucketRegion.lockRVVForBulkOp();
+            rvvLocked = true;
+          }
           boolean lockedForPrimary = false;
           final ArrayList<Object> succeeded = new ArrayList<Object>();
           PutAllPartialResult partialKeys = new PutAllPartialResult(removeAllPRDataSize);
@@ -526,6 +531,10 @@ public class RemoveAllPRMessage extends PartitionMessageWithDirectReply {
         } catch (RegionDestroyedException e) {
           ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
         } finally {
+          if (rvvLocked) {
+            bucketRegion.unlockRVVForBulkOp();
+            rvvLocked = false;
+          }
           if (locked) {
             bucketRegion.removeAndNotifyKeys(keys);
           }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/versions/RegionVersionVector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/versions/RegionVersionVector.java
index 13a0da7..5272f10 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/versions/RegionVersionVector.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/versions/RegionVersionVector.java
@@ -320,7 +320,7 @@ public abstract class RegionVersionVector<T extends VersionSource<?>>
         // this method is invoked by memberDeparted events and may not be for the current lock owner
         return;
       }
-      unlockVersionGeneration(locker);
+      unlockVersionGeneration();
     }
   }
 
@@ -416,7 +416,7 @@ public abstract class RegionVersionVector<T extends VersionSource<?>>
 
   }
 
-  private void unlockVersionGeneration(final InternalDistributedMember locker) {
+  private void unlockVersionGeneration() {
     synchronized (clearLockSync) {
       this.doUnlock = true;
       this.clearLockSync.notifyAll();
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java b/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java
index e25fe77..5100031 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java
@@ -862,6 +862,14 @@ public class CliStrings {
   public static final String CLEAR_DEFINED_INDEX__SUCCESS__MSG =
       "Index definitions successfully cleared";
 
+  /* clear region */
+  public static final String CLEAR_REGION = "clear region";
+  public static final String CLEAR_REGION_HELP =
+      "Clears/Removes all keys from the specified region.";
+  public static final String CLEAR_REGION_REGION_NAME = "name";
+  public static final String CLEAR_REGION_REGION_NAME_HELP = "Region to clear keys from.";
+  public static final String CLEAR_REGION_CLEARED_ALL_KEYS = "Cleared all keys in the region";
+
   /* create region */
   public static final String CREATE_REGION = "create region";
   public static final String CREATE_REGION__HELP =
@@ -1981,9 +1989,9 @@ public class CliStrings {
   public static final String REMOVE__MSG__KEY_EMPTY = "Key is Null";
   public static final String REMOVE__MSG__REGION_NOT_FOUND = "Region <{0}> Not Found";
   public static final String REMOVE__MSG__KEY_NOT_FOUND_REGION = "Key is not present in the region";
-  public static final String REMOVE__MSG__CLEARED_ALL_CLEARS = "Cleared all keys in the region";
-  public static final String REMOVE__MSG__CLEARALL_NOT_SUPPORTED_FOR_PARTITIONREGION =
-      "Option --" + REMOVE__ALL + " is not supported on partitioned region";
+  public static final String REMOVE__MSG__CLEARALL_DEPRECATION_WARNING =
+      "Warning: The --all option for the 'remove' command is deprecated. Please"
+          + " use the 'clear' command instead.";
 
   /* resume gateway-sender */
   public static final String RESUME_GATEWAYSENDER = "resume gateway-sender";
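
Based on the CLEAR_REGION, CLEAR_REGION_REGION_NAME, and CLEAR_REGION_CLEARED_ALL_KEYS
constants above, the resulting gfsh invocation would look like the following; the
region name is illustrative:

    gfsh> clear region --name=/exampleRegion
    Cleared all keys in the region
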
diff --git a/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt b/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
index 69b6ce0..742241e 100644
--- a/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
+++ b/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
@@ -79,6 +79,7 @@ org/apache/geode/cache/NoSubscriptionServersAvailableException,true,848408601915
 org/apache/geode/cache/Operation,true,-7521751729852504238,ordinal:byte
 org/apache/geode/cache/OperationAbortedException,true,-8293166225026556949
 org/apache/geode/cache/PartitionedRegionDistributionException,true,-3004093739855972548
+org/apache/geode/cache/PartitionedRegionPartialClearException,false
 org/apache/geode/cache/PartitionedRegionStorageException,true,5905463619475329732
 org/apache/geode/cache/RegionAccessException,true,3142958723089038406
 org/apache/geode/cache/RegionDestroyedException,true,319804842308010754,regionFullPath:java/lang/String
@@ -302,6 +303,7 @@ org/apache/geode/internal/cache/PRContainsValueFunction,false
 org/apache/geode/internal/cache/PRHARedundancyProvider$ArrayListWithClearState,true,1,wasCleared:boolean
 org/apache/geode/internal/cache/PartitionedRegion$PRIdMap,true,3667357372967498179,cleared:boolean
 org/apache/geode/internal/cache/PartitionedRegion$SizeEntry,false,isPrimary:boolean,size:int
+org/apache/geode/internal/cache/PartitionedRegionClearMessage$OperationType,false
 org/apache/geode/internal/cache/PartitionedRegionDataStore$CreateBucketResult,false,nowExists:boolean
 org/apache/geode/internal/cache/PartitionedRegionException,true,5113786059279106007
 org/apache/geode/internal/cache/PartitionedRegionQueryEvaluator$MemberResultsList,false,isLastChunkReceived:boolean
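
For readers unfamiliar with this file: each sanctioned-serializables entry appears to
record the class name, whether it declares a serialVersionUID, the UID when present,
and the serializable fields as name:type pairs, which is why the two new ",false"
entries carry no further columns.
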
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java
index 72e6657..0d1cc87 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionJUnitTest.java
@@ -14,7 +14,9 @@
  */
 package org.apache.geode.internal.cache;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.anyLong;
@@ -31,7 +33,10 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.junit.Test;
+
 import org.apache.geode.cache.RegionAttributes;
+import org.apache.geode.internal.cache.versions.RegionVersionVector;
 import org.apache.geode.internal.statistics.StatisticsClock;
 
 public class BucketRegionJUnitTest extends DistributedRegionJUnitTest {
@@ -46,7 +51,9 @@ public class BucketRegionJUnitTest extends DistributedRegionJUnitTest {
     when(ba.getPrimaryMoveReadLock()).thenReturn(primaryMoveReadLock);
     when(ba.getProxyBucketRegion()).thenReturn(mock(ProxyBucketRegion.class));
     when(ba.isPrimary()).thenReturn(true);
-
+    PartitionedRegionClear clearPR = mock(PartitionedRegionClear.class);
+    when(clearPR.isLockedForListenerAndClientNotification()).thenReturn(true);
+    when(pr.getPartitionedRegionClear()).thenReturn(clearPR);
     ira.setPartitionedRegion(pr).setPartitionedRegionBucketRedundancy(1).setBucketAdvisor(ba);
   }
 
@@ -128,4 +135,80 @@ public class BucketRegionJUnitTest extends DistributedRegionJUnitTest {
     }
   }
 
+  @Test
+  public void cmnClearRegionWillDoNothingIfNotPrimary() {
+    RegionEventImpl event = createClearRegionEvent();
+    BucketRegion region = (BucketRegion) event.getRegion();
+    BucketAdvisor ba = mock(BucketAdvisor.class);
+    RegionVersionVector rvv = mock(RegionVersionVector.class);
+    doReturn(rvv).when(region).getVersionVector();
+    doReturn(ba).when(region).getBucketAdvisor();
+    when(ba.isPrimary()).thenReturn(false);
+    region.cmnClearRegion(event, true, true);
+    verify(region, never()).clearRegionLocally(eq(event), eq(true), eq(rvv));
+  }
+
+  @Test
+  public void cmnClearRegionCalledOnPrimary() {
+    RegionEventImpl event = createClearRegionEvent();
+    BucketRegion region = (BucketRegion) event.getRegion();
+    BucketAdvisor ba = mock(BucketAdvisor.class);
+    RegionVersionVector rvv = mock(RegionVersionVector.class);
+    doReturn(rvv).when(region).getVersionVector();
+    doReturn(true).when(region).getConcurrencyChecksEnabled();
+    doReturn(ba).when(region).getBucketAdvisor();
+    doNothing().when(region).distributeClearOperation(any(), any(), any());
+    doNothing().when(region).lockLocallyForClear(any(), any(), any());
+    doNothing().when(region).lockAndFlushClearToOthers(any(), any());
+    doNothing().when(region).clearRegionLocally(event, true, null);
+    when(ba.isPrimary()).thenReturn(true);
+    region.cmnClearRegion(event, true, true);
+    verify(region, times(1)).clearRegionLocally(eq(event), eq(true), eq(null));
+  }
+
+  @Test
+  public void clearWillUseNullAsRVVWhenConcurrencyCheckDisabled() {
+    RegionEventImpl event = createClearRegionEvent();
+    BucketRegion region = (BucketRegion) event.getRegion();
+    BucketAdvisor ba = mock(BucketAdvisor.class);
+    doReturn(false).when(region).getConcurrencyChecksEnabled();
+    doReturn(ba).when(region).getBucketAdvisor();
+    doNothing().when(region).distributeClearOperation(any(), any(), any());
+    doNothing().when(region).lockLocallyForClear(any(), any(), any());
+    doNothing().when(region).lockAndFlushClearToOthers(any(), any());
+    doNothing().when(region).clearRegionLocally(event, true, null);
+    when(ba.isPrimary()).thenReturn(true);
+    region.cmnClearRegion(event, true, true);
+    verify(region, times(1)).clearRegionLocally(eq(event), eq(true), eq(null));
+  }
+
+  @Test
+  public void obtainWriteLocksForClearInBRShouldDistribute() {
+    RegionEventImpl event = createClearRegionEvent();
+    BucketRegion region = (BucketRegion) event.getRegion();
+    doNothing().when(region).lockLocallyForClear(any(), any(), any());
+    doNothing().when(region).lockAndFlushClearToOthers(any(), any());
+    region.obtainWriteLocksForClear(event, null, false);
+    verify(region).lockLocallyForClear(any(), any(), eq(event));
+    verify(region).lockAndFlushClearToOthers(eq(event), eq(null));
+  }
+
+  @Test
+  public void updateSizeToZeroOnClearBucketRegion() {
+    RegionEventImpl event = createClearRegionEvent();
+    BucketRegion region = (BucketRegion) event.getRegion();
+    PartitionedRegion pr = region.getPartitionedRegion();
+    PartitionedRegionDataStore prds = mock(PartitionedRegionDataStore.class);
+    PartitionedRegionStats prStats = mock(PartitionedRegionStats.class);
+    when(pr.getPrStats()).thenReturn(prStats);
+    doNothing().when(prStats).incDataStoreEntryCount(anyInt());
+    doNothing().when(prds).updateMemoryStats(anyInt());
+    when(pr.getDataStore()).thenReturn(prds);
+    region.updateSizeOnCreate("key1", 20);
+    long sizeBeforeClear = region.getTotalBytes();
+    assertEquals(20, sizeBeforeClear);
+    region.updateSizeOnClearRegion((int) sizeBeforeClear);
+    long sizeAfterClear = region.getTotalBytes();
+    assertEquals(0, sizeAfterClear);
+  }
 }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/CachePerfStatsTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/CachePerfStatsTest.java
index 7a81fdd..f1f303c 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/CachePerfStatsTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/CachePerfStatsTest.java
@@ -14,9 +14,9 @@
  */
 package org.apache.geode.internal.cache;
 
+import static org.apache.geode.internal.cache.CachePerfStats.bucketClearsId;
 import static org.apache.geode.internal.cache.CachePerfStats.cacheListenerCallsCompletedId;
 import static org.apache.geode.internal.cache.CachePerfStats.cacheWriterCallsCompletedId;
-import static org.apache.geode.internal.cache.CachePerfStats.clearsId;
 import static org.apache.geode.internal.cache.CachePerfStats.createsId;
 import static org.apache.geode.internal.cache.CachePerfStats.deltaFailedUpdatesId;
 import static org.apache.geode.internal.cache.CachePerfStats.deltaFullValuesRequestedId;
@@ -43,10 +43,13 @@ import static org.apache.geode.internal.cache.CachePerfStats.loadsCompletedId;
 import static org.apache.geode.internal.cache.CachePerfStats.missesId;
 import static org.apache.geode.internal.cache.CachePerfStats.netloadsCompletedId;
 import static org.apache.geode.internal.cache.CachePerfStats.netsearchesCompletedId;
+import static org.apache.geode.internal.cache.CachePerfStats.partitionedRegionClearLocalDurationId;
+import static org.apache.geode.internal.cache.CachePerfStats.partitionedRegionClearTotalDurationId;
 import static org.apache.geode.internal.cache.CachePerfStats.putAllsId;
 import static org.apache.geode.internal.cache.CachePerfStats.putTimeId;
 import static org.apache.geode.internal.cache.CachePerfStats.putsId;
 import static org.apache.geode.internal.cache.CachePerfStats.queryExecutionsId;
+import static org.apache.geode.internal.cache.CachePerfStats.regionClearsId;
 import static org.apache.geode.internal.cache.CachePerfStats.removeAllsId;
 import static org.apache.geode.internal.cache.CachePerfStats.retriesId;
 import static org.apache.geode.internal.cache.CachePerfStats.txCommitChangesId;
@@ -428,28 +431,58 @@ public class CachePerfStatsTest {
 
   @Test
   public void getClearsDelegatesToStatistics() {
-    statistics.incLong(clearsId, Long.MAX_VALUE);
+    statistics.incLong(regionClearsId, Long.MAX_VALUE);
 
-    assertThat(cachePerfStats.getClearCount()).isEqualTo(Long.MAX_VALUE);
+    assertThat(cachePerfStats.getRegionClearCount()).isEqualTo(Long.MAX_VALUE);
   }
 
   @Test
-  public void incClearCountIncrementsClears() {
-    cachePerfStats.incClearCount();
+  public void incRegionClearCountIncrementsClears() {
+    cachePerfStats.incRegionClearCount();
 
-    assertThat(statistics.getLong(clearsId)).isEqualTo(1L);
+    assertThat(statistics.getLong(regionClearsId)).isEqualTo(1L);
+  }
+
+  @Test
+  public void incBucketClearCountIncrementsClears() {
+    cachePerfStats.incBucketClearCount();
+
+    assertThat(statistics.getLong(bucketClearsId)).isEqualTo(1L);
+  }
+
+  @Test
+  public void incPartitionedRegionClearLocalDurationIncrementsPartitionedRegionClearLocalDuration() {
+    cachePerfStats.incPartitionedRegionClearLocalDuration(100L);
+
+    assertThat(statistics.getLong(partitionedRegionClearLocalDurationId)).isEqualTo(100L);
+  }
+
+  @Test
+  public void incPartitionedRegionClearTotalDurationIncrementsPartitionedRegionClearTotalDuration() {
+    cachePerfStats.incPartitionedRegionClearTotalDuration(100L);
+
+    assertThat(statistics.getLong(partitionedRegionClearTotalDurationId)).isEqualTo(100L);
   }
 
   /**
    * Characterization test: {@code clears} currently wraps to negative from max long value.
    */
   @Test
-  public void clearsWrapsFromMaxLongToNegativeValue() {
-    statistics.incLong(clearsId, Long.MAX_VALUE);
+  public void regionClearsWrapsFromMaxLongToNegativeValue() {
+    statistics.incLong(regionClearsId, Long.MAX_VALUE);
+
+    cachePerfStats.incRegionClearCount();
+
+    assertThat(cachePerfStats.getRegionClearCount()).isNegative();
+  }
+
+  @Test
+  public void bucketClearsWrapsFromMaxLongToNegativeValue() {
+    statistics.incLong(bucketClearsId, Long.MAX_VALUE);
 
-    cachePerfStats.incClearCount();
+    cachePerfStats.incBucketClearCount();
 
-    assertThat(cachePerfStats.getClearCount()).isNegative();
+    assertThat(cachePerfStats.getBucketClearCount()).isNegative();
   }
 
   @Test
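
The two wrap-around tests above are characterization tests: they pin down the existing
two's-complement overflow behavior rather than guard against it. In Java:

    long counter = Long.MAX_VALUE;
    counter += 1;                      // wraps silently, no exception
    assert counter == Long.MIN_VALUE;  // documents the observed behavior
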
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/DistributedRegionJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/DistributedRegionJUnitTest.java
index 9fbd8fc..ca53ced 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/DistributedRegionJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/DistributedRegionJUnitTest.java
@@ -14,6 +14,7 @@
  */
 package org.apache.geode.internal.cache;
 
+import static org.apache.geode.internal.Assert.fail;
 import static org.apache.geode.internal.statistics.StatisticsClockFactory.disabledClock;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertFalse;
@@ -53,6 +54,14 @@ public class DistributedRegionJUnitTest
   @Override
   protected void setInternalRegionArguments(InternalRegionArguments ira) {}
 
+  protected RegionEventImpl createClearRegionEvent() {
+    DistributedRegion region = prepare(true, true);
+    DistributedMember member = mock(DistributedMember.class);
+    RegionEventImpl regionEvent = new RegionEventImpl(region, Operation.REGION_CLEAR, null, false,
+        member, true);
+    return regionEvent;
+  }
+
   @Override
   protected DistributedRegion createAndDefineRegion(boolean isConcurrencyChecksEnabled,
       RegionAttributes ra, InternalRegionArguments ira, GemFireCacheImpl cache,
@@ -246,4 +255,13 @@ public class DistributedRegionJUnitTest
     region.basicBridgeReplace("key1", "value1", false, null, client, true, clientEvent);
     assertThat(clientEvent.getVersionTag().equals(tag));
   }
+
+  @Test(expected = UnsupportedOperationException.class)
+  public void localClearIsNotSupportedOnReplicatedRegion() {
+    RegionEventImpl event = createClearRegionEvent();
+    DistributedRegion region = (DistributedRegion) event.getRegion();
+    region.basicLocalClear(event);
+    fail("Expect UnsupportedOperationException");
+  }
+
 }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/LocalRegionTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/LocalRegionTest.java
index efeaf46..c024d9c 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/LocalRegionTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/LocalRegionTest.java
@@ -23,6 +23,7 @@ import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -290,4 +291,25 @@ public class LocalRegionTest {
     assertThat(object).isNotSameAs(result);
     assertThat(object).isSameAs(newResult);
   }
+
+  @Test
+  public void cancelAllEntryExpiryTasksShouldClearMapOfExpiryTasks() {
+    when(cache.getExpirationScheduler()).thenReturn(mock(ExpirationScheduler.class));
+    LocalRegion region =
+        spy(new LocalRegion("region", regionAttributes, null, cache, internalRegionArguments,
+            internalDataView, regionMapConstructor, serverRegionProxyConstructor, entryEventFactory,
+            poolFinder, regionPerfStatsFactory, disabledClock()));
+
+    RegionEntry regionEntry1 = mock(RegionEntry.class);
+    RegionEntry regionEntry2 = mock(RegionEntry.class);
+    EntryExpiryTask entryExpiryTask1 = spy(new EntryExpiryTask(region, regionEntry1));
+    EntryExpiryTask entryExpiryTask2 = spy(new EntryExpiryTask(region, regionEntry2));
+    region.entryExpiryTasks.put(regionEntry1, entryExpiryTask1);
+    region.entryExpiryTasks.put(regionEntry2, entryExpiryTask2);
+
+    region.cancelAllEntryExpiryTasks();
+    assertThat(region.entryExpiryTasks).isEmpty();
+    verify(entryExpiryTask1, times(1)).cancel();
+    verify(entryExpiryTask2, times(1)).cancel();
+  }
 }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
new file mode 100644
index 0000000..3b66e67
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
@@ -0,0 +1,897 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static java.util.Collections.emptySet;
+import static java.util.Collections.singleton;
+import static org.apache.geode.internal.cache.FilterProfile.NO_PROFILES;
+import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.catchThrowable;
+import static org.mockito.ArgumentCaptor.forClass;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+import org.apache.geode.CancelCriterion;
+import org.apache.geode.cache.PartitionedRegionPartialClearException;
+import org.apache.geode.cache.Region;
+import org.apache.geode.distributed.DistributedLockService;
+import org.apache.geode.distributed.internal.DMStats;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.MembershipListener;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.PartitionedRegion.RetryTimeKeeper;
+import org.apache.geode.internal.cache.PartitionedRegionClear.PartitionedRegionClearListener;
+import org.apache.geode.internal.cache.PartitionedRegionClearMessage.OperationType;
+import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
+import org.apache.geode.internal.serialization.KnownVersion;
+
+public class PartitionedRegionClearTest {
+
+  private DistributionManager distributionManager;
+  private PartitionedRegion partitionedRegion;
+  private RegionAdvisor regionAdvisor;
+  private InternalDistributedMember internalDistributedMember;
+
+  private PartitionedRegionClear partitionedRegionClear;
+
+  @Before
+  public void setUp() {
+    distributionManager = mock(DistributionManager.class);
+    internalDistributedMember = mock(InternalDistributedMember.class);
+    partitionedRegion = mock(PartitionedRegion.class);
+    regionAdvisor = mock(RegionAdvisor.class);
+
+    when(distributionManager.getDistributionManagerId()).thenReturn(internalDistributedMember);
+    when(distributionManager.getId()).thenReturn(internalDistributedMember);
+    when(internalDistributedMember.getVersion()).thenReturn(KnownVersion.CURRENT);
+    when(partitionedRegion.getDistributionManager()).thenReturn(distributionManager);
+    when(partitionedRegion.getName()).thenReturn("prRegion");
+    when(partitionedRegion.getRegionAdvisor()).thenReturn(regionAdvisor);
+    when(regionAdvisor.getDistributionManager()).thenReturn(distributionManager);
+
+    partitionedRegionClear = new PartitionedRegionClear(partitionedRegion);
+  }
+
+  @Test
+  public void isLockedForListenerAndClientNotificationReturnsTrueWhenLocked() {
+    // arrange
+    when(distributionManager.isCurrentMember(internalDistributedMember)).thenReturn(true);
+    partitionedRegionClear.obtainClearLockLocal(internalDistributedMember);
+
+    // act
+    boolean result = partitionedRegionClear.isLockedForListenerAndClientNotification();
+
+    // assert
+    assertThat(result).isTrue();
+  }
+
+  @Test
+  public void isLockedForListenerAndClientNotificationReturnsFalseWhenMemberNotInTheSystemRequestsLock() {
+    // arrange
+    when(distributionManager.isCurrentMember(internalDistributedMember)).thenReturn(false);
+
+    // act
+    boolean result = partitionedRegionClear.isLockedForListenerAndClientNotification();
+
+    // assert
+    assertThat(result).isFalse();
+  }
+
+  @Test
+  public void acquireDistributedClearLockGetsDistributedLock() {
+    // arrange
+    DistributedLockService distributedLockService = mock(DistributedLockService.class);
+    String lockName = PartitionedRegionClear.CLEAR_OPERATION + partitionedRegion.getName();
+    when(partitionedRegion.getPartitionedRegionLockService()).thenReturn(distributedLockService);
+
+    // act
+    partitionedRegionClear.acquireDistributedClearLock(lockName);
+
+    // assert
+    verify(distributedLockService).lock(lockName, -1, -1);
+  }
+
+  @Test
+  public void releaseDistributedClearLockReleasesDistributedLock() {
+    // arrange
+    DistributedLockService distributedLockService = mock(DistributedLockService.class);
+    String lockName = PartitionedRegionClear.CLEAR_OPERATION + partitionedRegion.getName();
+    when(partitionedRegion.getPartitionedRegionLockService()).thenReturn(distributedLockService);
+
+    // act
+    partitionedRegionClear.releaseDistributedClearLock(lockName);
+
+    // assert
+    verify(distributedLockService).unlock(lockName);
+  }
+
+  @Test
+  public void obtainLockForClearGetsLocalLockAndSendsMessageForRemote() throws Exception {
+    // arrange
+    Region<String, PartitionRegionConfig> region = uncheckedCast(mock(Region.class));
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    // partial mocking to stub some methods and verify
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    when(partitionedRegion.getPRRoot())
+        .thenReturn(region);
+    when(regionEvent.clone())
+        .thenReturn(mock(RegionEventImpl.class));
+    doReturn(emptySet())
+        .when(spyPartitionedRegionClear)
+        .attemptToSendPartitionedRegionClearMessage(regionEvent,
+            OperationType.OP_LOCK_FOR_PR_CLEAR);
+
+    // act
+    spyPartitionedRegionClear.obtainLockForClear(regionEvent);
+
+    // assert
+    verify(spyPartitionedRegionClear)
+        .obtainClearLockLocal(internalDistributedMember);
+    verify(spyPartitionedRegionClear)
+        .sendPartitionedRegionClearMessage(regionEvent, OperationType.OP_LOCK_FOR_PR_CLEAR);
+  }
+
+  @Test
+  public void releaseLockForClearReleasesLocalLockAndSendsMessageForRemote() throws Exception {
+    // arrange
+    Region<String, PartitionRegionConfig> region = uncheckedCast(mock(Region.class));
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    when(partitionedRegion.getPRRoot())
+        .thenReturn(region);
+    when(regionEvent.clone())
+        .thenReturn(mock(RegionEventImpl.class));
+
+    // partial mocking to stub some methods and verify
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doReturn(emptySet())
+        .when(spyPartitionedRegionClear)
+        .attemptToSendPartitionedRegionClearMessage(regionEvent,
+            OperationType.OP_UNLOCK_FOR_PR_CLEAR);
+
+    // act
+    spyPartitionedRegionClear.releaseLockForClear(regionEvent);
+
+    // assert
+    verify(spyPartitionedRegionClear)
+        .releaseClearLockLocal();
+    verify(spyPartitionedRegionClear)
+        .sendPartitionedRegionClearMessage(regionEvent, OperationType.OP_UNLOCK_FOR_PR_CLEAR);
+  }
+
+  @Test
+  public void clearRegionClearsLocalAndSendsMessageForRemote() throws Exception {
+    // arrange
+    Region<String, PartitionRegionConfig> region = uncheckedCast(mock(Region.class));
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    when(partitionedRegion.getPRRoot())
+        .thenReturn(region);
+    when(regionEvent.clone())
+        .thenReturn(mock(RegionEventImpl.class));
+
+    // partial mocking to stub some methods and verify
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doReturn(emptySet())
+        .when(spyPartitionedRegionClear)
+        .attemptToSendPartitionedRegionClearMessage(regionEvent, OperationType.OP_PR_CLEAR);
+
+    // act
+    spyPartitionedRegionClear.clearRegion(regionEvent);
+
+    // assert
+    verify(spyPartitionedRegionClear)
+        .clearRegionLocal(regionEvent);
+    verify(spyPartitionedRegionClear)
+        .sendPartitionedRegionClearMessage(regionEvent, OperationType.OP_PR_CLEAR);
+  }
+
+  @Test
+  public void waitForPrimaryReturnsAfterFindingAllPrimary() {
+    // arrange
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+    RetryTimeKeeper retryTimer = mock(RetryTimeKeeper.class);
+    when(bucketAdvisor.hasPrimary()).thenReturn(true);
+    when(partitionedRegion.getDataStore()).thenReturn(dataStore);
+    setupBucketRegions(dataStore, bucketAdvisor);
+
+    // act
+    partitionedRegionClear.waitForPrimary(retryTimer);
+
+    // assert
+    verify(retryTimer, never()).waitForBucketsRecovery();
+  }
+
+  @Test
+  public void waitForPrimaryReturnsAfterRetryForPrimary() {
+    // arrange
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+    RetryTimeKeeper retryTimer = mock(RetryTimeKeeper.class);
+    when(bucketAdvisor.hasPrimary()).thenReturn(false).thenReturn(true);
+    when(partitionedRegion.getDataStore()).thenReturn(dataStore);
+    setupBucketRegions(dataStore, bucketAdvisor);
+
+    // act
+    partitionedRegionClear.waitForPrimary(retryTimer);
+
+    // assert
+    verify(retryTimer).waitForBucketsRecovery();
+  }
+
+  @Test
+  public void waitForPrimaryThrowsPartitionedRegionPartialClearException() {
+    // arrange
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+    RetryTimeKeeper retryTimer = mock(RetryTimeKeeper.class);
+    when(partitionedRegion.getDataStore())
+        .thenReturn(dataStore);
+    when(retryTimer.overMaximum())
+        .thenReturn(true);
+    setupBucketRegions(dataStore, bucketAdvisor);
+
+    // act
+    Throwable thrown = catchThrowable(() -> partitionedRegionClear.waitForPrimary(retryTimer));
+
+    // assert
+    assertThat(thrown)
+        .isInstanceOf(PartitionedRegionPartialClearException.class)
+        .hasMessage(
+            "Unable to find primary bucket region during clear operation on prRegion region.");
+    verify(retryTimer, never())
+        .waitForBucketsRecovery();
+  }
+
+  @Test
+  public void clearRegionLocalCallsClearOnLocalPrimaryBucketRegions() {
+    // arrange
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(bucketAdvisor.hasPrimary()).thenReturn(true);
+    when(partitionedRegion.getDataStore()).thenReturn(dataStore);
+    doNothing().when(dataStore).lockBucketCreationForRegionClear();
+    Set<BucketRegion> buckets = setupBucketRegions(dataStore, bucketAdvisor);
+
+    // act
+    Set<Integer> bucketsCleared = partitionedRegionClear.clearRegionLocal(regionEvent);
+
+    // assert
+    assertThat(bucketsCleared).hasSameSizeAs(buckets);
+    ArgumentCaptor<RegionEventImpl> captor = forClass(RegionEventImpl.class);
+    for (BucketRegion bucketRegion : buckets) {
+      verify(bucketRegion).cmnClearRegion(captor.capture(), eq(false), eq(true));
+      Region<?, ?> region = captor.getValue().getRegion();
+      assertThat(region).isEqualTo(bucketRegion);
+    }
+  }
+
+  @Test
+  public void clearRegionLocalRetriesClearOnNonClearedLocalPrimaryBucketRegionsWhenMembershipChanges() {
+    // arrange
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    when(bucketAdvisor.hasPrimary())
+        .thenReturn(true);
+    doNothing()
+        .when(dataStore)
+        .lockBucketCreationForRegionClear();
+
+    Set<BucketRegion> buckets = setupBucketRegions(dataStore, bucketAdvisor);
+
+    Set<BucketRegion> allBuckets = new HashSet<>(buckets);
+    for (int i = 0; i < 3; i++) {
+      BucketRegion bucketRegion = mock(BucketRegion.class);
+      when(bucketRegion.getBucketAdvisor())
+          .thenReturn(bucketAdvisor);
+      when(bucketRegion.getId())
+          .thenReturn(i + buckets.size());
+      when(bucketRegion.size())
+          .thenReturn(1);
+      allBuckets.add(bucketRegion);
+    }
+
+    // After the first try, add 3 extra buckets to the local bucket regions
+    when(dataStore.getAllLocalBucketRegions())
+        .thenReturn(buckets)
+        .thenReturn(allBuckets);
+    when(dataStore.getAllLocalPrimaryBucketRegions())
+        .thenReturn(buckets)
+        .thenReturn(allBuckets);
+    when(partitionedRegion.getDataStore())
+        .thenReturn(dataStore);
+
+    // partial mocking to stub some methods
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
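+    // Report a membership change on the first check so clearRegionLocal retries once.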
+    when(spyPartitionedRegionClear.getMembershipChange())
+        .thenReturn(true)
+        .thenReturn(false);
+
+    // act
+    Set<Integer> bucketsCleared = spyPartitionedRegionClear.clearRegionLocal(regionEvent);
+
+    // assert
+    assertThat(bucketsCleared).hasSameSizeAs(allBuckets);
+    ArgumentCaptor<RegionEventImpl> captor = forClass(RegionEventImpl.class);
+    for (BucketRegion bucketRegion : allBuckets) {
+      verify(bucketRegion)
+          .cmnClearRegion(captor.capture(), eq(false), eq(true));
+      Region<?, ?> region = captor.getValue().getRegion();
+      assertThat(region).isEqualTo(bucketRegion);
+    }
+  }
+
+  @Test
+  public void doAfterClearCallsNotifyClientsWhenClientsHaveInterests() {
+    // arrange
+    FilterProfile filterProfile = mock(FilterProfile.class);
+    FilterRoutingInfo filterRoutingInfo = mock(FilterRoutingInfo.class);
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    when(filterProfile.getFilterRoutingInfoPart1(regionEvent, NO_PROFILES, emptySet()))
+        .thenReturn(filterRoutingInfo);
+    when(filterProfile.getFilterRoutingInfoPart2(filterRoutingInfo, regionEvent))
+        .thenReturn(filterRoutingInfo);
+    when(partitionedRegion.getFilterProfile()).thenReturn(filterProfile);
+    when(partitionedRegion.hasAnyClientsInterested())
+        .thenReturn(true);
+
+    // act
+    partitionedRegionClear.doAfterClear(regionEvent);
+
+    // assert
+    verify(regionEvent)
+        .setLocalFilterInfo(any());
+    verify(partitionedRegion)
+        .notifyBridgeClients(regionEvent);
+  }
+
+  @Test
+  public void doAfterClearDispatchesListenerEvents() {
+    // arrange
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    when(partitionedRegion.hasListener())
+        .thenReturn(true);
+
+    // act
+    partitionedRegionClear.doAfterClear(regionEvent);
+
+    // assert
+    verify(partitionedRegion)
+        .dispatchListenerEvent(EnumListenerEvent.AFTER_REGION_CLEAR, regionEvent);
+  }
+
+  @Test
+  public void obtainClearLockLocalGetsLockOnPrimaryBuckets() {
+    // arrange
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+
+    when(bucketAdvisor.hasPrimary())
+        .thenReturn(true);
+    when(distributionManager.isCurrentMember(internalDistributedMember))
+        .thenReturn(true);
+    when(partitionedRegion.getDataStore())
+        .thenReturn(dataStore);
+
+    Set<BucketRegion> buckets = setupBucketRegions(dataStore, bucketAdvisor);
+
+    // act
+    partitionedRegionClear.obtainClearLockLocal(internalDistributedMember);
+
+    // assert
+    // TODO: encapsulate lockForListenerAndClientNotification
+    assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
+        .isSameAs(internalDistributedMember);
+    for (BucketRegion bucketRegion : buckets) {
+      verify(bucketRegion)
+          .lockLocallyForClear(partitionedRegion.getDistributionManager(),
+              partitionedRegion.getMyId(), null);
+    }
+  }
+
+  @Test
+  public void obtainClearLockLocalDoesNotGetLocksOnPrimaryBucketsWhenMemberIsNotCurrent() {
+    // arrange
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+
+    when(bucketAdvisor.hasPrimary())
+        .thenReturn(true);
+    when(distributionManager.isCurrentMember(internalDistributedMember))
+        .thenReturn(false);
+    when(partitionedRegion.getDataStore())
+        .thenReturn(dataStore);
+
+    Set<BucketRegion> buckets = setupBucketRegions(dataStore, bucketAdvisor);
+
+    // act
+    partitionedRegionClear.obtainClearLockLocal(internalDistributedMember);
+
+    // assert
+    assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
+        .isNull();
+    for (BucketRegion bucketRegion : buckets) {
+      verify(bucketRegion, never())
+          .lockLocallyForClear(partitionedRegion.getDistributionManager(),
+              partitionedRegion.getMyId(), null);
+    }
+  }
+
+  @Test
+  public void releaseClearLockLocalReleasesLockOnPrimaryBuckets() {
+    // arrange
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+
+    when(bucketAdvisor.hasPrimary())
+        .thenReturn(true);
+    when(distributionManager.isCurrentMember(internalDistributedMember))
+        .thenReturn(true);
+    when(partitionedRegion.getDataStore())
+        .thenReturn(dataStore);
+
+    Set<BucketRegion> buckets = setupBucketRegions(dataStore, bucketAdvisor);
+
+    partitionedRegionClear.lockForListenerAndClientNotification
+        .setLocked(internalDistributedMember);
+
+    // act
+    partitionedRegionClear.releaseClearLockLocal();
+
+    // assert
+    for (BucketRegion bucketRegion : buckets) {
+      verify(bucketRegion)
+          .releaseLockLocallyForClear(null);
+    }
+  }
+
+  @Test
+  public void releaseClearLockLocalDoesNotReleaseLocksOnPrimaryBucketsWhenMemberIsNotCurrent() {
+    // arrange
+    BucketAdvisor bucketAdvisor = mock(BucketAdvisor.class);
+    PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+
+    when(bucketAdvisor.hasPrimary())
+        .thenReturn(true);
+    when(partitionedRegion.getDataStore())
+        .thenReturn(dataStore);
+
+    Set<BucketRegion> buckets = setupBucketRegions(dataStore, bucketAdvisor);
+
+    // act
+    partitionedRegionClear.releaseClearLockLocal();
+
+    // assert
+    assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
+        .isNull();
+    for (BucketRegion bucketRegion : buckets) {
+      verify(bucketRegion, never())
+          .releaseLockLocallyForClear(null);
+    }
+  }
+
+  @Test
+  public void sendPartitionedRegionClearMessageSendsClearMessageToPRNodes() {
+    // arrange
+    InternalCache internalCache = mock(InternalCache.class);
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
+    Node node = mock(Node.class);
+    PartitionRegionConfig partitionRegionConfig = mock(PartitionRegionConfig.class);
+    Region<String, PartitionRegionConfig> prRoot = uncheckedCast(mock(Region.class));
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    InternalDistributedSystem system = mock(InternalDistributedSystem.class);
+    TXManagerImpl txManager = mock(TXManagerImpl.class);
+
+    Set<InternalDistributedMember> prNodes = singleton(member);
+    Set<Node> configNodes = singleton(node);
+
+    when(distributionManager.getCancelCriterion())
+        .thenReturn(mock(CancelCriterion.class));
+    when(distributionManager.getStats())
+        .thenReturn(mock(DMStats.class));
+    when(internalCache.getTxManager())
+        .thenReturn(txManager);
+    when(member.getVersion())
+        .thenReturn(KnownVersion.getCurrentVersion());
+    when(node.getMemberId())
+        .thenReturn(member);
+    when(partitionRegionConfig.getNodes())
+        .thenReturn(configNodes);
+    when(partitionedRegion.getPRRoot())
+        .thenReturn(prRoot);
+    when(regionAdvisor.adviseAllPRNodes())
+        .thenReturn(prNodes);
+    when(regionEvent.clone())
+        .thenReturn(mock(RegionEventImpl.class));
+    when(partitionedRegion.getSystem())
+        .thenReturn(system);
+    when(prRoot.get(anyString()))
+        .thenReturn(partitionRegionConfig);
+    when(system.getDistributionManager())
+        .thenReturn(distributionManager);
+    when(txManager.isDistributed())
+        .thenReturn(false);
+    when(partitionedRegion.getCache())
+        .thenReturn(internalCache);
+
+    // act
+    partitionedRegionClear
+        .sendPartitionedRegionClearMessage(regionEvent, OperationType.OP_PR_CLEAR);
+
+    // assert
+    verify(distributionManager)
+        .putOutgoing(any());
+  }
+
+  @Test
+  public void doClearAcquiresAndReleasesDistributedClearLockAndCreatesAllPrimaryBuckets() {
+    // arrange
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    // partial mocking to stub some methods and verify
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doReturn(emptySet()).when(spyPartitionedRegionClear).clearRegion(regionEvent);
+
+    // act
+    spyPartitionedRegionClear.doClear(regionEvent, false);
+
+    // assert
+    verify(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    verify(spyPartitionedRegionClear).releaseDistributedClearLock(any());
+    verify(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+  }
+
+  @Test
+  public void doClearInvokesCacheWriterWhenCacheWriteIsSet() {
+    // arrange
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    // partial mocking to stub some methods and verify
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doReturn(emptySet()).when(spyPartitionedRegionClear).clearRegion(regionEvent);
+
+    // act
+    spyPartitionedRegionClear.doClear(regionEvent, true);
+
+    // assert
+    verify(spyPartitionedRegionClear).invokeCacheWriter(regionEvent);
+  }
+
+  @Test
+  public void doClearDoesNotInvokeCacheWriterWhenCacheWriteIsNotSet() {
+    // arrange
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    // partial mocking to stub some methods and verify
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doReturn(emptySet()).when(spyPartitionedRegionClear).clearRegion(regionEvent);
+
+    // act
+    spyPartitionedRegionClear.doClear(regionEvent, false);
+
+    // assert
+    verify(spyPartitionedRegionClear, never()).invokeCacheWriter(regionEvent);
+  }
+
+  @Test
+  public void doClearObtainsAndReleasesLockForClearWhenRegionHasListener() {
+    // arrange
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(false);
+    when(partitionedRegion.hasListener()).thenReturn(true);
+
+    // partial mocking to stub some methods and verify
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+    doReturn(emptySet()).when(spyPartitionedRegionClear).clearRegion(regionEvent);
+
+    // act
+    spyPartitionedRegionClear.doClear(regionEvent, false);
+
+    // assert
+    verify(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    verify(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+  }
+
+  @Test
+  public void doClearObtainsAndReleasesLockForClearWhenRegionHasClientInterest() {
+    // arrange
+    boolean cacheWrite = false;
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(true);
+    when(partitionedRegion.hasListener()).thenReturn(false);
+
+    // partial mocking to stub some methods and verify
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+    doReturn(emptySet()).when(spyPartitionedRegionClear).clearRegion(regionEvent);
+
+    // act
+    spyPartitionedRegionClear.doClear(regionEvent, cacheWrite);
+
+    // assert
+    verify(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    verify(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+  }
+
+  @Test
+  public void doClearDoesNotObtainLockForClearWhenRegionHasNoListenerAndNoClientInterest() {
+    // arrange
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(false);
+    when(partitionedRegion.hasListener()).thenReturn(false);
+
+    // partial mocking to stub some methods and verify
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+    doReturn(emptySet()).when(spyPartitionedRegionClear).clearRegion(regionEvent);
+
+    // act
+    spyPartitionedRegionClear.doClear(regionEvent, false);
+
+    // assert
+    verify(spyPartitionedRegionClear, never()).obtainLockForClear(regionEvent);
+    verify(spyPartitionedRegionClear, never()).releaseLockForClear(regionEvent);
+  }
+
+  @Test
+  public void doClearThrowsPartitionedRegionPartialClearException() {
+    // arrange
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+
+    when(partitionedRegion.hasListener()).thenReturn(false);
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(false);
+    when(partitionedRegion.getTotalNumberOfBuckets()).thenReturn(1);
+    when(partitionedRegion.getName()).thenReturn("prRegion");
+
+    // partial mocking to stub some methods
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+    doReturn(emptySet()).when(spyPartitionedRegionClear).clearRegion(regionEvent);
+
+    // act
+    Throwable thrown =
+        catchThrowable(() -> spyPartitionedRegionClear.doClear(regionEvent, false));
+
+    // assert
+    assertThat(thrown)
+        .isInstanceOf(PartitionedRegionPartialClearException.class)
+        .hasMessage(
+            "Unable to clear all the buckets from the partitioned region prRegion, either data (buckets) moved or member departed.");
+  }
+
+  @Test
+  public void doClearThrowsUnsupportedOperationException() {
+    // arrange
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
+    InternalDistributedMember oldMember = mock(InternalDistributedMember.class);
+    Node node = mock(Node.class);
+    Node oldNode = mock(Node.class);
+
+    Set<InternalDistributedMember> prNodes = new HashSet<>();
+    prNodes.add(member);
+    prNodes.add(oldMember);
+
+    Set<Node> configNodes = new HashSet<>();
+    configNodes.add(node);
+    configNodes.add(oldNode);
+
+    InternalCache cache = mock(InternalCache.class);
+    PartitionRegionConfig partitionRegionConfig = mock(PartitionRegionConfig.class);
+    Region<String, PartitionRegionConfig> prRoot = uncheckedCast(mock(Region.class));
+    RegionEventImpl regionEvent = mock(RegionEventImpl.class);
+    InternalDistributedSystem system = mock(InternalDistributedSystem.class);
+    TXManagerImpl txManager = mock(TXManagerImpl.class);
+
+    when(member.getName()).thenReturn("member");
+    when(oldMember.getName()).thenReturn("oldMember");
+    when(node.getMemberId()).thenReturn(member);
+    when(oldNode.getMemberId()).thenReturn(oldMember);
+    when(member.getVersion()).thenReturn(KnownVersion.getCurrentVersion());
+    when(oldMember.getVersion()).thenReturn(KnownVersion.GEODE_1_11_0);
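+    // oldMember reports a pre-1.14.0 version, so the clear must be rejected.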
+
+    when(cache.getTxManager()).thenReturn(txManager);
+    when(distributionManager.getCancelCriterion()).thenReturn(mock(CancelCriterion.class));
+    when(distributionManager.getStats()).thenReturn(mock(DMStats.class));
+    when(partitionRegionConfig.getNodes()).thenReturn(configNodes);
+    when(partitionedRegion.getBucketPrimary(0)).thenReturn(member);
+    when(partitionedRegion.getBucketPrimary(1)).thenReturn(oldMember);
+    when(partitionedRegion.getCache()).thenReturn(cache);
+    when(partitionedRegion.getName()).thenReturn("prRegion");
+    when(partitionedRegion.getPRRoot()).thenReturn(prRoot);
+    when(partitionedRegion.getSystem()).thenReturn(system);
+    when(partitionedRegion.getTotalNumberOfBuckets()).thenReturn(2);
+    when(partitionedRegion.hasListener()).thenReturn(false);
+    when(partitionedRegion.hasAnyClientsInterested()).thenReturn(false);
+    when(prRoot.get(anyString())).thenReturn(partitionRegionConfig);
+    when(regionAdvisor.adviseAllPRNodes()).thenReturn(prNodes);
+    when(regionEvent.clone()).thenReturn(mock(RegionEventImpl.class));
+    when(system.getDistributionManager()).thenReturn(distributionManager);
+    when(txManager.isDistributed()).thenReturn(false);
+
+    // partial mocking to stub some methods
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+    doNothing().when(spyPartitionedRegionClear).acquireDistributedClearLock(any());
+    doNothing().when(spyPartitionedRegionClear).assignAllPrimaryBuckets();
+    doNothing().when(spyPartitionedRegionClear).obtainLockForClear(regionEvent);
+    doNothing().when(spyPartitionedRegionClear).releaseLockForClear(regionEvent);
+    doReturn(singleton("2")).when(spyPartitionedRegionClear).clearRegion(regionEvent);
+
+    // act
+    Throwable thrown =
+        catchThrowable(() -> spyPartitionedRegionClear.doClear(regionEvent, false));
+
+    // assert
+    assertThat(thrown)
+        .isInstanceOf(UnsupportedOperationException.class)
+        .hasMessage(
+            "A server's [oldMember] version was too old (< GEODE 1.14.0) for : Partitioned Region Clear");
+  }
+
+  @Test
+  public void handleClearFromDepartedMemberReleasesTheLockForRequesterDeparture() {
+    // arrange
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
+    partitionedRegionClear.lockForListenerAndClientNotification.setLocked(member);
+
+    // partial mocking to verify
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+
+    // act
+    spyPartitionedRegionClear.handleClearFromDepartedMember(member);
+
+    // assert
+    verify(spyPartitionedRegionClear).releaseClearLockLocal();
+  }
+
+  @Test
+  public void handleClearFromDepartedMemberDoesNotReleaseTheLockForNonRequesterDeparture() {
+    // arrange
+    InternalDistributedMember requesterMember = mock(InternalDistributedMember.class);
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
+    partitionedRegionClear.lockForListenerAndClientNotification.setLocked(requesterMember);
+
+    // partial mocking to verify
+    PartitionedRegionClear spyPartitionedRegionClear = spy(partitionedRegionClear);
+
+    // act
+    spyPartitionedRegionClear.handleClearFromDepartedMember(member);
+
+    // assert
+    verify(spyPartitionedRegionClear, never()).releaseClearLockLocal();
+  }
+
+  @Test
+  public void partitionedRegionClearRegistersMembershipListener() {
+    // assert
+    MembershipListener membershipListener =
+        partitionedRegionClear.getPartitionedRegionClearListener();
+    verify(distributionManager).addMembershipListener(membershipListener);
+  }
+
+  @Test
+  public void lockRequesterDepartureReleasesTheLock() {
+    // arrange
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
+    partitionedRegionClear.lockForListenerAndClientNotification.setLocked(member);
+    PartitionedRegionClearListener partitionedRegionClearListener =
+        partitionedRegionClear.getPartitionedRegionClearListener();
+
+    // act
+    partitionedRegionClearListener.memberDeparted(distributionManager, member, true);
+
+    // assert
+    assertThat(partitionedRegionClear.getMembershipChange())
+        .isTrue();
+    assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
+        .isNull();
+  }
+
+  @Test
+  public void nonLockRequesterDepartureDoesNotReleaseTheLock() {
+    // arrange
+    InternalDistributedMember requesterMember = mock(InternalDistributedMember.class);
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
+    partitionedRegionClear.lockForListenerAndClientNotification.setLocked(requesterMember);
+    PartitionedRegionClearListener partitionedRegionClearListener =
+        partitionedRegionClear.getPartitionedRegionClearListener();
+
+    // act
+    partitionedRegionClearListener.memberDeparted(distributionManager, member, true);
+
+    // assert
+    assertThat(partitionedRegionClear.getMembershipChange())
+        .isTrue();
+    assertThat(partitionedRegionClear.lockForListenerAndClientNotification.getLockRequester())
+        .isNotNull();
+  }
+
+  private Set<BucketRegion> setupBucketRegions(
+      PartitionedRegionDataStore dataStore,
+      BucketAdvisor bucketAdvisor) {
+    return setupBucketRegions(dataStore, bucketAdvisor, 2);
+  }
+
+  private Set<BucketRegion> setupBucketRegions(
+      PartitionedRegionDataStore dataStore,
+      BucketAdvisor bucketAdvisor,
+      int bucketCount) {
+    Set<BucketRegion> bucketRegions = new HashSet<>();
+
+    for (int i = 0; i < bucketCount; i++) {
+      BucketRegion bucketRegion = mock(BucketRegion.class);
+
+      when(bucketRegion.getBucketAdvisor())
+          .thenReturn(bucketAdvisor);
+      when(bucketRegion.getId())
+          .thenReturn(i);
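+      // Report one entry before the clear and zero afterwards.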
+      when(bucketRegion.size())
+          .thenReturn(1)
+          .thenReturn(0);
+
+      bucketRegions.add(bucketRegion);
+    }
+
+    when(dataStore.getAllLocalBucketRegions())
+        .thenReturn(bucketRegions);
+    when(dataStore.getAllLocalPrimaryBucketRegions())
+        .thenReturn(bucketRegions);
+
+    return bucketRegions;
+  }
+}
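
The unit tests above lean heavily on Mockito partial mocking: spy() wraps the real
PartitionedRegionClear, doReturn()/doNothing() stub only the collaborating methods,
and verify() checks the interactions. A minimal, self-contained sketch of that
pattern (the Worker class and its methods are illustrative, not part of the patch):

    import static org.assertj.core.api.Assertions.assertThat;
    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.spy;
    import static org.mockito.Mockito.verify;

    import org.junit.Test;

    public class SpyPatternExampleTest {

      static class Worker {
        int compute() {
          return collaborate() + 1;
        }

        int collaborate() {
          return 41; // stand-in for an expensive or distributed call
        }
      }

      @Test
      public void spyStubsOneMethodWhileExercisingAnother() {
        Worker spyWorker = spy(new Worker());
        // Stub only the collaborator; compute() still runs its real code.
        doReturn(100).when(spyWorker).collaborate();

        int result = spyWorker.compute();

        assertThat(result).isEqualTo(101);
        verify(spyWorker).collaborate();
      }
    }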
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
index 2e0914d..f99b74b 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
@@ -21,10 +21,13 @@ import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatCode;
 import static org.assertj.core.api.Assertions.catchThrowable;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.ArgumentMatchers.isNull;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
@@ -37,6 +40,7 @@ import static org.mockito.quality.Strictness.STRICT_STUBS;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -69,6 +73,7 @@ import org.apache.geode.distributed.internal.DistributionManager;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.cache.control.InternalResourceManager;
+import org.apache.geode.internal.cache.partitioned.ClearPRMessage;
 import org.apache.geode.internal.cache.partitioned.colocation.ColocationLoggerFactory;
 
 @RunWith(JUnitParamsRunner.class)
@@ -203,6 +208,28 @@ public class PartitionedRegionTest {
   }
 
   @Test
+  public void clearShouldNotThrowUnsupportedOperationException() {
+    PartitionedRegion spyPartitionedRegion = spy(partitionedRegion);
+    doNothing().when(spyPartitionedRegion).checkReadiness();
+    doCallRealMethod().when(spyPartitionedRegion).basicClear(any());
+    doNothing().when(spyPartitionedRegion).basicClear(any(), anyBoolean());
+    spyPartitionedRegion.clear();
+  }
+
+  @Test
+  public void createClearPRMessagesShouldCreateMessagePerBucket() {
+    PartitionedRegion spyPartitionedRegion = spy(partitionedRegion);
+    RegionEventImpl regionEvent =
+        new RegionEventImpl(spyPartitionedRegion, Operation.REGION_CLEAR, null, false,
+            spyPartitionedRegion.getMyId(), true);
+    when(spyPartitionedRegion.getTotalNumberOfBuckets()).thenReturn(3);
+    EventID eventID = new EventID(spyPartitionedRegion.getCache().getDistributedSystem());
+    List<ClearPRMessage> msgs = spyPartitionedRegion.createClearPRMessages(eventID);
+    assertThat(msgs.size()).isEqualTo(3);
+  }
+
... 1675 lines suppressed ...
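
The createClearPRMessagesShouldCreateMessagePerBucket test above pins down one
ClearPRMessage per bucket. A sketch of the shape that assertion implies, assuming
a ClearPRMessage(int bucketId, EventID eventID) constructor (the actual signature
in the patch may differ):

    // Sketch only: mirrors what the unit test asserts, not the patched source.
    List<ClearPRMessage> createClearPRMessages(EventID eventID) {
      int buckets = getTotalNumberOfBuckets();
      List<ClearPRMessage> messages = new ArrayList<>(buckets);
      for (int bucketId = 0; bucketId < buckets; bucketId++) {
        messages.add(new ClearPRMessage(bucketId, eventID));
      }
      return messages;
    }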

[geode] 10/17: Fixup AnalyzeCoreSerializablesJUnitTest for PartitionedRegionPartialClearException

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 8dacfe6e7765916edf829e03d6034a9bccce1bbf
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Fri Apr 16 11:40:37 2021 -0700

    Fixup AnalyzeCoreSerializablesJUnitTest for PartitionedRegionPartialClearException
---
 .../org/apache/geode/internal/sanctioned-geode-core-serializables.txt  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt b/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
index 742241e..3486e82 100644
--- a/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
+++ b/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt
@@ -79,7 +79,7 @@ org/apache/geode/cache/NoSubscriptionServersAvailableException,true,848408601915
 org/apache/geode/cache/Operation,true,-7521751729852504238,ordinal:byte
 org/apache/geode/cache/OperationAbortedException,true,-8293166225026556949
 org/apache/geode/cache/PartitionedRegionDistributionException,true,-3004093739855972548
-org/apache/geode/cache/PartitionedRegionPartialClearException,false
+org/apache/geode/cache/PartitionedRegionPartialClearException,true,-3420558263697703892
 org/apache/geode/cache/PartitionedRegionStorageException,true,5905463619475329732
 org/apache/geode/cache/RegionAccessException,true,3142958723089038406
 org/apache/geode/cache/RegionDestroyedException,true,319804842308010754,regionFullPath:java/lang/String
@@ -464,7 +464,6 @@ org/apache/geode/management/internal/functions/RebalanceFunction,true,1
 org/apache/geode/management/internal/functions/RestoreRedundancyFunction,true,-8991672237560920252
 org/apache/geode/management/internal/operation/OperationState,true,8212319653561969588,locator:java/lang/String,opId:java/lang/String,operation:org/apache/geode/management/api/ClusterManagementOperation,operationEnd:java/util/Date,operationStart:java/util/Date,result:org/apache/geode/management/runtime/OperationResult,throwable:java/lang/Throwable
 org/apache/geode/management/internal/web/domain/QueryParameterSource,true,34131123582155,objectName:javax/management/ObjectName,queryExpression:javax/management/QueryExp
-org/apache/geode/management/internal/web/shell/MBeanAccessException,true,813768898269516238
 org/apache/geode/pdx/FieldType,false,defaultSerializedValue:java/nio/ByteBuffer,defaultValue:java/lang/Object,isFixedWidth:boolean,name:java/lang/String,width:int
 org/apache/geode/pdx/JSONFormatter$states,false
 org/apache/geode/pdx/JSONFormatterException,true,1
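
An entry of the form name,true,<uid> in this file pins the class's serialVersionUID,
so AnalyzeCoreSerializablesJUnitTest fails unless the class declares the matching
constant. A minimal sketch of what the updated entry requires (the superclass is an
assumption; only the constant's value is dictated by the list):

    package org.apache.geode.cache;

    public class PartitionedRegionPartialClearException extends CacheRuntimeException {
      // Must match the value recorded in sanctioned-geode-core-serializables.txt.
      private static final long serialVersionUID = -3420558263697703892L;

      public PartitionedRegionPartialClearException(String message) {
        super(message);
      }
    }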

[geode] 17/17: GEODE-9132: Fix locking in PRClearCreateIndexDUnitTest

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 477b3fe85ca0f65138288ac0c5b73acb31f8f775
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Thu Apr 22 11:43:10 2021 -0700

    GEODE-9132: Fix locking in PRClearCreateIndexDUnitTest
---
 .../geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java    | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java
index 423932d..f4f2105 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java
@@ -103,7 +103,7 @@ public class PRClearCreateIndexDUnitTest implements Serializable {
 
     // assert that secondary member received these messages
     primary.invoke(() -> verifyEvents(false, false, false, false));
-    secondary.invoke(() -> verifyEvents(false, true, true, true));
+    secondary.invoke(() -> verifyEvents(true, true, true, true));
   }
 
   @Test
@@ -117,7 +117,7 @@ public class PRClearCreateIndexDUnitTest implements Serializable {
     clear.get();
 
     // assert that secondary member received these messages
-    primary.invoke(() -> verifyEvents(false, true, false, false));
+    primary.invoke(() -> verifyEvents(true, true, false, false));
     secondary.invoke(() -> verifyEvents(false, false, true, true));
   }
 

[geode] 12/17: GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 2

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 5e007a606b8377ebcbe45e3a2a512470aaa0a563
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Mon Apr 19 16:16:17 2021 -0700

    GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 2
---
 ...gionClearWithConcurrentOperationsDUnitTest.java | 782 ++++++++++-----------
 1 file changed, 378 insertions(+), 404 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
index c9a1e5b..b2aacc0 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
@@ -72,8 +72,6 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
 
   private static final int BUCKETS = 13;
   private static final String REGION_NAME = "PartitionedRegion";
-  private static final String TEST_CASE_NAME =
-      "[{index}] {method}(Coordinator:{0}, RegionType:{1})";
 
   @Rule
   public DistributedRule distributedRule = new DistributedRule(3);
@@ -84,32 +82,6 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   private VM server2;
   private VM accessor;
 
-  @SuppressWarnings("unused")
-  static TestVM[] coordinators() {
-    return new TestVM[] {
-        TestVM.SERVER1, TestVM.ACCESSOR
-    };
-  }
-
-  @SuppressWarnings("unused")
-  static Object[] coordinatorsAndRegionTypes() {
-    List<Object[]> parameters = new ArrayList<>();
-    RegionShortcut[] regionShortcuts = regionTypes();
-
-    Arrays.stream(regionShortcuts).forEach(regionShortcut -> {
-      parameters.add(new Object[] {TestVM.SERVER1, regionShortcut});
-      parameters.add(new Object[] {TestVM.ACCESSOR, regionShortcut});
-    });
-
-    return parameters.toArray();
-  }
-
-  private static RegionShortcut[] regionTypes() {
-    return new RegionShortcut[] {
-        RegionShortcut.PARTITION, RegionShortcut.PARTITION_REDUNDANT
-    };
-  }
-
   @Before
   public void setUp() throws Exception {
     server1 = getVM(TestVM.SERVER1.vmNumber);
@@ -117,127 +89,414 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     accessor = getVM(TestVM.ACCESSOR.vmNumber);
   }
 
-  private void initAccessor(RegionShortcut regionShortcut) {
-    PartitionAttributes<String, String> attrs = new PartitionAttributesFactory<String, String>()
-        .setTotalNumBuckets(BUCKETS)
-        .setLocalMaxMemory(0)
-        .create();
+  /**
+   * The test does the following (clear coordinator and regionType are parametrized):
+   * - Launches one thread per VM to continuously execute removes, puts and gets for a given time.
+   * - Clears the Partition Region continuously every X milliseconds for a given time.
+   * - Asserts that, after the clears have finished, the Region Buckets are consistent across
+   * members.
+   */
+  @Test
+  @Parameters({"SERVER1,PARTITION", "ACCESSOR,PARTITION",
+      "SERVER1,PARTITION_REDUNDANT", "ACCESSOR,PARTITION_REDUNDANT"})
+  @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
+  public void clearWithConcurrentPutGetRemoveShouldWorkCorrectly(TestVM coordinatorVM,
+      RegionShortcut regionShortcut) throws InterruptedException {
+    parametrizedSetup(regionShortcut);
 
-    cacheRule.getCache().createRegionFactory(regionShortcut)
-        .setPartitionAttributes(attrs)
-        .create(REGION_NAME);
+    // Let all VMs continuously execute puts, gets and removes for 60 seconds.
+    final int workMillis = 60000;
+    final int entries = 15000;
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePuts(entries, workMillis)),
+        server2.invokeAsync(() -> executeGets(entries, workMillis)),
+        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
 
-  }
+    // Clear the region every second for 60 seconds.
+    getVM(coordinatorVM.vmNumber).invoke(() -> executeClears(workMillis, 1000));
 
-  private void initDataStore(RegionShortcut regionShortcut) {
-    PartitionAttributes<String, String> attrs = new PartitionAttributesFactory<String, String>()
-        .setTotalNumBuckets(BUCKETS)
-        .create();
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
 
-    cacheRule.getCache().createRegionFactory(regionShortcut)
-        .setPartitionAttributes(attrs)
-        .create(REGION_NAME);
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
   }
 
-  private void parametrizedSetup(RegionShortcut regionShortcut) {
-    server1.invoke(() -> initDataStore(regionShortcut));
-    server2.invoke(() -> initDataStore(regionShortcut));
-    accessor.invoke(() -> initAccessor(regionShortcut));
-  }
+  /**
+   * The test does the following (clear coordinator and regionType are parametrized):
+   * - Launches two threads per VM to continuously execute putAll and removeAll for a given time.
+   * - Clears the Partition Region continuously every X milliseconds for a given time.
+   * - Asserts that, after the clears have finished, the Region Buckets are consistent across
+   * members.
+   */
+  @Test
+  @Parameters({"SERVER1,PARTITION", "ACCESSOR,PARTITION",
+      "SERVER1,PARTITION_REDUNDANT", "ACCESSOR,PARTITION_REDUNDANT"})
+  @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
+  public void clearWithConcurrentPutAllRemoveAllShouldWorkCorrectly(TestVM coordinatorVM,
+      RegionShortcut regionShortcut) throws InterruptedException {
+    parametrizedSetup(regionShortcut);
 
-  private void waitForSilence() {
-    DMStats dmStats = cacheRule.getSystem().getDistributionManager().getStats();
-    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
-    PartitionedRegionStats partitionedRegionStats = region.getPrStats();
+    // Let all VMs continuously execute putAll and removeAll for 15 seconds.
+    final int workMillis = 15000;
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePutAlls(0, 2000, workMillis)),
+        server1.invokeAsync(() -> executeRemoveAlls(0, 2000, workMillis)),
+        server2.invokeAsync(() -> executePutAlls(2000, 4000, workMillis)),
+        server2.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)),
+        accessor.invokeAsync(() -> executePutAlls(4000, 6000, workMillis)),
+        accessor.invokeAsync(() -> executeRemoveAlls(4000, 6000, workMillis)));
 
-    await().untilAsserted(() -> {
-      assertThat(dmStats.getReplyWaitsInProgress()).isEqualTo(0);
-      assertThat(partitionedRegionStats.getVolunteeringInProgress()).isEqualTo(0);
-      assertThat(partitionedRegionStats.getBucketCreatesInProgress()).isEqualTo(0);
-      assertThat(partitionedRegionStats.getPrimaryTransfersInProgress()).isEqualTo(0);
-      assertThat(partitionedRegionStats.getRebalanceBucketCreatesInProgress()).isEqualTo(0);
-      assertThat(partitionedRegionStats.getRebalancePrimaryTransfersInProgress()).isEqualTo(0);
-    });
+    // Clear the region every half second for 15 seconds.
+    getVM(coordinatorVM.vmNumber).invoke(() -> executeClears(workMillis, 500));
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
   }
 
   /**
-   * Populates the region and verifies the data on the selected VMs.
+   * The test does the following (regionType is parametrized):
+   * - Populates the Partition Region.
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop the
+   * coordinator VM while the clear is in progress.
+   * - Clears the Partition Region (at this point the coordinator is restarted).
+   * - Asserts that, after the member joins again, the Region Buckets are consistent.
    */
-  private void populateRegion(VM feeder, int entryCount, Iterable<VM> vms) {
-    feeder.invoke(() -> {
+  @Test
+  @Parameters({"PARTITION", "PARTITION_REDUNDANT"})
+  @TestCaseName("[{index}] {method}(RegionType:{0})")
+  public void clearShouldFailWhenCoordinatorMemberIsBounced(RegionShortcut regionShortcut) {
+    parametrizedSetup(regionShortcut);
+    final int entries = 1000;
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+
+    // Set the MemberKiller to target the coordinator, then try to clear the region.
+    server1.invoke(() -> {
+      DistributionMessageObserver.setInstance(new MemberKiller(true));
       Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
-      IntStream.range(0, entryCount).forEach(i -> region.put(String.valueOf(i), "Value_" + i));
+      assertThatThrownBy(region::clear)
+          .isInstanceOf(DistributedSystemDisconnectedException.class)
+          .hasCauseInstanceOf(ForcedDisconnectException.class);
     });
 
-    vms.forEach(vm -> vm.invoke(() -> {
-      waitForSilence();
-      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+    // Wait for member to get back online and assign all buckets.
+    server1.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(regionShortcut);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+      PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
+    });
 
-      IntStream.range(0, entryCount)
-          .forEach(i -> assertThat(region.get(String.valueOf(i))).isEqualTo("Value_" + i));
-    }));
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
   }
 
   /**
-   * Asserts that the RegionVersionVectors for both buckets are consistent.
-   *
-   * @param bucketId Id of the bucket to compare.
-   * @param bucketDump1 First bucketDump.
-   * @param bucketDump2 Second bucketDump.
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Populates the Partition Region.
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
+   * participates in the clear operation).
+   * - Launches two threads per VM to continuously execute gets, puts and removes for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that, after the clear has finished, the Region Buckets are consistent across members.
    */
-  private void assertRegionVersionVectorsConsistency(int bucketId, BucketDump bucketDump1,
-      BucketDump bucketDump2) {
-    RegionVersionVector<?> rvv1 = bucketDump1.getRvv();
-    RegionVersionVector<?> rvv2 = bucketDump2.getRvv();
+  @Test
+  @Parameters({"SERVER1", "ACCESSOR"})
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnRedundantPartitionRegionWithConcurrentPutGetRemoveShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
+    final int entries = 7500;
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
-    if (rvv1 == null) {
-      assertThat(rvv2)
-          .as("Bucket " + bucketId + " has an RVV on member " + bucketDump2.getMember()
-              + ", but does not on member " + bucketDump1.getMember())
-          .isNull();
-    }
+    // Let all VMs (except the one to kill) continuously execute gets, puts and removes for 30 seconds.
+    final int workMillis = 30000;
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executeGets(entries, workMillis)),
+        server1.invokeAsync(() -> executePuts(entries, workMillis)),
+        accessor.invokeAsync(() -> executeGets(entries, workMillis)),
+        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
 
-    if (rvv2 == null) {
-      assertThat(rvv1)
-          .as("Bucket " + bucketId + " has an RVV on member " + bucketDump1.getMember()
-              + ", but does not on member " + bucketDump2.getMember())
-          .isNull();
-    }
+    // Retry the clear operation on the region until success (server2 will go down, but other
+    // members will eventually become primary for those buckets previously hosted by server2).
+    executeClearWithRetry(getVM(coordinatorVM.vmNumber));
 
-    assertThat(rvv1).isNotNull();
-    assertThat(rvv2).isNotNull();
-    Map<VersionSource<?>, RegionVersionHolder<?>> rvv2Members =
-        new HashMap<>(rvv1.getMemberToVersion());
-    Map<VersionSource<?>, RegionVersionHolder<?>> rvv1Members =
-        new HashMap<>(rvv1.getMemberToVersion());
-    for (Map.Entry<VersionSource<?>, RegionVersionHolder<?>> entry : rvv1Members.entrySet()) {
-      VersionSource<?> memberId = entry.getKey();
-      RegionVersionHolder<?> versionHolder1 = entry.getValue();
-      RegionVersionHolder<?> versionHolder2 = rvv2Members.remove(memberId);
-      assertThat(versionHolder1)
-          .as("RegionVersionVector for bucket " + bucketId + " on member " + bucketDump1.getMember()
-              + " is not consistent with member " + bucketDump2.getMember())
-          .isEqualTo(versionHolder2);
+    // Wait for member to get back online.
+    server2.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(RegionShortcut.PARTITION_REDUNDANT);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
     }
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
   }
 
   /**
-   * Asserts that the region data is consistent across buckets.
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Populates the Partition Region.
+   * - Verifies that the entries are synchronized on all members.
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
+   * participates in the clear operation).
+   * - Launches two threads per VM to continuously execute gets, puts and removes for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that the clear operation failed with PartitionedRegionPartialClearException (primary
+   * buckets on the restarted members are not available).
    */
-  private void assertRegionBucketsConsistency() throws ForceReattemptException {
-    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
-    // Redundant copies + 1 primary.
-    int expectedCopies = region.getRedundantCopies() + 1;
+  @Test
+  @Parameters({"SERVER1", "ACCESSOR"})
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnNonRedundantPartitionRegionWithConcurrentPutGetRemoveShouldFailWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    parametrizedSetup(RegionShortcut.PARTITION);
+    final int entries = 7500;
+    populateRegion(accessor, entries, asList(accessor, server1, server2));
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
-    for (int bId = 0; bId < BUCKETS; bId++) {
-      final int bucketId = bId;
-      List<BucketDump> bucketDumps = region.getAllBucketEntries(bucketId);
-      assertThat(bucketDumps.size())
-          .as("Bucket " + bucketId + " should have " + expectedCopies + " copies, but has "
-              + bucketDumps.size())
-          .isEqualTo(expectedCopies);
+    // Let all VMs (except the one to kill) continuously execute gets, puts and removes for 30 seconds.
+    final int workMillis = 30000;
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executeGets(entries, workMillis)),
+        server1.invokeAsync(() -> executePuts(entries, workMillis)),
+        accessor.invokeAsync(() -> executeGets(entries, workMillis)),
+        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
 
-      // Check that all copies of the bucket have the same data.
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> {
+      assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
+          .isInstanceOf(PartitionedRegionPartialClearException.class);
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+  }
+
+  /**
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
+   * participates in the clear operation).
+   * - Launches one thread per VM to continuously execute putAll/removeAll for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that, after the clear has finished, the Region Buckets are consistent across members.
+   */
+  @Test
+  @Parameters({"SERVER1", "ACCESSOR"})
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    // Let all VMs continuously execute putAll/removeAll for 30 seconds.
+    final int workMillis = 30000;
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
+        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
+
+    // Retry the clear operation on the region until success (server2 will go down, but other
+    // members will eventually become primary for those buckets previously hosted by server2).
+    executeClearWithRetry(getVM(coordinatorVM.vmNumber));
+
+    // Wait for member to get back online.
+    server2.invoke(() -> {
+      cacheRule.createCache();
+      initDataStore(RegionShortcut.PARTITION_REDUNDANT);
+      await().untilAsserted(
+          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+
+    // Assert Region Buckets are consistent.
+    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
+    accessor.invoke(this::assertRegionBucketsConsistency);
+  }
+
+  /**
+   * The test does the following (clear coordinator is chosen through parameters):
+   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
+   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
+   * participates in the clear operation).
+   * - Launches one thread per VM to continuously execute putAll/removeAll for a given time.
+   * - Clears the Partition Region (at this point the non-coordinator is restarted).
+   * - Asserts that the clear operation failed with PartitionedRegionPartialClearException (primary
+   * buckets on the restarted members are not available).
+   */
+  @Test
+  @Parameters({"SERVER1", "ACCESSOR"})
+  @TestCaseName("[{index}] {method}(Coordinator:{0})")
+  public void clearOnNonRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldFailWhenNonCoordinatorMembersAreBounced(
+      TestVM coordinatorVM) throws InterruptedException {
+    parametrizedSetup(RegionShortcut.PARTITION);
+    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
+
+    final int workMillis = 30000;
+    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
+        server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
+        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
+
+    // Clear the region.
+    getVM(coordinatorVM.vmNumber).invoke(() -> {
+      assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
+          .isInstanceOf(PartitionedRegionPartialClearException.class);
+    });
+
+    // Let asyncInvocations finish.
+    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
+      asyncInvocation.await();
+    }
+  }
+
+  private void initAccessor(RegionShortcut regionShortcut) {
+    PartitionAttributes<String, String> attrs = new PartitionAttributesFactory<String, String>()
+        .setTotalNumBuckets(BUCKETS)
+        .setLocalMaxMemory(0)
+        .create();
+
+    cacheRule.getCache().createRegionFactory(regionShortcut)
+        .setPartitionAttributes(attrs)
+        .create(REGION_NAME);
+
+  }
+
+  private void initDataStore(RegionShortcut regionShortcut) {
+    PartitionAttributes<String, String> attrs = new PartitionAttributesFactory<String, String>()
+        .setTotalNumBuckets(BUCKETS)
+        .create();
+
+    cacheRule.getCache().createRegionFactory(regionShortcut)
+        .setPartitionAttributes(attrs)
+        .create(REGION_NAME);
+  }
+
+  private void parametrizedSetup(RegionShortcut regionShortcut) {
+    server1.invoke(() -> initDataStore(regionShortcut));
+    server2.invoke(() -> initDataStore(regionShortcut));
+    accessor.invoke(() -> initAccessor(regionShortcut));
+  }
+
+  private void waitForSilence() {
+    DMStats dmStats = cacheRule.getSystem().getDistributionManager().getStats();
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    PartitionedRegionStats partitionedRegionStats = region.getPrStats();
+
+    await().untilAsserted(() -> {
+      assertThat(dmStats.getReplyWaitsInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getVolunteeringInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getPrimaryTransfersInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalanceBucketCreatesInProgress()).isEqualTo(0);
+      assertThat(partitionedRegionStats.getRebalancePrimaryTransfersInProgress()).isEqualTo(0);
+    });
+  }
+
+  /**
+   * Populates the region and verifies the data on the selected VMs.
+   */
+  private void populateRegion(VM feeder, int entryCount, Iterable<VM> vms) {
+    feeder.invoke(() -> {
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+      IntStream.range(0, entryCount).forEach(i -> region.put(String.valueOf(i), "Value_" + i));
+    });
+
+    vms.forEach(vm -> vm.invoke(() -> {
+      waitForSilence();
+      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
+
+      IntStream.range(0, entryCount)
+          .forEach(i -> assertThat(region.get(String.valueOf(i))).isEqualTo("Value_" + i));
+    }));
+  }
+
+  /**
+   * Asserts that the RegionVersionVectors for both buckets are consistent.
+   *
+   * @param bucketId Id of the bucket to compare.
+   * @param bucketDump1 First bucketDump.
+   * @param bucketDump2 Second bucketDump.
+   */
+  private void assertRegionVersionVectorsConsistency(int bucketId, BucketDump bucketDump1,
+      BucketDump bucketDump2) {
+    RegionVersionVector<?> rvv1 = bucketDump1.getRvv();
+    RegionVersionVector<?> rvv2 = bucketDump2.getRvv();
+
+    if (rvv1 == null) {
+      assertThat(rvv2)
+          .as("Bucket " + bucketId + " has an RVV on member " + bucketDump2.getMember()
+              + ", but does not on member " + bucketDump1.getMember())
+          .isNull();
+      // Both RVVs are null, which is consistent; nothing left to compare.
+      return;
+    }
+
+    if (rvv2 == null) {
+      assertThat(rvv1)
+          .as("Bucket " + bucketId + " has an RVV on member " + bucketDump1.getMember()
+              + ", but does not on member " + bucketDump2.getMember())
+          .isNull();
+      return;
+    }
+
+    assertThat(rvv1).isNotNull();
+    assertThat(rvv2).isNotNull();
+    // Copy both member-to-version maps so entries can be removed while comparing.
+    Map<VersionSource<?>, RegionVersionHolder<?>> rvv2Members =
+        new HashMap<>(rvv2.getMemberToVersion());
+    Map<VersionSource<?>, RegionVersionHolder<?>> rvv1Members =
+        new HashMap<>(rvv1.getMemberToVersion());
+    for (Map.Entry<VersionSource<?>, RegionVersionHolder<?>> entry : rvv1Members.entrySet()) {
+      VersionSource<?> memberId = entry.getKey();
+      RegionVersionHolder<?> versionHolder1 = entry.getValue();
+      RegionVersionHolder<?> versionHolder2 = rvv2Members.remove(memberId);
+      assertThat(versionHolder1)
+          .as("RegionVersionVector for bucket " + bucketId + " on member " + bucketDump1.getMember()
+              + " is not consistent with member " + bucketDump2.getMember())
+          .isEqualTo(versionHolder2);
+    }
+
+    assertThat(rvv2Members)
+        .as("RegionVersionVector for bucket " + bucketId + " on member "
+            + bucketDump2.getMember() + " has members not present on member "
+            + bucketDump1.getMember())
+        .isEmpty();
+  }
+
+  /**
+   * Asserts that the region data is consistent across buckets.
+   */
+  private void assertRegionBucketsConsistency() throws ForceReattemptException {
+    PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
+    // Redundant copies + 1 primary.
+    int expectedCopies = region.getRedundantCopies() + 1;
+
+    for (int bId = 0; bId < BUCKETS; bId++) {
+      final int bucketId = bId;
+      List<BucketDump> bucketDumps = region.getAllBucketEntries(bucketId);
+      assertThat(bucketDumps.size())
+          .as("Bucket " + bucketId + " should have " + expectedCopies + " copies, but has "
+              + bucketDumps.size())
+          .isEqualTo(expectedCopies);
+
+      // Check that all copies of the bucket have the same data.
       if (bucketDumps.size() > 1) {
         BucketDump firstDump = bucketDumps.get(0);
 
@@ -375,291 +634,6 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     }
   }
 
-  /**
-   * The test does the following (clear coordinator and regionType are parametrized):
-   * - Launches one thread per VM to continuously execute removes, puts and gets for a given time.
-   * - Clears the Partition Region continuously every X milliseconds for a given time.
-   * - Asserts that, after the clears have finished, the Region Buckets are consistent across
-   * members.
-   */
-  @Test
-  @TestCaseName(TEST_CASE_NAME)
-  @Parameters(method = "coordinatorsAndRegionTypes")
-  public void clearWithConcurrentPutGetRemoveShouldWorkCorrectly(TestVM coordinatorVM,
-      RegionShortcut regionShortcut) throws InterruptedException {
-    parametrizedSetup(regionShortcut);
-
-    // Let all VMs continuously execute puts and gets for 60 seconds.
-    final int workMillis = 60000;
-    final int entries = 15000;
-    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executePuts(entries, workMillis)),
-        server2.invokeAsync(() -> executeGets(entries, workMillis)),
-        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
-
-    // Clear the region every second for 60 seconds.
-    getVM(coordinatorVM.vmNumber).invoke(() -> executeClears(workMillis, 1000));
-
-    // Let asyncInvocations finish.
-    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
-      asyncInvocation.await();
-    }
-
-    // Assert Region Buckets are consistent.
-    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
-    accessor.invoke(this::assertRegionBucketsConsistency);
-  }
-
-  /**
-   * The test does the following (clear coordinator and regionType are parametrized):
-   * - Launches two threads per VM to continuously execute putAll and removeAll for a given time.
-   * - Clears the Partition Region continuously every X milliseconds for a given time.
-   * - Asserts that, after the clears have finished, the Region Buckets are consistent across
-   * members.
-   */
-  @Test
-  @TestCaseName(TEST_CASE_NAME)
-  @Parameters(method = "coordinatorsAndRegionTypes")
-  public void clearWithConcurrentPutAllRemoveAllShouldWorkCorrectly(TestVM coordinatorVM,
-      RegionShortcut regionShortcut) throws InterruptedException {
-    parametrizedSetup(regionShortcut);
-
-    // Let all VMs continuously execute putAll and removeAll for 15 seconds.
-    final int workMillis = 15000;
-    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executePutAlls(0, 2000, workMillis)),
-        server1.invokeAsync(() -> executeRemoveAlls(0, 2000, workMillis)),
-        server2.invokeAsync(() -> executePutAlls(2000, 4000, workMillis)),
-        server2.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)),
-        accessor.invokeAsync(() -> executePutAlls(4000, 6000, workMillis)),
-        accessor.invokeAsync(() -> executeRemoveAlls(4000, 6000, workMillis)));
-
-    // Clear the region every half second for 15 seconds.
-    getVM(coordinatorVM.vmNumber).invoke(() -> executeClears(workMillis, 500));
-
-    // Let asyncInvocations finish.
-    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
-      asyncInvocation.await();
-    }
-
-    // Assert Region Buckets are consistent.
-    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
-    accessor.invoke(this::assertRegionBucketsConsistency);
-  }
-
-  /**
-   * The test does the following (regionType is parametrized):
-   * - Populates the Partition Region.
-   * - Verifies that the entries are synchronized on all members.
-   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop the
-   * coordinator VM while the clear is in progress.
-   * - Clears the Partition Region (at this point the coordinator is restarted).
-   * - Asserts that, after the member joins again, the Region Buckets are consistent.
-   */
-  @Test
-  @TestCaseName("[{index}] {method}(RegionType:{0})")
-  @Parameters(method = "regionTypes")
-  public void clearShouldFailWhenCoordinatorMemberIsBounced(RegionShortcut regionShortcut) {
-    parametrizedSetup(regionShortcut);
-    final int entries = 1000;
-    populateRegion(accessor, entries, asList(accessor, server1, server2));
-
-    // Set the CoordinatorMemberKiller and try to clear the region
-    server1.invoke(() -> {
-      DistributionMessageObserver.setInstance(new MemberKiller(true));
-      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
-      assertThatThrownBy(region::clear)
-          .isInstanceOf(DistributedSystemDisconnectedException.class)
-          .hasCauseInstanceOf(ForcedDisconnectException.class);
-    });
-
-    // Wait for member to get back online and assign all buckets.
-    server1.invoke(() -> {
-      cacheRule.createCache();
-      initDataStore(regionShortcut);
-      await().untilAsserted(
-          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
-      PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
-    });
-
-    // Assert Region Buckets are consistent.
-    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
-    accessor.invoke(this::assertRegionBucketsConsistency);
-  }
-
-  /**
-   * The test does the following (clear coordinator is chosen through parameters):
-   * - Populates the Partition Region.
-   * - Verifies that the entries are synchronized on all members.
-   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
-   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
-   * participates in the clear operation).
-   * - Launches two threads per VM to continuously execute gets, puts and removes for a given time.
-   * - Clears the Partition Region (at this point the non-coordinator is restarted).
-   * - Asserts that, after the clear has finished, the Region Buckets are consistent across members.
-   */
-  @Test
-  @Parameters(method = "coordinators")
-  @TestCaseName("[{index}] {method}(Coordinator:{0})")
-  public void clearOnRedundantPartitionRegionWithConcurrentPutGetRemoveShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
-      TestVM coordinatorVM) throws InterruptedException {
-    parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
-    final int entries = 7500;
-    populateRegion(accessor, entries, asList(accessor, server1, server2));
-    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
-
-    // Let all VMs (except the one to kill) continuously execute gets, puts and removes for 30 seconds.
-    final int workMillis = 30000;
-    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executeGets(entries, workMillis)),
-        server1.invokeAsync(() -> executePuts(entries, workMillis)),
-        accessor.invokeAsync(() -> executeGets(entries, workMillis)),
-        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
-
-    // Retry the clear operation on the region until success (server2 will go down, but other
-    // members will eventually become primary for those buckets previously hosted by server2).
-    executeClearWithRetry(getVM(coordinatorVM.vmNumber));
-
-    // Wait for member to get back online.
-    server2.invoke(() -> {
-      cacheRule.createCache();
-      initDataStore(RegionShortcut.PARTITION_REDUNDANT);
-      await().untilAsserted(
-          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
-    });
-
-    // Let asyncInvocations finish.
-    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
-      asyncInvocation.await();
-    }
-
-    // Assert Region Buckets are consistent.
-    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
-    accessor.invoke(this::assertRegionBucketsConsistency);
-  }
-
-  /**
-   * The test does the following (clear coordinator is chosen through parameters):
-   * - Populates the Partition Region.
-   * - Verifies that the entries are synchronized on all members.
-   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
-   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
-   * participates in the clear operation).
-   * - Launches two threads per VM to continuously execute gets, puts and removes for a given time.
-   * - Clears the Partition Region (at this point the non-coordinator is restarted).
-   * - Asserts that the clear operation failed with PartitionedRegionPartialClearException (primary
-   * buckets on the restarted members are not available).
-   */
-  @Test
-  @Parameters(method = "coordinators")
-  @TestCaseName("[{index}] {method}(Coordinator:{0})")
-  public void clearOnNonRedundantPartitionRegionWithConcurrentPutGetRemoveShouldFailWhenNonCoordinatorMembersAreBounced(
-      TestVM coordinatorVM) throws InterruptedException {
-    parametrizedSetup(RegionShortcut.PARTITION);
-    final int entries = 7500;
-    populateRegion(accessor, entries, asList(accessor, server1, server2));
-    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
-
-    // Let all VMs (except the one to kill) continuously execute gets, puts and removes for 30 seconds.
-    final int workMillis = 30000;
-    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executeGets(entries, workMillis)),
-        server1.invokeAsync(() -> executePuts(entries, workMillis)),
-        accessor.invokeAsync(() -> executeGets(entries, workMillis)),
-        accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
-
-    // Clear the region.
-    getVM(coordinatorVM.vmNumber).invoke(() -> {
-      assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
-          .isInstanceOf(PartitionedRegionPartialClearException.class);
-    });
-
-    // Let asyncInvocations finish.
-    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
-      asyncInvocation.await();
-    }
-  }
-
-  /**
-   * The test does the following (clear coordinator is chosen through parameters):
-   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
-   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
-   * participates in the clear operation).
-   * - Launches one thread per VM to continuously execute putAll/removeAll for a given time.
-   * - Clears the Partition Region (at this point the non-coordinator is restarted).
-   * - Asserts that, after the clear has finished, the Region Buckets are consistent across members.
-   */
-  @Test
-  @Parameters(method = "coordinators")
-  @TestCaseName("[{index}] {method}(Coordinator:{0})")
-  public void clearOnRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
-      TestVM coordinatorVM) throws InterruptedException {
-    parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
-    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
-
-    // Let all VMs continuously execute putAll/removeAll for 30 seconds.
-    final int workMillis = 30000;
-    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
-        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
-
-    // Retry the clear operation on the region until success (server2 will go down, but other
-    // members will eventually become primary for those buckets previously hosted by server2).
-    executeClearWithRetry(getVM(coordinatorVM.vmNumber));
-
-    // Wait for member to get back online.
-    server2.invoke(() -> {
-      cacheRule.createCache();
-      initDataStore(RegionShortcut.PARTITION_REDUNDANT);
-      await().untilAsserted(
-          () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
-    });
-
-    // Let asyncInvocations finish.
-    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
-      asyncInvocation.await();
-    }
-
-    // Assert Region Buckets are consistent.
-    asList(accessor, server1, server2).forEach(vm -> vm.invoke(this::waitForSilence));
-    accessor.invoke(this::assertRegionBucketsConsistency);
-  }
-
-  /**
-   * The test does the following (clear coordinator is chosen through parameters):
-   * - Sets the {@link MemberKiller} as a {@link DistributionMessageObserver} to stop a
-   * non-coordinator VM while the clear is in progress (the member has primary buckets, though, so
-   * participates in the clear operation).
-   * - Launches one thread per VM to continuously execute putAll/removeAll for a given time.
-   * - Clears the Partition Region (at this point the non-coordinator is restarted).
-   * - Asserts that the clear operation failed with PartitionedRegionPartialClearException (primary
-   * buckets on the restarted members are not available).
-   */
-  @Test
-  @Parameters(method = "coordinators")
-  @TestCaseName("[{index}] {method}(Coordinator:{0})")
-  public void clearOnNonRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldFailWhenNonCoordinatorMembersAreBounced(
-      TestVM coordinatorVM) throws InterruptedException {
-    parametrizedSetup(RegionShortcut.PARTITION);
-    server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
-
-    final int workMillis = 30000;
-    List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
-        server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
-        accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
-
-    // Clear the region.
-    getVM(coordinatorVM.vmNumber).invoke(() -> {
-      assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
-          .isInstanceOf(PartitionedRegionPartialClearException.class);
-    });
-
-    // Let asyncInvocations finish.
-    for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
-      asyncInvocation.await();
-    }
-  }
-
   private enum TestVM {
     ACCESSOR(0), SERVER1(1), SERVER2(2);
 

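The tests above share one dunit idiom: each remote workload is started with VM.invokeAsync, and the test then joins every AsyncInvocation with await(), which rethrows any failure raised in the remote VM. A condensed sketch of that idiom, assuming a running dunit environment (the class and method names below are illustrative, not from the test):

import java.util.Arrays;
import java.util.List;

import org.apache.geode.test.dunit.AsyncInvocation;
import org.apache.geode.test.dunit.VM;

public class AsyncWorkloadSketch {

  // Starts the workload concurrently on two dunit VMs and waits for both.
  void runConcurrently(VM server1, VM server2) throws InterruptedException {
    List<AsyncInvocation<Void>> invocations = Arrays.asList(
        server1.invokeAsync(AsyncWorkloadSketch::workload),
        server2.invokeAsync(AsyncWorkloadSketch::workload));

    for (AsyncInvocation<Void> invocation : invocations) {
      invocation.await(); // propagates exceptions thrown in the remote VM
    }
  }

  private static void workload() {
    // the real tests run puts/gets/removes or putAll/removeAll here
  }
}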
[geode] 06/17: GEODE-9132: Remove unused DSFID constants

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 4a423aee8eca388839660be614377537e422cae4
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Thu Apr 15 17:07:43 2021 -0700

    GEODE-9132: Remove unused DSFID constants
---
 .../apache/geode/internal/serialization/DataSerializableFixedID.java   | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java b/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java
index 0dd0e39..313878b 100644
--- a/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java
+++ b/geode-serialization/src/main/java/org/apache/geode/internal/serialization/DataSerializableFixedID.java
@@ -59,9 +59,6 @@ public interface DataSerializableFixedID extends SerializationVersions, BasicSer
   short CLEAR_PARTITIONED_REGION_REPLY_MESSAGE = -166;
   short CLEAR_PARTITIONED_REGION_MESSAGE = -165;
 
-  short PR_CLEAR_REPLY_MESSAGE = -164;
-  short PR_CLEAR_MESSAGE = -163;
-
   short DISTRIBUTED_PING_MESSAGE = -162;
 
   short REGION_REDUNDANCY_STATUS = -161;

[geode] 05/17: GEODE-9132: Cleanup PartitionedRegionClearMessage

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 6f0bbb1bdf13eb4df890335ed81850a64c808b29
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Fri Apr 9 17:24:26 2021 -0700

    GEODE-9132: Cleanup PartitionedRegionClearMessage
    
    * Use descriptive names for variables and methods
    * Use Objects.requireNonNull instead of Assert.assertTrue
    * Remove unnecessary uses of final, this, and super
    * Use static logger
    * Reformat some lines with weird formatting
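
The Objects.requireNonNull swap listed above follows this shape (a schematic only; SendPrecondition and its field are illustrative, not the Geode class):

import java.util.Objects;
import java.util.Set;

class SendPrecondition {

  private final Set<String> recipients;

  SendPrecondition(Set<String> recipients) {
    this.recipients = recipients;
  }

  void send() {
    // Replaces Assert.assertTrue(recipients != null, message): throws a
    // NullPointerException carrying the given message when recipients is null.
    Objects.requireNonNull(recipients, "ClearMessage NULL recipients set");
    // ... proceed with sending ...
  }
}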
---
 .../partitioned/PRClearCreateIndexDUnitTest.java   |   5 +-
 ...ionedRegionAfterClearNotificationDUnitTest.java |   4 +-
 ...itionedRegionClearWithAlterRegionDUnitTest.java |   2 +-
 ...gionClearWithConcurrentOperationsDUnitTest.java |   2 +-
 .../cache/PartitionedRegionClearMessage.java       | 225 ++++++++++-----------
 .../internal/cache/PartitionRegionClearHATest.java |   2 +-
 6 files changed, 118 insertions(+), 122 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java
index 1c94c2d..423932d 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/cache/query/partitioned/PRClearCreateIndexDUnitTest.java
@@ -236,10 +236,11 @@ public class PRClearCreateIndexDUnitTest implements Serializable {
       if (message instanceof PartitionedRegionClearMessage) {
         PartitionedRegionClearMessage clearMessage = (PartitionedRegionClearMessage) message;
         if (clearMessage
-            .getOp() == PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR) {
+            .getOperationType() == PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR) {
           lock_others = true;
         }
-        if (clearMessage.getOp() == PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR) {
+        if (clearMessage
+            .getOperationType() == PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR) {
           clear_others = true;
         }
       }
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionAfterClearNotificationDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionAfterClearNotificationDUnitTest.java
index 237b6a8..7979cfa 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionAfterClearNotificationDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionAfterClearNotificationDUnitTest.java
@@ -326,7 +326,7 @@ public class PartitionedRegionAfterClearNotificationDUnitTest implements Seriali
       public void beforeProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
         if (message instanceof PartitionedRegionClearMessage) {
           if (((PartitionedRegionClearMessage) message)
-              .getOp() == PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR) {
+              .getOperationType() == PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR) {
             DistributionMessageObserver.setInstance(null);
             getBlackboard().signalGate("CLOSE_CACHE");
             try {
@@ -348,7 +348,7 @@ public class PartitionedRegionAfterClearNotificationDUnitTest implements Seriali
       public void afterProcessMessage(ClusterDistributionManager dm, DistributionMessage message) {
         if (message instanceof PartitionedRegionClearMessage) {
           if (((PartitionedRegionClearMessage) message)
-              .getOp() == PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR) {
+              .getOperationType() == PartitionedRegionClearMessage.OperationType.OP_LOCK_FOR_PR_CLEAR) {
             DistributionMessageObserver.setInstance(null);
             getBlackboard().signalGate("CLOSE_CACHE");
             try {
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithAlterRegionDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithAlterRegionDUnitTest.java
index fb74eb3..564706e 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithAlterRegionDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithAlterRegionDUnitTest.java
@@ -759,7 +759,7 @@ public class PartitionedRegionClearWithAlterRegionDUnitTest implements Serializa
     private void shutdownMember(DistributionMessage message) {
       if (message instanceof PartitionedRegionClearMessage) {
         if (((PartitionedRegionClearMessage) message)
-            .getOp() == PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR) {
+            .getOperationType() == PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR) {
           DistributionMessageObserver.setInstance(null);
           InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
           MembershipManagerHelper
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
index fdb91c7..77537cb 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
@@ -703,7 +703,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     private void shutdownMember(DistributionMessage message) {
       if (message instanceof PartitionedRegionClearMessage) {
         if (((PartitionedRegionClearMessage) message)
-            .getOp() == PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR) {
+            .getOperationType() == PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR) {
           DistributionMessageObserver.setInstance(null);
           InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
           MembershipManagerHelper
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
index 724256b..36cdcb6 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
@@ -12,14 +12,16 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.internal.cache;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.Objects;
 import java.util.Set;
 
+import org.apache.logging.log4j.Logger;
+
 import org.apache.geode.DataSerializer;
 import org.apache.geode.cache.CacheException;
 import org.apache.geode.cache.Operation;
@@ -32,7 +34,6 @@ import org.apache.geode.distributed.internal.ReplyMessage;
 import org.apache.geode.distributed.internal.ReplyProcessor21;
 import org.apache.geode.distributed.internal.ReplySender;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.Assert;
 import org.apache.geode.internal.CopyOnWriteHashSet;
 import org.apache.geode.internal.NanoTimer;
 import org.apache.geode.internal.cache.partitioned.PartitionMessage;
@@ -41,96 +42,92 @@ import org.apache.geode.internal.serialization.DeserializationContext;
 import org.apache.geode.internal.serialization.SerializationContext;
 import org.apache.geode.logging.internal.log4j.api.LogService;
 
-/**
- * this message is for operations on the partition region level; it can be sent by any originating
- * member to the other members hosting this partition region
- */
 public class PartitionedRegionClearMessage extends PartitionMessage {
+  private static final Logger logger = LogService.getLogger();
 
   public enum OperationType {
     OP_LOCK_FOR_PR_CLEAR, OP_UNLOCK_FOR_PR_CLEAR, OP_PR_CLEAR,
   }
 
-  private Object cbArg;
-
-  private OperationType op;
-
-  private EventID eventID;
-
+  private Object callbackArgument;
+  private OperationType operationType;
+  private EventID eventId;
   private PartitionedRegion partitionedRegion;
-
   private Set<Integer> bucketsCleared;
 
-  @Override
-  public EventID getEventID() {
-    return eventID;
+  public PartitionedRegionClearMessage() {
+    // nothing
   }
 
-  public PartitionedRegionClearMessage() {}
+  PartitionedRegionClearMessage(Set<InternalDistributedMember> recipients,
+      PartitionedRegion partitionedRegion, ReplyProcessor21 replyProcessor21,
+      PartitionedRegionClearMessage.OperationType operationType, RegionEventImpl regionEvent) {
+    super(recipients, partitionedRegion.getPRId(), replyProcessor21);
+    this.partitionedRegion = partitionedRegion;
+    this.operationType = operationType;
+    callbackArgument = regionEvent.getRawCallbackArgument();
+    eventId = regionEvent.getEventId();
+  }
 
-  PartitionedRegionClearMessage(Set<InternalDistributedMember> recipients, PartitionedRegion region,
-      ReplyProcessor21 processor, PartitionedRegionClearMessage.OperationType operationType,
-      final RegionEventImpl event) {
-    super(recipients, region.getPRId(), processor);
-    partitionedRegion = region;
-    op = operationType;
-    cbArg = event.getRawCallbackArgument();
-    eventID = event.getEventId();
+  @Override
+  public EventID getEventID() {
+    return eventId;
   }
 
-  public OperationType getOp() {
-    return op;
+  public OperationType getOperationType() {
+    return operationType;
   }
 
   public void send() {
-    Assert.assertTrue(getRecipients() != null, "ClearMessage NULL recipients set");
+    Objects.requireNonNull(getRecipients(), "ClearMessage NULL recipients set");
+
     setTransactionDistributed(partitionedRegion.getCache().getTxManager().isDistributed());
     partitionedRegion.getDistributionManager().putOutgoing(this);
   }
 
   @Override
-  protected Throwable processCheckForPR(PartitionedRegion pr,
+  protected Throwable processCheckForPR(PartitionedRegion partitionedRegion,
       DistributionManager distributionManager) {
-    if (pr != null && !pr.getDistributionAdvisor().isInitialized()) {
+    if (partitionedRegion != null && !partitionedRegion.getDistributionAdvisor().isInitialized()) {
       return new ForceReattemptException(
           String.format("%s : could not find partitioned region with Id %s",
               distributionManager.getDistributionManagerId(),
-              pr.getRegionIdentifier()));
+              partitionedRegion.getRegionIdentifier()));
     }
     return null;
   }
 
   @Override
-  protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm,
-      PartitionedRegion partitionedRegion,
-      long startTime) throws CacheException {
-
+  protected boolean operateOnPartitionedRegion(ClusterDistributionManager distributionManager,
+      PartitionedRegion partitionedRegion, long startTime) throws CacheException {
     if (partitionedRegion == null) {
       return true;
     }
-
     if (partitionedRegion.isDestroyed()) {
       return true;
     }
 
-    if (op == OperationType.OP_LOCK_FOR_PR_CLEAR) {
+    if (operationType == OperationType.OP_LOCK_FOR_PR_CLEAR) {
       partitionedRegion.getPartitionedRegionClear().obtainClearLockLocal(getSender());
-    } else if (op == OperationType.OP_UNLOCK_FOR_PR_CLEAR) {
+    } else if (operationType == OperationType.OP_UNLOCK_FOR_PR_CLEAR) {
       partitionedRegion.getPartitionedRegionClear().releaseClearLockLocal();
     } else {
       RegionEventImpl event =
-          new RegionEventImpl(partitionedRegion, Operation.REGION_CLEAR, this.cbArg, true,
-              partitionedRegion.getMyId(),
-              getEventID());
+          new RegionEventImpl(partitionedRegion, Operation.REGION_CLEAR, callbackArgument, true,
+              partitionedRegion.getMyId(), getEventID());
       bucketsCleared = partitionedRegion.getPartitionedRegionClear().clearRegionLocal(event);
     }
     return true;
   }
 
   @Override
-  protected void appendFields(StringBuilder buff) {
-    super.appendFields(buff);
-    buff.append(" cbArg=").append(this.cbArg).append(" op=").append(this.op);
+  protected void appendFields(StringBuilder stringBuilder) {
+    super.appendFields(stringBuilder);
+    stringBuilder
+        .append(" cbArg=")
+        .append(callbackArgument)
+        .append(" op=")
+        .append(operationType);
   }
 
   @Override
@@ -139,21 +136,32 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
   }
 
   @Override
-  public void fromData(DataInput in,
-      DeserializationContext context) throws IOException, ClassNotFoundException {
+  public void fromData(DataInput in, DeserializationContext context)
+      throws IOException, ClassNotFoundException {
     super.fromData(in, context);
-    this.cbArg = DataSerializer.readObject(in);
-    op = PartitionedRegionClearMessage.OperationType.values()[in.readByte()];
-    eventID = DataSerializer.readObject(in);
+    callbackArgument = DataSerializer.readObject(in);
+    operationType = PartitionedRegionClearMessage.OperationType.values()[in.readByte()];
+    eventId = DataSerializer.readObject(in);
   }
 
   @Override
-  public void toData(DataOutput out,
-      SerializationContext context) throws IOException {
+  public void toData(DataOutput out, SerializationContext context) throws IOException {
     super.toData(out, context);
-    DataSerializer.writeObject(this.cbArg, out);
-    out.writeByte(op.ordinal());
-    DataSerializer.writeObject(eventID, out);
+    DataSerializer.writeObject(callbackArgument, out);
+    out.writeByte(operationType.ordinal());
+    DataSerializer.writeObject(eventId, out);
+  }
+
+  @Override
+  protected void sendReply(InternalDistributedMember recipient, int processorId,
+      DistributionManager distributionManager, ReplyException replyException,
+      PartitionedRegion partitionedRegion, long startTime) {
+    if (partitionedRegion != null && startTime > 0) {
+      partitionedRegion.getPrStats().endPartitionMessagesProcessing(startTime);
+    }
+    PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage
+        .send(recipient, processorId, getReplySender(distributionManager), operationType,
+            bucketsCleared, replyException);
   }
 
   /**
@@ -161,97 +169,85 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
    * received from the "far side"
    */
   public static class PartitionedRegionClearResponse extends ReplyProcessor21 {
+
     CopyOnWriteHashSet<Integer> bucketsCleared = new CopyOnWriteHashSet<>();
 
     public PartitionedRegionClearResponse(InternalDistributedSystem system,
-        Set<InternalDistributedMember> initMembers) {
-      super(system, initMembers);
+        Set<InternalDistributedMember> recipients) {
+      super(system, recipients);
     }
 
     @Override
-    public void process(DistributionMessage msg) {
-      if (msg instanceof PartitionedRegionClearReplyMessage) {
-        Set<Integer> buckets = ((PartitionedRegionClearReplyMessage) msg).bucketsCleared;
+    public void process(DistributionMessage message) {
+      if (message instanceof PartitionedRegionClearReplyMessage) {
+        Set<Integer> buckets = ((PartitionedRegionClearReplyMessage) message).bucketsCleared;
         if (buckets != null) {
           bucketsCleared.addAll(buckets);
         }
       }
-      super.process(msg, true);
-    }
-  }
-
-  @Override
-  protected void sendReply(InternalDistributedMember member, int processorId,
-      DistributionManager distributionManager, ReplyException ex,
-      PartitionedRegion partitionedRegion, long startTime) {
-    if (partitionedRegion != null) {
-      if (startTime > 0) {
-        partitionedRegion.getPrStats().endPartitionMessagesProcessing(startTime);
-      }
+      process(message, true);
     }
-    PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage
-        .send(member, processorId, getReplySender(distributionManager), op, bucketsCleared,
-            ex);
   }
 
   public static class PartitionedRegionClearReplyMessage extends ReplyMessage {
 
     private Set<Integer> bucketsCleared;
 
-    private OperationType op;
+    private OperationType operationType;
 
     @Override
     public boolean getInlineProcess() {
       return true;
     }
 
+    public static void send(InternalDistributedMember recipient, int processorId,
+        ReplySender replySender, OperationType operationType, Set<Integer> bucketsCleared,
+        ReplyException replyException) {
+      Objects.requireNonNull(recipient, "partitionedRegionClearReplyMessage NULL reply message");
+
+      PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage replyMessage =
+          new PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage(processorId,
+              operationType, bucketsCleared, replyException);
+
+      replyMessage.setRecipient(recipient);
+      replySender.putOutgoing(replyMessage);
+    }
+
     /**
      * Empty constructor to conform to DataSerializable interface
      */
-    public PartitionedRegionClearReplyMessage() {}
+    public PartitionedRegionClearReplyMessage() {
+      // Empty constructor to conform to DataSerializable interface
+    }
 
-    private PartitionedRegionClearReplyMessage(int processorId, OperationType op,
-        Set<Integer> bucketsCleared, ReplyException ex) {
-      super();
+    private PartitionedRegionClearReplyMessage(int processorId, OperationType operationType,
+        Set<Integer> bucketsCleared, ReplyException replyException) {
       this.bucketsCleared = bucketsCleared;
-      this.op = op;
+      this.operationType = operationType;
       setProcessorId(processorId);
-      setException(ex);
-    }
-
-    /** Send an ack */
-    public static void send(InternalDistributedMember recipient, int processorId, ReplySender dm,
-        OperationType op, Set<Integer> bucketsCleared, ReplyException ex) {
-
-      Assert.assertTrue(recipient != null, "partitionedRegionClearReplyMessage NULL reply message");
-
-      PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage m =
-          new PartitionedRegionClearMessage.PartitionedRegionClearReplyMessage(processorId, op,
-              bucketsCleared, ex);
-
-      m.setRecipient(recipient);
-      dm.putOutgoing(m);
+      setException(replyException);
     }
 
     /**
      * Processes this message. This method is invoked by the receiver of the message.
      *
-     * @param dm the distribution manager that is processing the message.
+     * @param distributionManager the distribution manager that is processing the message.
      */
     @Override
-    public void process(final DistributionManager dm, final ReplyProcessor21 rp) {
-      final long startTime = getTimestamp();
+    public void process(DistributionManager distributionManager,
+        ReplyProcessor21 replyProcessor21) {
+      long startTime = getTimestamp();
 
-      if (rp == null) {
-        if (LogService.getLogger().isTraceEnabled(LogMarker.DM_VERBOSE)) {
-          LogService.getLogger().trace(LogMarker.DM_VERBOSE, "{}: processor not found", this);
+      if (replyProcessor21 == null) {
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "{}: processor not found", this);
         }
         return;
       }
 
-      rp.process(this);
+      replyProcessor21.process(this);
 
-      dm.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
+      distributionManager.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
     }
 
     @Override
@@ -260,30 +256,29 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
     }
 
     @Override
-    public void fromData(DataInput in,
-        DeserializationContext context) throws IOException, ClassNotFoundException {
+    public void fromData(DataInput in, DeserializationContext context)
+        throws IOException, ClassNotFoundException {
       super.fromData(in, context);
-      op = PartitionedRegionClearMessage.OperationType.values()[in.readByte()];
+      operationType = PartitionedRegionClearMessage.OperationType.values()[in.readByte()];
       bucketsCleared = DataSerializer.readObject(in);
     }
 
     @Override
-    public void toData(DataOutput out,
-        SerializationContext context) throws IOException {
+    public void toData(DataOutput out, SerializationContext context) throws IOException {
       super.toData(out, context);
-      out.writeByte(op.ordinal());
+      out.writeByte(operationType.ordinal());
       DataSerializer.writeObject(bucketsCleared, out);
     }
 
     @Override
     public String toString() {
-      StringBuffer sb = new StringBuffer();
-      sb.append("PartitionedRegionClearReplyMessage ")
-          .append("processorId=").append(this.processorId)
+      return new StringBuilder()
+          .append("PartitionedRegionClearReplyMessage ")
+          .append("processorId=").append(processorId)
           .append(" sender=").append(sender)
-          .append(" bucketsCleared ").append(this.bucketsCleared)
-          .append(" exception=").append(getException());
-      return sb.toString();
+          .append(" bucketsCleared ").append(bucketsCleared)
+          .append(" exception=").append(getException())
+          .toString();
     }
   }
 }
diff --git a/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PartitionRegionClearHATest.java b/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PartitionRegionClearHATest.java
index 7d0db0a..5497028 100644
--- a/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PartitionRegionClearHATest.java
+++ b/geode-cq/src/distributedTest/java/org/apache/geode/internal/cache/PartitionRegionClearHATest.java
@@ -220,7 +220,7 @@ public class PartitionRegionClearHATest implements Serializable {
       if (message instanceof PartitionedRegionClearMessage) {
         PartitionedRegionClearMessage clearMessage = (PartitionedRegionClearMessage) message;
         if (clearMessage
-            .getOp() == PartitionedRegionClearMessage.OperationType.OP_UNLOCK_FOR_PR_CLEAR) {
+            .getOperationType() == PartitionedRegionClearMessage.OperationType.OP_UNLOCK_FOR_PR_CLEAR) {
           try {
             // count down to 1 so that we can go ahead and restart the server
             latch.countDown();

[geode] 08/17: GEODE-9132: Cleanup PartitionedRegionPartialClearException

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit fca9c02ba0a1956629d87b5ab8a5429f79b10b8b
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Thu Apr 15 17:10:24 2021 -0700

    GEODE-9132: Cleanup PartitionedRegionPartialClearException
    
    * Generate serialVersionUID
    * Use full words for parameter names
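
Because the exception is Serializable, declaring serialVersionUID pins its serialized form; without it the JVM derives a UID from the class shape, so an otherwise-compatible recompile can break deserialization with an InvalidClassException. Schematically (ExampleException and its UID value are illustrative, not the Geode class):

public class ExampleException extends RuntimeException {

  // Explicit UID so compatible class changes keep previously
  // serialized instances readable.
  private static final long serialVersionUID = 1L;

  public ExampleException(String message) {
    super(message);
  }
}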
---
 .../cache/PartitionedRegionPartialClearException.java      | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/geode-core/src/main/java/org/apache/geode/cache/PartitionedRegionPartialClearException.java b/geode-core/src/main/java/org/apache/geode/cache/PartitionedRegionPartialClearException.java
index 1ddb301..b84e775 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/PartitionedRegionPartialClearException.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/PartitionedRegionPartialClearException.java
@@ -21,14 +21,18 @@ package org.apache.geode.cache;
  */
 public class PartitionedRegionPartialClearException extends CacheRuntimeException {
 
-  public PartitionedRegionPartialClearException() {}
+  private static final long serialVersionUID = -3420558263697703892L;
 
-  public PartitionedRegionPartialClearException(String msg) {
-    super(msg);
+  public PartitionedRegionPartialClearException() {
+    // nothing
   }
 
-  public PartitionedRegionPartialClearException(String msg, Throwable cause) {
-    super(msg, cause);
+  public PartitionedRegionPartialClearException(String message) {
+    super(message);
+  }
+
+  public PartitionedRegionPartialClearException(String message, Throwable cause) {
+    super(message, cause);
   }
 
   public PartitionedRegionPartialClearException(Throwable cause) {

[geode] 09/17: GEODE-9132: Remove ClearPRMessage from sanctionedDataSerializables.txt

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 1399c29efa4999a633ba3f20348860b8462f8230
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Thu Apr 15 17:12:10 2021 -0700

    GEODE-9132: Remove ClearPRMessage from sanctionedDataSerializables.txt
---
 .../org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
index cb4e6b3..d1a8742 100644
--- a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
@@ -1384,14 +1384,6 @@ org/apache/geode/internal/cache/partitioned/BucketSizeMessage$BucketSizeReplyMes
 fromData,27
 toData,27
 
-org/apache/geode/internal/cache/partitioned/ClearPRMessage,2
-fromData,30
-toData,44
-
-org/apache/geode/internal/cache/partitioned/ClearPRMessage$ClearReplyMessage,2
-fromData,17
-toData,17
-
 org/apache/geode/internal/cache/partitioned/ColocatedRegionDetails,2
 fromData,81
 toData,133

[geode] 16/17: GEODE-9132: Use factory method to avoid escaped reference

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 76ed734cda2de5977c62c578e378556ce91a52d9
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Thu Apr 22 11:42:09 2021 -0700

    GEODE-9132: Use factory method to avoid escaped reference
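
The hazard being removed: when a constructor hands out 'this' (to a listener registry, another thread, or an initialize() that does either), other code can observe the object before construction finishes. A static factory defers publication until the instance is fully built. A schematic sketch of the pattern (Registry, Unsafe, and Safe are illustrative, not the Geode classes):

interface Registry {
  void register(Object listener);
}

class Unsafe {
  Unsafe(Registry registry) {
    registry.register(this); // 'this' escapes before the constructor returns
  }
}

class Safe {
  private Safe() {
    // constructor publishes nothing
  }

  static Safe create(Registry registry) {
    Safe instance = new Safe(); // construction completes first
    registry.register(instance); // then the fully built object is published
    return instance;
  }
}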
---
 .../geode/internal/cache/PartitionedRegion.java    |  2 +-
 .../internal/cache/PartitionedRegionClear.java     | 28 +++++++++++++++++-----
 .../internal/cache/PartitionedRegionClearTest.java |  4 ++--
 3 files changed, 25 insertions(+), 9 deletions(-)

diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index 37b5383..1cb4e5d 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -319,7 +319,7 @@ public class PartitionedRegion extends LocalRegion
     }
   };
 
-  private final PartitionedRegionClear partitionedRegionClear = new PartitionedRegionClear(this);
+  private final PartitionedRegionClear partitionedRegionClear = PartitionedRegionClear.create(this);
 
   /**
    * Global Region for storing PR config ( PRName->PRConfig). This region would be used to resolve
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
index b8597c1..5f4e589 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
@@ -64,7 +64,27 @@ public class PartitionedRegionClear {
   private final AssignBucketsToPartitions assignBucketsToPartitions;
   private final UpdateAttributesProcessorFactory updateAttributesProcessorFactory;
 
-  public PartitionedRegionClear(PartitionedRegion partitionedRegion) {
+  public static PartitionedRegionClear create(PartitionedRegion partitionedRegion) {
+    PartitionedRegionClear partitionedRegionClear = new PartitionedRegionClear(partitionedRegion);
+    partitionedRegionClear.initialize();
+    return partitionedRegionClear;
+  }
+
+  @VisibleForTesting
+  static PartitionedRegionClear create(PartitionedRegion partitionedRegion,
+      DistributedLockService distributedLockService,
+      ColocationLeaderRegionProvider colocationLeaderRegionProvider,
+      AssignBucketsToPartitions assignBucketsToPartitions,
+      UpdateAttributesProcessorFactory updateAttributesProcessorFactory) {
+    PartitionedRegionClear partitionedRegionClear =
+        new PartitionedRegionClear(partitionedRegion, distributedLockService,
+            colocationLeaderRegionProvider, assignBucketsToPartitions,
+            updateAttributesProcessorFactory);
+    partitionedRegionClear.initialize();
+    return partitionedRegionClear;
+  }
+
+  private PartitionedRegionClear(PartitionedRegion partitionedRegion) {
     this(partitionedRegion,
         partitionedRegion.getPartitionedRegionLockService(),
         ColocationHelper::getLeaderRegion,
@@ -72,8 +92,7 @@ public class PartitionedRegionClear {
         pr -> new UpdateAttributesProcessor(pr, true));
   }
 
-  @VisibleForTesting
-  PartitionedRegionClear(PartitionedRegion partitionedRegion,
+  private PartitionedRegionClear(PartitionedRegion partitionedRegion,
       DistributedLockService distributedLockService,
       ColocationLeaderRegionProvider colocationLeaderRegionProvider,
       AssignBucketsToPartitions assignBucketsToPartitions,
@@ -83,9 +102,6 @@ public class PartitionedRegionClear {
     this.colocationLeaderRegionProvider = colocationLeaderRegionProvider;
     this.assignBucketsToPartitions = assignBucketsToPartitions;
     this.updateAttributesProcessorFactory = updateAttributesProcessorFactory;
-
-    // TODO: initialize needs to move out of constructor to prevent escape of reference to 'this'
-    initialize();
   }
 
   private void initialize() {
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
index 376fc8e..eafb991 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
@@ -115,8 +115,8 @@ public class PartitionedRegionClearTest {
 
     doNothing().when(distributedLockService).unlock(anyString());
 
-    partitionedRegionClear = new PartitionedRegionClear(partitionedRegion, distributedLockService,
-        colocationLeaderRegionProvider, assignBucketsToPartitions,
+    partitionedRegionClear = PartitionedRegionClear.create(partitionedRegion,
+        distributedLockService, colocationLeaderRegionProvider, assignBucketsToPartitions,
         updateAttributesProcessorFactory);
   }
 

[geode] 13/17: GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 3

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 1466a44cecebe10ec050fba8d85122445e3d4508
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Mon Apr 19 16:42:22 2021 -0700

    GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 3
---
 ...gionClearWithConcurrentOperationsDUnitTest.java | 99 ++++++++++++----------
 1 file changed, 52 insertions(+), 47 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
index b2aacc0..7ef187f 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
@@ -53,6 +53,7 @@ import org.apache.geode.distributed.internal.DistributionMessage;
 import org.apache.geode.distributed.internal.DistributionMessageObserver;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.membership.api.MembershipManagerHelper;
+import org.apache.geode.internal.cache.PartitionedRegionClearMessage.OperationType;
 import org.apache.geode.internal.cache.versions.RegionVersionHolder;
 import org.apache.geode.internal.cache.versions.RegionVersionVector;
 import org.apache.geode.internal.cache.versions.VersionSource;
@@ -83,10 +84,10 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   private VM accessor;
 
   @Before
-  public void setUp() throws Exception {
-    server1 = getVM(TestVM.SERVER1.vmNumber);
-    server2 = getVM(TestVM.SERVER2.vmNumber);
-    accessor = getVM(TestVM.ACCESSOR.vmNumber);
+  public void setUp() {
+    server1 = getVM(TestVM.SERVER1.getVmId());
+    server2 = getVM(TestVM.SERVER2.getVmId());
+    accessor = getVM(TestVM.ACCESSOR.getVmId());
   }
 
   /**
@@ -102,7 +103,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
   public void clearWithConcurrentPutGetRemoveShouldWorkCorrectly(TestVM coordinatorVM,
       RegionShortcut regionShortcut) throws InterruptedException {
-    parametrizedSetup(regionShortcut);
+    createRegions(regionShortcut);
 
     // Let all VMs continuously execute puts and gets for 60 seconds.
     final int workMillis = 60000;
@@ -113,7 +114,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
         accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
 
     // Clear the region every second for 60 seconds.
-    getVM(coordinatorVM.vmNumber).invoke(() -> executeClears(workMillis, 1000));
+    getVM(coordinatorVM.getVmId()).invoke(() -> executeClears(workMillis, 1000));
 
     // Let asyncInvocations finish.
     for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
@@ -138,7 +139,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @TestCaseName("[{index}] {method}(Coordinator:{0}, RegionType:{1})")
   public void clearWithConcurrentPutAllRemoveAllShouldWorkCorrectly(TestVM coordinatorVM,
       RegionShortcut regionShortcut) throws InterruptedException {
-    parametrizedSetup(regionShortcut);
+    createRegions(regionShortcut);
 
     // Let all VMs continuously execute putAll and removeAll for 15 seconds.
     final int workMillis = 15000;
@@ -151,7 +152,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
         accessor.invokeAsync(() -> executeRemoveAlls(4000, 6000, workMillis)));
 
     // Clear the region every half second for 15 seconds.
-    getVM(coordinatorVM.vmNumber).invoke(() -> executeClears(workMillis, 500));
+    getVM(coordinatorVM.getVmId()).invoke(() -> executeClears(workMillis, 500));
 
     // Let asyncInvocations finish.
     for (AsyncInvocation<Void> asyncInvocation : asyncInvocationList) {
@@ -176,7 +177,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @Parameters({"PARTITION", "PARTITION_REDUNDANT"})
   @TestCaseName("[{index}] {method}(RegionType:{0})")
   public void clearShouldFailWhenCoordinatorMemberIsBounced(RegionShortcut regionShortcut) {
-    parametrizedSetup(regionShortcut);
+    createRegions(regionShortcut);
     final int entries = 1000;
     populateRegion(accessor, entries, asList(accessor, server1, server2));
 
@@ -192,7 +193,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     // Wait for member to get back online and assign all buckets.
     server1.invoke(() -> {
       cacheRule.createCache();
-      initDataStore(regionShortcut);
+      createDataStore(regionShortcut);
       await().untilAsserted(
           () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
       PartitionRegionHelper.assignBucketsToPartitions(cacheRule.getCache().getRegion(REGION_NAME));
@@ -219,7 +220,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @TestCaseName("[{index}] {method}(Coordinator:{0})")
   public void clearOnRedundantPartitionRegionWithConcurrentPutGetRemoveShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
       TestVM coordinatorVM) throws InterruptedException {
-    parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
+    createRegions(RegionShortcut.PARTITION_REDUNDANT);
     final int entries = 7500;
     populateRegion(accessor, entries, asList(accessor, server1, server2));
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
@@ -234,12 +235,12 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
 
     // Retry the clear operation on the region until success (server2 will go down, but other
     // members will eventually become primary for those buckets previously hosted by server2).
-    executeClearWithRetry(getVM(coordinatorVM.vmNumber));
+    executeClearWithRetry(getVM(coordinatorVM.getVmId()));
 
     // Wait for member to get back online.
     server2.invoke(() -> {
       cacheRule.createCache();
-      initDataStore(RegionShortcut.PARTITION_REDUNDANT);
+      createDataStore(RegionShortcut.PARTITION_REDUNDANT);
       await().untilAsserted(
           () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
     });
@@ -271,7 +272,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @TestCaseName("[{index}] {method}(Coordinator:{0})")
   public void clearOnNonRedundantPartitionRegionWithConcurrentPutGetRemoveShouldFailWhenNonCoordinatorMembersAreBounced(
       TestVM coordinatorVM) throws InterruptedException {
-    parametrizedSetup(RegionShortcut.PARTITION);
+    createRegions(RegionShortcut.PARTITION);
     final int entries = 7500;
     populateRegion(accessor, entries, asList(accessor, server1, server2));
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
@@ -285,7 +286,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
         accessor.invokeAsync(() -> executeRemoves(entries, workMillis)));
 
     // Clear the region.
-    getVM(coordinatorVM.vmNumber).invoke(() -> {
+    getVM(coordinatorVM.getVmId()).invoke(() -> {
       assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
           .isInstanceOf(PartitionedRegionPartialClearException.class);
     });
@@ -310,7 +311,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @TestCaseName("[{index}] {method}(Coordinator:{0})")
   public void clearOnRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
       TestVM coordinatorVM) throws InterruptedException {
-    parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
+    createRegions(RegionShortcut.PARTITION_REDUNDANT);
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
     // Let all VMs continuously execute putAll/removeAll for 30 seconds.
@@ -321,12 +322,12 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
 
     // Retry the clear operation on the region until success (server2 will go down, but other
     // members will eventually become primary for those buckets previously hosted by server2).
-    executeClearWithRetry(getVM(coordinatorVM.vmNumber));
+    executeClearWithRetry(getVM(coordinatorVM.getVmId()));
 
     // Wait for member to get back online.
     server2.invoke(() -> {
       cacheRule.createCache();
-      initDataStore(RegionShortcut.PARTITION_REDUNDANT);
+      createDataStore(RegionShortcut.PARTITION_REDUNDANT);
       await().untilAsserted(
           () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNotNull());
     });
@@ -356,7 +357,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @TestCaseName("[{index}] {method}(Coordinator:{0})")
   public void clearOnNonRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldFailWhenNonCoordinatorMembersAreBounced(
       TestVM coordinatorVM) throws InterruptedException {
-    parametrizedSetup(RegionShortcut.PARTITION);
+    createRegions(RegionShortcut.PARTITION);
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
     final int workMillis = 30000;
@@ -365,7 +366,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
         accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
 
     // Clear the region.
-    getVM(coordinatorVM.vmNumber).invoke(() -> {
+    getVM(coordinatorVM.getVmId()).invoke(() -> {
       assertThatThrownBy(() -> cacheRule.getCache().getRegion(REGION_NAME).clear())
           .isInstanceOf(PartitionedRegionPartialClearException.class);
     });
@@ -376,7 +377,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     }
   }
 
-  private void initAccessor(RegionShortcut regionShortcut) {
+  private void createAccessor(RegionShortcut regionShortcut) {
     PartitionAttributes<String, String> attrs = new PartitionAttributesFactory<String, String>()
         .setTotalNumBuckets(BUCKETS)
         .setLocalMaxMemory(0)
@@ -388,7 +389,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
 
   }
 
-  private void initDataStore(RegionShortcut regionShortcut) {
+  private void createDataStore(RegionShortcut regionShortcut) {
     PartitionAttributes<String, String> attrs = new PartitionAttributesFactory<String, String>()
         .setTotalNumBuckets(BUCKETS)
         .create();
@@ -398,10 +399,10 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
         .create(REGION_NAME);
   }
 
-  private void parametrizedSetup(RegionShortcut regionShortcut) {
-    server1.invoke(() -> initDataStore(regionShortcut));
-    server2.invoke(() -> initDataStore(regionShortcut));
-    accessor.invoke(() -> initAccessor(regionShortcut));
+  private void createRegions(RegionShortcut regionShortcut) {
+    server1.invoke(() -> createDataStore(regionShortcut));
+    server2.invoke(() -> createDataStore(regionShortcut));
+    accessor.invoke(() -> createAccessor(regionShortcut));
   }
 
   private void waitForSilence() {
@@ -637,10 +638,14 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   private enum TestVM {
     ACCESSOR(0), SERVER1(1), SERVER2(2);
 
-    final int vmNumber;
+    private final int vmId;
 
-    TestVM(int vmNumber) {
-      this.vmNumber = vmNumber;
+    TestVM(int vmId) {
+      this.vmId = vmId;
+    }
+
+    int getVmId() {
+      return vmId;
     }
   }
 
@@ -656,24 +661,6 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     }
 
     /**
-     * Shutdowns the VM whenever the message is an instance of
-     * {@link PartitionedRegionClearMessage}.
-     */
-    private void shutdownMember(DistributionMessage message) {
-      if (message instanceof PartitionedRegionClearMessage) {
-        if (((PartitionedRegionClearMessage) message)
-            .getOperationType() == PartitionedRegionClearMessage.OperationType.OP_PR_CLEAR) {
-          DistributionMessageObserver.setInstance(null);
-          InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
-          MembershipManagerHelper
-              .crashDistributedSystem(InternalDistributedSystem.getConnectedInstance());
-          await().untilAsserted(
-              () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNull());
-        }
-      }
-    }
-
-    /**
      * Invoked only on clear coordinator VM.
      *
      * @param dm the distribution manager that received the message
@@ -702,5 +689,23 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
         super.beforeProcessMessage(dm, message);
       }
     }
+
+    /**
+     * Shuts down the VM whenever the message is an instance of
+     * {@link PartitionedRegionClearMessage}.
+     */
+    private void shutdownMember(DistributionMessage message) {
+      if (message instanceof PartitionedRegionClearMessage) {
+        PartitionedRegionClearMessage clearMessage = (PartitionedRegionClearMessage) message;
+        if (clearMessage.getOperationType() == OperationType.OP_PR_CLEAR) {
+          DistributionMessageObserver.setInstance(null);
+          InternalDistributedSystem.getConnectedInstance().stopReconnectingNoDisconnect();
+          MembershipManagerHelper
+              .crashDistributedSystem(InternalDistributedSystem.getConnectedInstance());
+          await().untilAsserted(
+              () -> assertThat(InternalDistributedSystem.getConnectedInstance()).isNull());
+        }
+      }
+    }
   }
 }
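
The helper executeClears(workMillis, delayMillis) invoked throughout the hunks above is not part of this diff. A minimal sketch of what it presumably does, inferred only from the call sites and the comments ("Clear the region every second for 60 seconds"); the loop shape and region lookup are assumptions:

    // Assumed shape of the test helper: clear the region repeatedly, pausing
    // delayMillis between clears, until workMillis have elapsed.
    private void executeClears(final long workMillis, final long delayMillis)
        throws InterruptedException {
      Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
      long finishTime = System.currentTimeMillis() + workMillis;

      while (System.currentTimeMillis() < finishTime) {
        region.clear();
        Thread.sleep(delayMillis);
      }
    }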

[geode] 14/17: GEODE-9132: Fix assertion in ResourceUtils

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 38631fc908917f08b56202c5806fa7b218b723c8
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Wed Apr 21 15:22:25 2021 -0700

    GEODE-9132: Fix assertion in ResourceUtils
---
 .../src/main/java/org/apache/geode/test/util/ResourceUtils.java       | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/geode-junit/src/main/java/org/apache/geode/test/util/ResourceUtils.java b/geode-junit/src/main/java/org/apache/geode/test/util/ResourceUtils.java
index cdd6f9d..948f4f0 100644
--- a/geode-junit/src/main/java/org/apache/geode/test/util/ResourceUtils.java
+++ b/geode-junit/src/main/java/org/apache/geode/test/util/ResourceUtils.java
@@ -251,7 +251,9 @@ public class ResourceUtils {
   public static void copyDirectoryResource(final URL resource, final File targetFolder) {
     try {
       File source = new File(resource.getPath());
-      assertThat(source.exists()).as("Source does not exist: " + resource.getPath());
+      assertThat(source)
+          .withFailMessage("Source does not exist: " + resource.getPath())
+          .exists();
       FileUtils.copyDirectory(source, targetFolder);
     } catch (IOException e) {
       throw new UncheckedIOException(e);
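
The assertion being fixed here is a classic AssertJ pitfall: as(...) and withFailMessage(...) only attach a description to a future assertion, so without a terminal call such as exists() or isTrue() nothing is ever checked and the test silently passes. A minimal standalone sketch of the difference (not Geode code):

    import static org.assertj.core.api.Assertions.assertThat;

    import java.io.File;

    class AssertionPitfall {
      void demo() {
        File source = new File("/no/such/path");

        // No-op: describes an assertion that is never performed, so it "passes".
        assertThat(source.exists()).as("Source does not exist");

        // Real assertion: throws AssertionError with the message when the file is absent.
        assertThat(source)
            .withFailMessage("Source does not exist: " + source.getPath())
            .exists();
      }
    }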

[geode] 07/17: GEODE-9132: Undelete unnecessary uses of final

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 6d75516344e05f2354e48bf16854aea2769b4acd
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Thu Apr 15 17:08:52 2021 -0700

    GEODE-9132: Undelete unnecessary uses of final
---
 .../apache/geode/internal/cache/PartitionedRegionClearMessage.java | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
index 36cdcb6..cd33f78 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClearMessage.java
@@ -61,7 +61,8 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
 
   PartitionedRegionClearMessage(Set<InternalDistributedMember> recipients,
       PartitionedRegion partitionedRegion, ReplyProcessor21 replyProcessor21,
-      PartitionedRegionClearMessage.OperationType operationType, RegionEventImpl regionEvent) {
+      PartitionedRegionClearMessage.OperationType operationType,
+      final RegionEventImpl regionEvent) {
     super(recipients, partitionedRegion.getPRId(), replyProcessor21);
     this.partitionedRegion = partitionedRegion;
     this.operationType = operationType;
@@ -234,8 +235,8 @@ public class PartitionedRegionClearMessage extends PartitionMessage {
      * @param distributionManager the distribution manager that is processing the message.
      */
     @Override
-    public void process(DistributionManager distributionManager,
-        ReplyProcessor21 replyProcessor21) {
+    public void process(final DistributionManager distributionManager,
+        final ReplyProcessor21 replyProcessor21) {
       long startTime = getTimestamp();
 
       if (replyProcessor21 == null) {
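
Context for the change above: final on a method parameter is only a compile-time guard against reassignment inside the method body; it does not alter the compiled signature or affect callers, so restoring it is behavior-neutral. A tiny illustration (not Geode code):

    class Example {
      void process(final String message) {
        // message = "other";  // would not compile: message is final
        System.out.println(message);
      }
    }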

[geode] 04/17: GEODE-9132: Fixup PartitionResponse constructors

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit f0d2f11946046e9791231cb26edfba183096ad6b
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Fri Apr 9 15:22:46 2021 -0700

    GEODE-9132: Fixup PartitionResponse constructors
    
    * Chain constructors to only one constructor that calls super
    * Expose all arguments for dependency injection
    * Provide type to recipients
---
 .../cache/partitioned/PartitionMessage.java        | 32 ++++++++++++++--------
 1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java
index d676f92..dd6cdc0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java
@@ -14,6 +14,8 @@
  */
 package org.apache.geode.internal.cache.partitioned;
 
+import static java.util.Collections.singleton;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
@@ -22,6 +24,7 @@ import java.util.Set;
 
 import org.apache.logging.log4j.Logger;
 
+import org.apache.geode.CancelCriterion;
 import org.apache.geode.CancelException;
 import org.apache.geode.InternalGemFireError;
 import org.apache.geode.SystemFailure;
@@ -747,24 +750,31 @@ public abstract class PartitionMessage extends DistributionMessage
      */
     boolean responseRequired;
 
-    public PartitionResponse(InternalDistributedSystem dm, Set initMembers) {
-      this(dm, initMembers, true);
+    protected PartitionResponse(InternalDistributedSystem dm,
+        Set<InternalDistributedMember> recipients) {
+      this(dm, recipients, true);
     }
 
-    public PartitionResponse(InternalDistributedSystem dm, Set initMembers, boolean register) {
-      super(dm, initMembers);
-      if (register) {
-        register();
-      }
+    protected PartitionResponse(InternalDistributedSystem system,
+        Set<InternalDistributedMember> recipients, boolean register) {
+      this(system.getDistributionManager(), system, recipients, system.getCancelCriterion(),
+          register);
+    }
+
+    protected PartitionResponse(InternalDistributedSystem system,
+        InternalDistributedMember member) {
+      this(system.getDistributionManager(), system, singleton(member), system.getCancelCriterion());
     }
 
-    public PartitionResponse(InternalDistributedSystem dm, InternalDistributedMember member) {
-      this(dm, member, true);
+    protected PartitionResponse(DistributionManager dm, InternalDistributedSystem system,
+        Set<InternalDistributedMember> recipients, CancelCriterion cancelCriterion) {
+      this(dm, system, recipients, cancelCriterion, true);
     }
 
-    public PartitionResponse(InternalDistributedSystem dm, InternalDistributedMember member,
+    private PartitionResponse(DistributionManager dm, InternalDistributedSystem system,
+        Set<InternalDistributedMember> recipients, CancelCriterion cancelCriterion,
         boolean register) {
-      super(dm, member);
+      super(dm, system, recipients, cancelCriterion);
       if (register) {
         register();
       }
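
The refactoring above applies standard constructor chaining: every protected overload delegates toward a single private constructor, which alone calls super and performs registration, so that logic lives in one place and tests can inject the DistributionManager and CancelCriterion directly. The same idiom in miniature (names here are illustrative, not Geode's):

    class Response {
      private final String system;

      Response(String system) {
        this(system, true); // overload fills in the default and delegates
      }

      private Response(String system, boolean register) { // the one primary constructor
        this.system = system;
        if (register) {
          register();
        }
      }

      private void register() {
        // registration logic sits behind the single primary constructor
      }
    }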

[geode] 02/17: GEODE-7674: Clear on PR with lucene index should throw exception (#6317)

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit a75e7f1f527ea9d52ce3b485b9ea0c76a7994e06
Author: Xiaojian Zhou <ge...@users.noreply.github.com>
AuthorDate: Tue Apr 13 14:21:34 2021 -0700

    GEODE-7674: Clear on PR with lucene index should throw exception (#6317)
---
 .../internal/AsyncEventQueueFactoryImpl.java       |  6 ++++++
 .../asyncqueue/internal/AsyncEventQueueImpl.java   |  5 +++++
 .../org/apache/geode/cache/wan/GatewaySender.java  |  2 ++
 .../internal/cache/PartitionedRegionClear.java     | 11 +++++++++++
 .../internal/cache/wan/AbstractGatewaySender.java  | 12 +++++++++++
 .../cache/wan/GatewaySenderAttributes.java         |  7 +++++++
 .../internal/cache/wan/InternalGatewaySender.java  |  2 ++
 .../internal/cache/PartitionedRegionClearTest.java | 10 ++++++++--
 .../cache/lucene/LuceneIndexCreationDUnitTest.java | 23 ++++++++++++++++++++++
 .../cache/lucene/internal/LuceneIndexImpl.java     |  1 +
 10 files changed, 77 insertions(+), 2 deletions(-)

diff --git a/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java b/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java
index 700cc4b..089c058 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/AsyncEventQueueFactoryImpl.java
@@ -283,6 +283,12 @@ public class AsyncEventQueueFactoryImpl implements AsyncEventQueueFactory {
     return this;
   }
 
+  // keep this method internal
+  public AsyncEventQueueFactory setPartitionedRegionClearUnsupported(boolean status) {
+    gatewaySenderAttributes.partitionedRegionClearUnsupported = status;
+    return this;
+  }
+
   @Override
   public AsyncEventQueueFactory pauseEventDispatching() {
     pauseEventsDispatching = true;
diff --git a/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/AsyncEventQueueImpl.java b/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/AsyncEventQueueImpl.java
index df7c908..0e919f3 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/AsyncEventQueueImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/AsyncEventQueueImpl.java
@@ -201,6 +201,11 @@ public class AsyncEventQueueImpl implements InternalAsyncEventQueue {
     return sender.isForwardExpirationDestroy();
   }
 
+  // keep this method internal
+  public boolean isPartitionedRegionClearUnsupported() {
+    return sender.isPartitionedRegionClearUnsupported();
+  }
+
   public boolean waitUntilFlushed(long timeout, TimeUnit unit) throws InterruptedException {
     return sender.waitUntilFlushed(timeout, unit);
   }
diff --git a/geode-core/src/main/java/org/apache/geode/cache/wan/GatewaySender.java b/geode-core/src/main/java/org/apache/geode/cache/wan/GatewaySender.java
index 8e498c3..fb6a10f 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/wan/GatewaySender.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/wan/GatewaySender.java
@@ -101,6 +101,8 @@ public interface GatewaySender {
 
   boolean DEFAULT_FORWARD_EXPIRATION_DESTROY = false;
 
+  boolean DEFAULT_PARTITIONED_REGION_CLEAR_UNSUPPORTED = false;
+
   @Immutable
   OrderPolicy DEFAULT_ORDER_POLICY = OrderPolicy.KEY;
   /**
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
index 539f682..569f78c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
@@ -27,6 +27,8 @@ import org.apache.geode.cache.CacheWriterException;
 import org.apache.geode.cache.Operation;
 import org.apache.geode.cache.OperationAbortedException;
 import org.apache.geode.cache.PartitionedRegionPartialClearException;
+import org.apache.geode.cache.asyncqueue.AsyncEventQueue;
+import org.apache.geode.cache.asyncqueue.internal.AsyncEventQueueImpl;
 import org.apache.geode.cache.partition.PartitionRegionHelper;
 import org.apache.geode.distributed.internal.DistributionManager;
 import org.apache.geode.distributed.internal.MembershipListener;
@@ -396,6 +398,15 @@ public class PartitionedRegionClear {
       // Force all primary buckets to be created before clear.
       assignAllPrimaryBuckets();
 
+      for (AsyncEventQueue asyncEventQueue : partitionedRegion.getCache()
+          .getAsyncEventQueues(false)) {
+        if (((AsyncEventQueueImpl) asyncEventQueue).isPartitionedRegionClearUnsupported()) {
+          throw new UnsupportedOperationException(
+              "Clear is not supported on region " + partitionedRegion.getFullPath()
+                  + " because it has a lucene index");
+        }
+      }
+
       // do cacheWrite
       if (cacheWrite) {
         invokeCacheWriter(regionEvent);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
index 6760727..64dbc58 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
@@ -144,6 +144,12 @@ public abstract class AbstractGatewaySender implements InternalGatewaySender, Di
 
   protected boolean forwardExpirationDestroy;
 
+  /**
+   * An attribute that specifies whether the partitioned region clear operation is unsupported.
+   * Default is false.
+   */
+  protected boolean partitionedRegionClearUnsupported;
+
   protected GatewayEventSubstitutionFilter substitutionFilter;
 
   protected LocatorDiscoveryCallback locatorDiscoveryCallback;
@@ -291,6 +297,7 @@ public abstract class AbstractGatewaySender implements InternalGatewaySender, Di
     }
     this.isBucketSorted = attrs.isBucketSorted();
     this.forwardExpirationDestroy = attrs.isForwardExpirationDestroy();
+    this.partitionedRegionClearUnsupported = attrs.isPartitionedRegionClearUnsupported();
   }
 
   public GatewaySenderAdvisor getSenderAdvisor() {
@@ -388,6 +395,11 @@ public abstract class AbstractGatewaySender implements InternalGatewaySender, Di
   }
 
   @Override
+  public boolean isPartitionedRegionClearUnsupported() {
+    return this.partitionedRegionClearUnsupported;
+  }
+
+  @Override
   public boolean isManualStart() {
     return this.manualStart;
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderAttributes.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderAttributes.java
index 012fde6..8e78258 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderAttributes.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderAttributes.java
@@ -85,6 +85,9 @@ public class GatewaySenderAttributes {
 
   private boolean forwardExpirationDestroy = GatewaySender.DEFAULT_FORWARD_EXPIRATION_DESTROY;
 
+  public boolean partitionedRegionClearUnsupported =
+      GatewaySender.DEFAULT_PARTITIONED_REGION_CLEAR_UNSUPPORTED;
+
   private boolean enforceThreadsConnectSameReceiver =
       GatewaySender.DEFAULT_ENFORCE_THREADS_CONNECT_SAME_RECEIVER;
 
@@ -308,6 +311,10 @@ public class GatewaySenderAttributes {
     return this.forwardExpirationDestroy;
   }
 
+  public boolean isPartitionedRegionClearUnsupported() {
+    return this.partitionedRegionClearUnsupported;
+  }
+
   public boolean getEnforceThreadsConnectSameReceiver() {
     return this.enforceThreadsConnectSameReceiver;
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/InternalGatewaySender.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/InternalGatewaySender.java
index 13e36e7..d71297c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/InternalGatewaySender.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/InternalGatewaySender.java
@@ -40,6 +40,8 @@ public interface InternalGatewaySender extends GatewaySender {
 
   boolean isForwardExpirationDestroy();
 
+  boolean isPartitionedRegionClearUnsupported();
+
   boolean getIsMetaQueue();
 
   InternalCache getCache();
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
index 3b66e67..721d236 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionClearTest.java
@@ -42,6 +42,7 @@ import org.mockito.ArgumentCaptor;
 import org.apache.geode.CancelCriterion;
 import org.apache.geode.cache.PartitionedRegionPartialClearException;
 import org.apache.geode.cache.Region;
+import org.apache.geode.cache.asyncqueue.AsyncEventQueue;
 import org.apache.geode.distributed.DistributedLockService;
 import org.apache.geode.distributed.internal.DMStats;
 import org.apache.geode.distributed.internal.DistributionManager;
@@ -56,15 +57,18 @@ import org.apache.geode.internal.serialization.KnownVersion;
 
 public class PartitionedRegionClearTest {
 
+  private GemFireCacheImpl cache;
+  private HashSet<AsyncEventQueue> allAEQs = new HashSet<>();
+  private PartitionedRegionClear partitionedRegionClear;
   private DistributionManager distributionManager;
   private PartitionedRegion partitionedRegion;
   private RegionAdvisor regionAdvisor;
   private InternalDistributedMember internalDistributedMember;
 
-  private PartitionedRegionClear partitionedRegionClear;
-
   @Before
   public void setUp() {
+
+    cache = mock(GemFireCacheImpl.class);
     distributionManager = mock(DistributionManager.class);
     internalDistributedMember = mock(InternalDistributedMember.class);
     partitionedRegion = mock(PartitionedRegion.class);
@@ -73,6 +77,8 @@ public class PartitionedRegionClearTest {
     when(distributionManager.getDistributionManagerId()).thenReturn(internalDistributedMember);
     when(distributionManager.getId()).thenReturn(internalDistributedMember);
     when(internalDistributedMember.getVersion()).thenReturn(KnownVersion.CURRENT);
+    when(partitionedRegion.getCache()).thenReturn(cache);
+    when(cache.getAsyncEventQueues(false)).thenReturn(allAEQs);
     when(partitionedRegion.getDistributionManager()).thenReturn(distributionManager);
     when(partitionedRegion.getName()).thenReturn("prRegion");
     when(partitionedRegion.getRegionAdvisor()).thenReturn(regionAdvisor);
diff --git a/geode-lucene/src/distributedTest/java/org/apache/geode/cache/lucene/LuceneIndexCreationDUnitTest.java b/geode-lucene/src/distributedTest/java/org/apache/geode/cache/lucene/LuceneIndexCreationDUnitTest.java
index c5f3c9e..7e1ddd0 100644
--- a/geode-lucene/src/distributedTest/java/org/apache/geode/cache/lucene/LuceneIndexCreationDUnitTest.java
+++ b/geode-lucene/src/distributedTest/java/org/apache/geode/cache/lucene/LuceneIndexCreationDUnitTest.java
@@ -28,7 +28,9 @@ import static org.apache.geode.cache.lucene.test.LuceneTestUtilities.INDEX_NAME;
 import static org.apache.geode.cache.lucene.test.LuceneTestUtilities.REGION_NAME;
 import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.fail;
 
 import java.io.FileInputStream;
@@ -107,6 +109,27 @@ public class LuceneIndexCreationDUnitTest extends LuceneDUnitTest {
   }
 
   @Test
+  public void verifyThrowExceptionWhenClearOnRegionWithLuceneIndex() {
+    SerializableRunnableIF createIndex = getFieldsIndexWithOneField();
+    dataStore1.invoke(() -> {
+      initDataStore(createIndex, RegionTestableType.PARTITION_REDUNDANT);
+      Region<Object, Object> region = cache.getRegion(REGION_NAME);
+      assertNotNull(region);
+      assertThrows(UnsupportedOperationException.class, () -> region.clear());
+    });
+  }
+
+  @Test
+  public void verifyNotThrowExceptionWhenClearOnRegionWithoutLuceneIndex() {
+    dataStore1.invoke(() -> {
+      initDataStore(RegionTestableType.PARTITION_REDUNDANT);
+      Region<Object, Object> region = cache.getRegion(REGION_NAME);
+      assertNotNull(region);
+      region.clear();
+    });
+  }
+
+  @Test
   public void verifyThatEmptyListIsOutputWhenThereAreNoIndexesInTheSystem() {
     dataStore1.invoke(() -> verifyIndexList(0));
     dataStore2.invoke(() -> verifyIndexList(0));
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexImpl.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexImpl.java
index 8525e88..5be2b28 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexImpl.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexImpl.java
@@ -200,6 +200,7 @@ public abstract class LuceneIndexImpl implements InternalLuceneIndex {
     factory.setDiskStoreName(attributes.getDiskStoreName());
     factory.setDiskSynchronous(true);
     factory.setForwardExpirationDestroy(true);
+    factory.setPartitionedRegionClearUnsupported(true);
     return factory;
   }
 

[geode] 03/17: GEODE-9132: Delete ClearPRMessage

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit a6b7732f9388ac1a83410ae48f3c25fab66d0e63
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Fri Apr 9 15:06:40 2021 -0700

    GEODE-9132: Delete ClearPRMessage
---
 .../org/apache/geode/internal/DSFIDFactory.java    |   3 -
 .../geode/internal/cache/PartitionedRegion.java    |  10 -
 .../internal/cache/partitioned/ClearPRMessage.java | 320 ---------------------
 .../internal/cache/PartitionedRegionTest.java      |  15 -
 .../cache/partitioned/ClearPRMessageTest.java      | 260 -----------------
 5 files changed, 608 deletions(-)

diff --git a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
index f0658a6..5ad6058 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/DSFIDFactory.java
@@ -290,7 +290,6 @@ import org.apache.geode.internal.cache.partitioned.BucketCountLoadProbe;
 import org.apache.geode.internal.cache.partitioned.BucketProfileUpdateMessage;
 import org.apache.geode.internal.cache.partitioned.BucketSizeMessage;
 import org.apache.geode.internal.cache.partitioned.BucketSizeMessage.BucketSizeReplyMessage;
-import org.apache.geode.internal.cache.partitioned.ClearPRMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage.ContainsKeyValueReplyMessage;
 import org.apache.geode.internal.cache.partitioned.CreateBucketMessage;
@@ -991,8 +990,6 @@ public class DSFIDFactory implements DataSerializableFixedID {
     serializer.registerDSFID(GATEWAY_SENDER_QUEUE_ENTRY_SYNCHRONIZATION_ENTRY,
         GatewaySenderQueueEntrySynchronizationOperation.GatewaySenderQueueEntrySynchronizationEntry.class);
     serializer.registerDSFID(ABORT_BACKUP_REQUEST, AbortBackupRequest.class);
-    serializer.registerDSFID(PR_CLEAR_MESSAGE, ClearPRMessage.class);
-    serializer.registerDSFID(PR_CLEAR_REPLY_MESSAGE, ClearPRMessage.ClearReplyMessage.class);
     serializer.registerDSFID(HOST_AND_PORT, HostAndPort.class);
     serializer.registerDSFID(DISTRIBUTED_PING_MESSAGE, DistributedPingMessage.class);
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index a62b2b5..37b5383 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -180,7 +180,6 @@ import org.apache.geode.internal.cache.execute.PartitionedRegionFunctionResultWa
 import org.apache.geode.internal.cache.execute.RegionFunctionContextImpl;
 import org.apache.geode.internal.cache.execute.ServerToClientFunctionResultSender;
 import org.apache.geode.internal.cache.ha.ThreadIdentifier;
-import org.apache.geode.internal.cache.partitioned.ClearPRMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage.ContainsKeyValueResponse;
 import org.apache.geode.internal.cache.partitioned.DestroyMessage;
@@ -2182,15 +2181,6 @@ public class PartitionedRegion extends LocalRegion
     throw new UnsupportedOperationException();
   }
 
-  List<ClearPRMessage> createClearPRMessages(EventID eventID) {
-    ArrayList<ClearPRMessage> clearMsgList = new ArrayList<>();
-    for (int bucketId = 0; bucketId < getTotalNumberOfBuckets(); bucketId++) {
-      ClearPRMessage clearPRMessage = new ClearPRMessage(bucketId, eventID);
-      clearMsgList.add(clearPRMessage);
-    }
-    return clearMsgList;
-  }
-
   @Override
   void basicLocalClear(RegionEventImpl event) {
     throw new UnsupportedOperationException();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
deleted file mode 100644
index 2603b78..0000000
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ClearPRMessage.java
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.internal.cache.partitioned;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Set;
-
-import org.apache.logging.log4j.Logger;
-
-import org.apache.geode.DataSerializer;
-import org.apache.geode.annotations.VisibleForTesting;
-import org.apache.geode.cache.CacheException;
-import org.apache.geode.cache.Operation;
-import org.apache.geode.cache.persistence.PartitionOfflineException;
-import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.internal.ClusterDistributionManager;
-import org.apache.geode.distributed.internal.DirectReplyProcessor;
-import org.apache.geode.distributed.internal.DistributionManager;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.distributed.internal.ReplyException;
-import org.apache.geode.distributed.internal.ReplyMessage;
-import org.apache.geode.distributed.internal.ReplyProcessor21;
-import org.apache.geode.distributed.internal.ReplySender;
-import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.Assert;
-import org.apache.geode.internal.InternalDataSerializer;
-import org.apache.geode.internal.NanoTimer;
-import org.apache.geode.internal.cache.BucketRegion;
-import org.apache.geode.internal.cache.EventID;
-import org.apache.geode.internal.cache.ForceReattemptException;
-import org.apache.geode.internal.cache.PartitionedRegion;
-import org.apache.geode.internal.cache.RegionEventImpl;
-import org.apache.geode.internal.logging.log4j.LogMarker;
-import org.apache.geode.internal.serialization.DeserializationContext;
-import org.apache.geode.internal.serialization.SerializationContext;
-import org.apache.geode.logging.internal.log4j.api.LogService;
-
-public class ClearPRMessage extends PartitionMessageWithDirectReply {
-  private static final Logger logger = LogService.getLogger();
-
-  private Integer bucketId;
-
-  private EventID eventID;
-
-  public static final String BUCKET_NON_PRIMARY_MESSAGE =
-      "The bucket region on target member is no longer primary";
-  public static final String EXCEPTION_THROWN_DURING_CLEAR_OPERATION =
-      "An exception was thrown during the local clear operation: ";
-
-  /**
-   * state from operateOnRegion that must be preserved for transmission from the waiting pool
-   */
-  transient boolean result = false;
-
-  /**
-   * Empty constructor to satisfy {@link DataSerializer}requirements
-   */
-  public ClearPRMessage() {}
-
-  public ClearPRMessage(int bucketId, EventID eventID) {
-    this.bucketId = bucketId;
-    this.eventID = eventID;
-  }
-
-  public void initMessage(PartitionedRegion region, Set<InternalDistributedMember> recipients,
-      DirectReplyProcessor replyProcessor) {
-    this.resetRecipients();
-    if (recipients != null) {
-      setRecipients(recipients);
-    }
-    this.regionId = region.getPRId();
-    this.processor = replyProcessor;
-    this.processorId = replyProcessor == null ? 0 : replyProcessor.getProcessorId();
-    if (replyProcessor != null) {
-      replyProcessor.enableSevereAlertProcessing();
-    }
-  }
-
-  public ClearResponse send(DistributedMember recipient, PartitionedRegion region)
-      throws ForceReattemptException {
-    Set<InternalDistributedMember> recipients =
-        Collections.singleton((InternalDistributedMember) recipient);
-    ClearResponse clearResponse = new ClearResponse(region.getSystem(), recipients);
-    initMessage(region, recipients, clearResponse);
-    if (logger.isDebugEnabled()) {
-      logger.debug("ClearPRMessage.send: recipient is {}, msg is {}", recipient, this);
-    }
-
-    Set<InternalDistributedMember> failures = region.getDistributionManager().putOutgoing(this);
-    if (failures != null && failures.size() > 0) {
-      throw new ForceReattemptException("Failed sending <" + this + "> due to " + failures);
-    }
-    return clearResponse;
-  }
-
-  @Override
-  public int getDSFID() {
-    return PR_CLEAR_MESSAGE;
-  }
-
-  @Override
-  public void toData(DataOutput out, SerializationContext context) throws IOException {
-    super.toData(out, context);
-    if (bucketId == null) {
-      InternalDataSerializer.writeSignedVL(-1, out);
-    } else {
-      InternalDataSerializer.writeSignedVL(bucketId, out);
-    }
-    DataSerializer.writeObject(this.eventID, out);
-  }
-
-  @Override
-  public void fromData(DataInput in, DeserializationContext context)
-      throws IOException, ClassNotFoundException {
-    super.fromData(in, context);
-    this.bucketId = (int) InternalDataSerializer.readSignedVL(in);
-    this.eventID = (EventID) DataSerializer.readObject(in);
-  }
-
-  @Override
-  public EventID getEventID() {
-    return null;
-  }
-
-  /**
-   * This method is called upon receipt and make the desired changes to the PartitionedRegion Note:
-   * It is very important that this message does NOT cause any deadlocks as the sender will wait
-   * indefinitely for the acknowledgement
-   */
-  @Override
-  @VisibleForTesting
-  protected boolean operateOnPartitionedRegion(ClusterDistributionManager distributionManager,
-      PartitionedRegion region, long startTime) {
-    try {
-      this.result = doLocalClear(region);
-    } catch (ForceReattemptException ex) {
-      sendReply(getSender(), getProcessorId(), distributionManager, new ReplyException(ex), region,
-          startTime);
-      return false;
-    }
-    return this.result;
-  }
-
-  public Integer getBucketId() {
-    return this.bucketId;
-  }
-
-  public boolean doLocalClear(PartitionedRegion region)
-      throws ForceReattemptException {
-    // Retrieve local bucket region which matches target bucketId
-    BucketRegion bucketRegion =
-        region.getDataStore().getInitializedBucketForId(null, this.bucketId);
-
-    boolean lockedForPrimary = bucketRegion.doLockForPrimary(false);
-    // Check if we obtained primary lock, throw exception if not
-    if (!lockedForPrimary) {
-      throw new ForceReattemptException(BUCKET_NON_PRIMARY_MESSAGE);
-    }
-    try {
-      RegionEventImpl regionEvent = new RegionEventImpl(bucketRegion, Operation.REGION_CLEAR, null,
-          false, region.getMyId(), eventID);
-      bucketRegion.cmnClearRegion(regionEvent, false, true);
-    } catch (PartitionOfflineException poe) {
-      logger.info(
-          "All members holding data for bucket {} are offline, no more retries will be attempted",
-          this.bucketId,
-          poe);
-      throw poe;
-    } catch (Exception ex) {
-      throw new ForceReattemptException(
-          EXCEPTION_THROWN_DURING_CLEAR_OPERATION + ex.getClass().getName(), ex);
-    } finally {
-      bucketRegion.doUnlockForPrimary();
-    }
-
-    return true;
-  }
-
-  @Override
-  public boolean canStartRemoteTransaction() {
-    return false;
-  }
-
-  @Override
-  protected void sendReply(InternalDistributedMember member, int processorId,
-      DistributionManager distributionManager, ReplyException ex,
-      PartitionedRegion partitionedRegion, long startTime) {
-    if (partitionedRegion != null) {
-      if (startTime > 0) {
-        partitionedRegion.getPrStats().endPartitionMessagesProcessing(startTime);
-      }
-    }
-    ClearReplyMessage.send(member, processorId, getReplySender(distributionManager), this.result,
-        ex);
-  }
-
-  @Override
-  protected void appendFields(StringBuilder buff) {
-    super.appendFields(buff);
-    buff.append("; bucketId=").append(this.bucketId);
-  }
-
-  public static class ClearReplyMessage extends ReplyMessage {
-    @Override
-    public boolean getInlineProcess() {
-      return true;
-    }
-
-    /**
-     * Empty constructor to conform to DataSerializable interface
-     */
-    @SuppressWarnings("unused")
-    public ClearReplyMessage() {}
-
-    private ClearReplyMessage(int processorId, boolean result, ReplyException ex) {
-      super();
-      setProcessorId(processorId);
-      if (ex != null) {
-        setException(ex);
-      } else {
-        setReturnValue(result);
-      }
-    }
-
-    /**
-     * Send an ack
-     */
-    public static void send(InternalDistributedMember recipient, int processorId,
-        ReplySender replySender,
-        boolean result, ReplyException ex) {
-      Assert.assertNotNull(recipient, "ClearReplyMessage recipient was NULL.");
-      ClearReplyMessage message = new ClearReplyMessage(processorId, result, ex);
-      message.setRecipient(recipient);
-      replySender.putOutgoing(message);
-    }
-
-    /**
-     * Processes this message. This method is invoked by the receiver of the message.
-     *
-     * @param distributionManager the distribution manager that is processing the message.
-     */
-    @Override
-    public void process(final DistributionManager distributionManager,
-        final ReplyProcessor21 replyProcessor) {
-      final long startTime = getTimestamp();
-      if (replyProcessor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
-          logger.trace(LogMarker.DM_VERBOSE, "{}: processor not found", this);
-        }
-        return;
-      }
-      if (replyProcessor instanceof ClearResponse) {
-        ((ClearResponse) replyProcessor).setResponse(this);
-      }
-      replyProcessor.process(this);
-
-      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
-        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", replyProcessor, this);
-      }
-      distributionManager.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
-    }
-
-    @Override
-    public int getDSFID() {
-      return PR_CLEAR_REPLY_MESSAGE;
-    }
-
-    @Override
-    public String toString() {
-      StringBuilder stringBuilder = new StringBuilder(super.toString());
-      stringBuilder.append(" returnValue=");
-      stringBuilder.append(getReturnValue());
-      return stringBuilder.toString();
-    }
-  }
-
-  /**
-   * A processor to capture the value returned by {@link ClearPRMessage}
-   */
-  public static class ClearResponse extends PartitionResponse {
-    private volatile boolean returnValue;
-
-    public ClearResponse(InternalDistributedSystem distributedSystem,
-        Set<InternalDistributedMember> recipients) {
-      super(distributedSystem, recipients, false);
-    }
-
-    public void setResponse(ClearReplyMessage response) {
-      if (response.getException() == null) {
-        this.returnValue = (boolean) response.getReturnValue();
-      }
-    }
-
-    /**
-     * @return the result of the remote clear operation
-     * @throws ForceReattemptException if the peer is no longer available
-     * @throws CacheException if the peer generates an error
-     */
-    public boolean waitForResult() throws CacheException, ForceReattemptException {
-      waitForCacheException();
-      return this.returnValue;
-    }
-  }
-}
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
index f99b74b..2a2897d 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java
@@ -40,7 +40,6 @@ import static org.mockito.quality.Strictness.STRICT_STUBS;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -73,7 +72,6 @@ import org.apache.geode.distributed.internal.DistributionManager;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.cache.control.InternalResourceManager;
-import org.apache.geode.internal.cache.partitioned.ClearPRMessage;
 import org.apache.geode.internal.cache.partitioned.colocation.ColocationLoggerFactory;
 
 @RunWith(JUnitParamsRunner.class)
@@ -217,19 +215,6 @@ public class PartitionedRegionTest {
   }
 
   @Test
-  public void createClearPRMessagesShouldCreateMessagePerBucket() {
-    PartitionedRegion spyPartitionedRegion = spy(partitionedRegion);
-    RegionEventImpl regionEvent =
-        new RegionEventImpl(spyPartitionedRegion, Operation.REGION_CLEAR, null, false,
-            spyPartitionedRegion.getMyId(), true);
-    when(spyPartitionedRegion.getTotalNumberOfBuckets()).thenReturn(3);
-    EventID eventID = new EventID(spyPartitionedRegion.getCache().getDistributedSystem());
-    List<ClearPRMessage> msgs = spyPartitionedRegion.createClearPRMessages(eventID);
-    assertThat(msgs.size()).isEqualTo(3);
-  }
-
-
-  @Test
   public void getBucketNodeForReadOrWriteReturnsPrimaryNodeForRegisterInterest() {
     // ARRANGE
     EntryEventImpl clientEvent = mock(EntryEventImpl.class);
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/ClearPRMessageTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/ClearPRMessageTest.java
deleted file mode 100644
index acdd4fc..0000000
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/ClearPRMessageTest.java
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache.partitioned;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.notNull;
-import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import org.junit.Before;
-import org.junit.Test;
-
-import org.apache.geode.distributed.internal.ClusterDistributionManager;
-import org.apache.geode.distributed.internal.DMStats;
-import org.apache.geode.distributed.internal.DistributionManager;
-import org.apache.geode.distributed.internal.ReplyException;
-import org.apache.geode.distributed.internal.ReplySender;
-import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.cache.BucketRegion;
-import org.apache.geode.internal.cache.ForceReattemptException;
-import org.apache.geode.internal.cache.PartitionedRegion;
-import org.apache.geode.internal.cache.PartitionedRegionDataStore;
-import org.apache.geode.internal.cache.PartitionedRegionStats;
-import org.apache.geode.internal.cache.RegionEventImpl;
-
-public class ClearPRMessageTest {
-
-  ClearPRMessage message;
-  PartitionedRegion region;
-  PartitionedRegionDataStore dataStore;
-  BucketRegion bucketRegion;
-
-  @Before
-  public void setup() throws ForceReattemptException {
-    message = spy(new ClearPRMessage());
-    InternalDistributedMember member = mock(InternalDistributedMember.class);
-    region = mock(PartitionedRegion.class, RETURNS_DEEP_STUBS);
-    dataStore = mock(PartitionedRegionDataStore.class);
-    when(region.getDataStore()).thenReturn(dataStore);
-    when(region.getFullPath()).thenReturn("/test");
-    bucketRegion = mock(BucketRegion.class);
-    when(dataStore.getInitializedBucketForId(any(), any())).thenReturn(bucketRegion);
-    RegionEventImpl bucketRegionEventImpl = mock(RegionEventImpl.class);
-  }
-
-  @Test
-  public void doLocalClearThrowsExceptionWhenBucketIsNotPrimaryAtFirstCheck() {
-    when(bucketRegion.isPrimary()).thenReturn(false);
-
-    assertThatThrownBy(() -> message.doLocalClear(region))
-        .isInstanceOf(ForceReattemptException.class)
-        .hasMessageContaining(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
-  }
-
-  @Test
-  public void doLocalClearThrowsExceptionWhenLockCannotBeObtained() {
-    when(bucketRegion.doLockForPrimary(false)).thenReturn(false);
-
-    assertThatThrownBy(() -> message.doLocalClear(region))
-        .isInstanceOf(ForceReattemptException.class)
-        .hasMessageContaining(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
-  }
-
-  @Test
-  public void doLocalClearThrowsForceReattemptExceptionWhenAnExceptionIsThrownDuringClearOperation() {
-    NullPointerException exception = new NullPointerException("Error encountered");
-    doThrow(exception).when(bucketRegion).cmnClearRegion(any(), anyBoolean(), anyBoolean());
-
-    when(bucketRegion.doLockForPrimary(false)).thenReturn(true);
-
-    assertThatThrownBy(() -> message.doLocalClear(region))
-        .isInstanceOf(ForceReattemptException.class)
-        .hasMessageContaining(ClearPRMessage.EXCEPTION_THROWN_DURING_CLEAR_OPERATION);
-
-    // Confirm that cmnClearRegion was called
-    verify(bucketRegion, times(1)).cmnClearRegion(any(), anyBoolean(), anyBoolean());
-  }
-
-  @Test
-  public void doLocalClearInvokesCmnClearRegionWhenBucketIsPrimaryAndLockIsObtained()
-      throws ForceReattemptException {
-
-    // Be primary on the first check, then be not primary on the second check
-    when(bucketRegion.doLockForPrimary(false)).thenReturn(true);
-    assertThat(message.doLocalClear(region)).isTrue();
-
-    // Confirm that cmnClearRegion was called
-    verify(bucketRegion, times(1)).cmnClearRegion(any(), anyBoolean(), anyBoolean());
-  }
-
-  @Test
-  public void initMessageSetsReplyProcessorCorrectlyWithDefinedReplyProcessor() {
-    InternalDistributedMember sender = mock(InternalDistributedMember.class);
-
-    Set<InternalDistributedMember> recipients = new HashSet<>();
-    recipients.add(sender);
-
-    ClearPRMessage.ClearResponse mockProcessor = mock(ClearPRMessage.ClearResponse.class);
-    int mockProcessorId = 5;
-    when(mockProcessor.getProcessorId()).thenReturn(mockProcessorId);
-
-    message.initMessage(region, recipients, mockProcessor);
-
-    verify(mockProcessor, times(1)).enableSevereAlertProcessing();
-    assertThat(message.getProcessorId()).isEqualTo(mockProcessorId);
-  }
-
-  @Test
-  public void initMessageSetsProcessorIdToZeroWithNullProcessor() {
-    message.initMessage(region, null, null);
-
-    assertThat(message.getProcessorId()).isEqualTo(0);
-  }
-
-  @Test
-  public void sendThrowsExceptionIfPutOutgoingMethodReturnsNonNullSetOfFailures() {
-    InternalDistributedMember recipient = mock(InternalDistributedMember.class);
-
-    DistributionManager distributionManager = mock(DistributionManager.class);
-    when(region.getDistributionManager()).thenReturn(distributionManager);
-
-    doNothing().when(message).initMessage(any(), any(), any());
-    Set<InternalDistributedMember> failures = new HashSet<>();
-    failures.add(recipient);
-
-    when(distributionManager.putOutgoing(message)).thenReturn(failures);
-
-    assertThatThrownBy(() -> message.send(recipient, region))
-        .isInstanceOf(ForceReattemptException.class)
-        .hasMessageContaining("Failed sending <" + message + ">");
-  }
-
-  @SuppressWarnings("ResultOfMethodCallIgnored")
-  @Test
-  public void operateOnPartitionedRegionCallsSendReplyWithNoExceptionWhenDoLocalClearSucceeds()
-      throws ForceReattemptException {
-    ClusterDistributionManager distributionManager = mock(ClusterDistributionManager.class);
-    InternalDistributedMember sender = mock(InternalDistributedMember.class);
-    int processorId = 1000;
-    int startTime = 0;
-
-    doReturn(0).when(message).getBucketId();
-    doReturn(true).when(message).doLocalClear(region);
-    doReturn(sender).when(message).getSender();
-    doReturn(processorId).when(message).getProcessorId();
-
-    // We don't want to deal with mocking the behavior of sendReply() in this test, so we mock it to
-    // do nothing and verify later that it was called with proper input
-    doNothing().when(message).sendReply(any(), anyInt(), any(), any(), any(), anyLong());
-
-    message.operateOnPartitionedRegion(distributionManager, region, startTime);
-    assertThat(message.result).isTrue();
-
-    verify(message, times(0)).sendReply(sender, processorId, distributionManager, null, region,
-        startTime);
-  }
-
-  @SuppressWarnings("ResultOfMethodCallIgnored")
-  @Test
-  public void operateOnPartitionedRegionCallsSendReplyWithExceptionWhenDoLocalClearFailsWithException()
-      throws ForceReattemptException {
-    ClusterDistributionManager distributionManager = mock(ClusterDistributionManager.class);
-    InternalDistributedMember sender = mock(InternalDistributedMember.class);
-    int processorId = 1000;
-    int startTime = 0;
-    ForceReattemptException exception =
-        new ForceReattemptException(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
-
-    doReturn(0).when(message).getBucketId();
-    doThrow(exception).when(message).doLocalClear(region);
-    doReturn(sender).when(message).getSender();
-    doReturn(processorId).when(message).getProcessorId();
-
-    // We don't want to deal with mocking the behavior of sendReply() in this test, so we mock it to
-    // do nothing and verify later that it was called with proper input
-    doNothing().when(message).sendReply(any(), anyInt(), any(), any(), any(), anyLong());
-
-    message.operateOnPartitionedRegion(distributionManager, region, startTime);
-
-    verify(message, times(1)).sendReply(any(), anyInt(), any(), notNull(), any(), anyLong());
-  }
-
-  @Test
-  public void sendReplyEndsMessageProcessingIfWeHaveARegionAndHaveStartedProcessing() {
-    DistributionManager distributionManager = mock(DistributionManager.class);
-    InternalDistributedMember recipient = mock(InternalDistributedMember.class);
-    PartitionedRegionStats partitionedRegionStats = mock(PartitionedRegionStats.class);
-    when(region.getPrStats()).thenReturn(partitionedRegionStats);
-
-    int processorId = 1000;
-    int startTime = 10000;
-    ReplyException exception = new ReplyException(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
-
-    ReplySender replySender = mock(ReplySender.class);
-    doReturn(replySender).when(message).getReplySender(distributionManager);
-
-    message.sendReply(recipient, processorId, distributionManager, exception, region, startTime);
-
-    verify(partitionedRegionStats, times(1)).endPartitionMessagesProcessing(startTime);
-  }
-
-  @Test
-  public void sendReplyDoesNotEndMessageProcessingIfStartTimeIsZero() {
-    DistributionManager distributionManager = mock(DistributionManager.class);
-    InternalDistributedMember recipient = mock(InternalDistributedMember.class);
-    PartitionedRegionStats partitionedRegionStats = mock(PartitionedRegionStats.class);
-    when(region.getPrStats()).thenReturn(partitionedRegionStats);
-
-    int processorId = 1000;
-    int startTime = 0;
-    ReplyException exception = new ReplyException(ClearPRMessage.BUCKET_NON_PRIMARY_MESSAGE);
-
-    ReplySender replySender = mock(ReplySender.class);
-    doReturn(replySender).when(message).getReplySender(distributionManager);
-
-    message.sendReply(recipient, processorId, distributionManager, exception, region, startTime);
-
-    verify(partitionedRegionStats, times(0)).endPartitionMessagesProcessing(startTime);
-  }
-
-  @Test
-  public void clearReplyMessageProcessCallsSetResponseIfReplyProcessorIsInstanceOfClearResponse() {
-    DistributionManager distributionManager = mock(DistributionManager.class);
-    DMStats mockStats = mock(DMStats.class);
-    when(distributionManager.getStats()).thenReturn(mockStats);
-    ClearPRMessage.ClearReplyMessage clearReplyMessage = new ClearPRMessage.ClearReplyMessage();
-    ClearPRMessage.ClearResponse mockProcessor = mock(ClearPRMessage.ClearResponse.class);
-
-    clearReplyMessage.process(distributionManager, mockProcessor);
-
-    verify(mockProcessor, times(1)).setResponse(clearReplyMessage);
-  }
-}

[geode] 11/17: GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 1

Posted by nn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch feature/GEODE-7665
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 41eba463fb32d8ecd227c264854c5788190aab60
Author: Kirk Lund <kl...@apache.org>
AuthorDate: Mon Apr 19 14:38:02 2021 -0700

    GEODE-9132: PartitionedRegionClearWithConcurrentOperationsDUnitTest cleanup 1
---
 ...gionClearWithConcurrentOperationsDUnitTest.java | 97 +++++++++-------------
 1 file changed, 41 insertions(+), 56 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
index 77537cb..c9a1e5b 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionClearWithConcurrentOperationsDUnitTest.java
@@ -28,11 +28,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.IntStream;
 
 import junitparams.JUnitParamsRunner;
@@ -61,7 +56,6 @@ import org.apache.geode.distributed.internal.membership.api.MembershipManagerHel
 import org.apache.geode.internal.cache.versions.RegionVersionHolder;
 import org.apache.geode.internal.cache.versions.RegionVersionVector;
 import org.apache.geode.internal.cache.versions.VersionSource;
-import org.apache.geode.test.awaitility.GeodeAwaitility;
 import org.apache.geode.test.dunit.AsyncInvocation;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.rules.CacheRule;
@@ -73,15 +67,16 @@ import org.apache.geode.test.dunit.rules.DistributedRule;
  * added or removed.
  */
 @RunWith(JUnitParamsRunner.class)
+@SuppressWarnings("serial")
 public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements Serializable {
-  private static final Integer BUCKETS = 13;
+
+  private static final int BUCKETS = 13;
   private static final String REGION_NAME = "PartitionedRegion";
   private static final String TEST_CASE_NAME =
       "[{index}] {method}(Coordinator:{0}, RegionType:{1})";
 
   @Rule
   public DistributedRule distributedRule = new DistributedRule(3);
-
   @Rule
   public CacheRule cacheRule = CacheRule.builder().createCacheInAll().build();
 
@@ -89,22 +84,6 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   private VM server2;
   private VM accessor;
 
-  private enum TestVM {
-    ACCESSOR(0), SERVER1(1), SERVER2(2);
-
-    final int vmNumber;
-
-    TestVM(int vmNumber) {
-      this.vmNumber = vmNumber;
-    }
-  }
-
-  static RegionShortcut[] regionTypes() {
-    return new RegionShortcut[] {
-        RegionShortcut.PARTITION, RegionShortcut.PARTITION_REDUNDANT
-    };
-  }
-
   @SuppressWarnings("unused")
   static TestVM[] coordinators() {
     return new TestVM[] {
@@ -114,7 +93,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
 
   @SuppressWarnings("unused")
   static Object[] coordinatorsAndRegionTypes() {
-    ArrayList<Object[]> parameters = new ArrayList<>();
+    List<Object[]> parameters = new ArrayList<>();
     RegionShortcut[] regionShortcuts = regionTypes();
 
     Arrays.stream(regionShortcuts).forEach(regionShortcut -> {
@@ -125,6 +104,12 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     return parameters.toArray();
   }
 
+  private static RegionShortcut[] regionTypes() {
+    return new RegionShortcut[] {
+        RegionShortcut.PARTITION, RegionShortcut.PARTITION_REDUNDANT
+    };
+  }
+
   @Before
   public void setUp() throws Exception {
     server1 = getVM(TestVM.SERVER1.vmNumber);
@@ -178,7 +163,7 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   /**
    * Populates the region and verifies the data on the selected VMs.
    */
-  private void populateRegion(VM feeder, int entryCount, List<VM> vms) {
+  private void populateRegion(VM feeder, int entryCount, Iterable<VM> vms) {
     feeder.invoke(() -> {
       Region<String, String> region = cacheRule.getCache().getRegion(REGION_NAME);
       IntStream.range(0, entryCount).forEach(i -> region.put(String.valueOf(i), "Value_" + i));
@@ -240,14 +225,13 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
    * Asserts that the region data is consistent across buckets.
    */
   private void assertRegionBucketsConsistency() throws ForceReattemptException {
-    List<BucketDump> bucketDumps;
     PartitionedRegion region = (PartitionedRegion) cacheRule.getCache().getRegion(REGION_NAME);
     // Redundant copies + 1 primary.
     int expectedCopies = region.getRedundantCopies() + 1;
 
     for (int bId = 0; bId < BUCKETS; bId++) {
       final int bucketId = bId;
-      bucketDumps = region.getAllBucketEntries(bucketId);
+      List<BucketDump> bucketDumps = region.getAllBucketEntries(bucketId);
       assertThat(bucketDumps.size())
           .as("Bucket " + bucketId + " should have " + expectedCopies + " copies, but has "
               + bucketDumps.size())
@@ -379,26 +363,16 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
 
   /**
    * Continuously execute clear operations on the PartitionedRegion every periodInMillis for the
-   * given
-   * durationInMillis.
+   * given durationInMillis.
    */
-  private void executeClears(final long durationInMillis, final long periodInMillis)
-      throws InterruptedException {
+  private void executeClears(final long durationInMillis, final long periodInMillis) {
     Cache cache = cacheRule.getCache();
-    AtomicLong invocationCount = new AtomicLong(0);
     Region<String, String> region = cache.getRegion(REGION_NAME);
-    Long minimumInvocationCount = (durationInMillis / periodInMillis);
-    ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
-    ScheduledFuture<?> scheduledFuture = executor.scheduleWithFixedDelay(() -> {
+    long minimumInvocationCount = durationInMillis / periodInMillis;
+
+    for (int invocationCount = 0; invocationCount < minimumInvocationCount; invocationCount++) {
       region.clear();
-      invocationCount.incrementAndGet();
-    }, 0, periodInMillis, TimeUnit.MILLISECONDS);
-
-    await().untilAsserted(
-        () -> assertThat(invocationCount.get()).isGreaterThanOrEqualTo(minimumInvocationCount));
-    scheduledFuture.cancel(false);
-    executor.shutdown();
-    executor.awaitTermination(GeodeAwaitility.getTimeout().getSeconds(), TimeUnit.SECONDS);
+    }
   }
 
   /**
@@ -413,11 +387,11 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @Parameters(method = "coordinatorsAndRegionTypes")
   public void clearWithConcurrentPutGetRemoveShouldWorkCorrectly(TestVM coordinatorVM,
       RegionShortcut regionShortcut) throws InterruptedException {
-    final int entries = 15000;
-    final int workMillis = 60000;
     parametrizedSetup(regionShortcut);
 
     // Let all VMs continuously execute puts and gets for 60 seconds.
+    final int workMillis = 60000;
+    final int entries = 15000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
         server1.invokeAsync(() -> executePuts(entries, workMillis)),
         server2.invokeAsync(() -> executeGets(entries, workMillis)),
@@ -448,10 +422,10 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @Parameters(method = "coordinatorsAndRegionTypes")
   public void clearWithConcurrentPutAllRemoveAllShouldWorkCorrectly(TestVM coordinatorVM,
       RegionShortcut regionShortcut) throws InterruptedException {
-    final int workMillis = 15000;
     parametrizedSetup(regionShortcut);
 
     // Let all VMs continuously execute putAll and removeAll for 15 seconds.
+    final int workMillis = 15000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
         server1.invokeAsync(() -> executePutAlls(0, 2000, workMillis)),
         server1.invokeAsync(() -> executeRemoveAlls(0, 2000, workMillis)),
@@ -486,8 +460,8 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @TestCaseName("[{index}] {method}(RegionType:{0})")
   @Parameters(method = "regionTypes")
   public void clearShouldFailWhenCoordinatorMemberIsBounced(RegionShortcut regionShortcut) {
-    final int entries = 1000;
     parametrizedSetup(regionShortcut);
+    final int entries = 1000;
     populateRegion(accessor, entries, asList(accessor, server1, server2));
 
     // Set the MemberKiller to kill the coordinator and try to clear the region
@@ -529,13 +503,13 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @TestCaseName("[{index}] {method}(Coordinator:{0})")
   public void clearOnRedundantPartitionRegionWithConcurrentPutGetRemoveShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
       TestVM coordinatorVM) throws InterruptedException {
-    final int entries = 7500;
-    final int workMillis = 30000;
     parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
+    final int entries = 7500;
     populateRegion(accessor, entries, asList(accessor, server1, server2));
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
     // Let all VMs (except the one to kill) continuously execute gets, puts and removes for 30 seconds.
+    final int workMillis = 30000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
         server1.invokeAsync(() -> executeGets(entries, workMillis)),
         server1.invokeAsync(() -> executePuts(entries, workMillis)),
@@ -581,13 +555,13 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @TestCaseName("[{index}] {method}(Coordinator:{0})")
   public void clearOnNonRedundantPartitionRegionWithConcurrentPutGetRemoveShouldFailWhenNonCoordinatorMembersAreBounced(
       TestVM coordinatorVM) throws InterruptedException {
-    final int entries = 7500;
-    final int workMillis = 30000;
     parametrizedSetup(RegionShortcut.PARTITION);
+    final int entries = 7500;
     populateRegion(accessor, entries, asList(accessor, server1, server2));
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
     // Let all VMs (except the one to kill) continuously execute gets, puts and removes for 30 seconds.
+    final int workMillis = 30000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
         server1.invokeAsync(() -> executeGets(entries, workMillis)),
         server1.invokeAsync(() -> executePuts(entries, workMillis)),
@@ -620,11 +594,11 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @TestCaseName("[{index}] {method}(Coordinator:{0})")
   public void clearOnRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldWorkCorrectlyWhenNonCoordinatorMembersAreBounced(
       TestVM coordinatorVM) throws InterruptedException {
-    final int workMillis = 30000;
     parametrizedSetup(RegionShortcut.PARTITION_REDUNDANT);
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
     // Let all VMs continuously execute putAll/removeAll for 30 seconds.
+    final int workMillis = 30000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
         server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
         accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
@@ -666,10 +640,10 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
   @TestCaseName("[{index}] {method}(Coordinator:{0})")
   public void clearOnNonRedundantPartitionRegionWithConcurrentPutAllRemoveAllShouldFailWhenNonCoordinatorMembersAreBounced(
       TestVM coordinatorVM) throws InterruptedException {
-    final int workMillis = 30000;
     parametrizedSetup(RegionShortcut.PARTITION);
     server2.invoke(() -> DistributionMessageObserver.setInstance(new MemberKiller(false)));
 
+    final int workMillis = 30000;
     List<AsyncInvocation<Void>> asyncInvocationList = Arrays.asList(
         server1.invokeAsync(() -> executePutAlls(0, 6000, workMillis)),
         accessor.invokeAsync(() -> executeRemoveAlls(2000, 4000, workMillis)));
@@ -686,13 +660,24 @@ public class PartitionedRegionClearWithConcurrentOperationsDUnitTest implements
     }
   }
 
+  private enum TestVM {
+    ACCESSOR(0), SERVER1(1), SERVER2(2);
+
+    final int vmNumber;
+
+    TestVM(int vmNumber) {
+      this.vmNumber = vmNumber;
+    }
+  }
+
   /**
    * Shuts down a coordinator member while the clear operation is in progress.
    */
-  public static class MemberKiller extends DistributionMessageObserver {
+  private static class MemberKiller extends DistributionMessageObserver {
+
     private final boolean coordinator;
 
-    public MemberKiller(boolean coordinator) {
+    private MemberKiller(boolean coordinator) {
       this.coordinator = coordinator;
     }